From 46fdd70114df29df63eaee93e837033be26a1f3d Mon Sep 17 00:00:00 2001 From: Minio Trusted Date: Tue, 13 Dec 2016 09:31:19 -0800 Subject: [PATCH 001/100] Fix docker file and avoid creating README.md --- Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index eaefcff0b..80a91b589 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,6 @@ RUN \ go-wrapper download && \ go-wrapper install -ldflags "$(go run buildscripts/gen-ldflags.go)" && \ mkdir -p /export/docker && \ - cp /go/src/app/docs/docker/README.md /export/docker/ && \ rm -rf /go/pkg /go/src && \ apk del git From ab49498fc3b46d420b65e12a2aa5670999330070 Mon Sep 17 00:00:00 2001 From: Krishnan Parthasarathi Date: Wed, 14 Dec 2016 00:48:31 +0530 Subject: [PATCH 002/100] server: Exit gracefully if no endpoint is local to it. (#3442) --- cmd/server-main.go | 18 ++++++++++++++++++ cmd/server-main_test.go | 29 +++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/cmd/server-main.go b/cmd/server-main.go index bb45b459d..04b3ac667 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -17,6 +17,7 @@ package cmd import ( + "errors" "fmt" "io/ioutil" "net" @@ -360,6 +361,18 @@ func checkServerSyntax(c *cli.Context) { } } +// Checks if any of the endpoints supplied is local to a server instance. +func isAnyEndpointLocal(eps []*url.URL) bool { + anyLocalEp := false + for _, ep := range eps { + if isLocalStorage(ep) { + anyLocalEp = true + break + } + } + return anyLocalEp +} + // serverMain handler called for 'minio server' command. func serverMain(c *cli.Context) { if !c.Args().Present() || c.Args().First() == "help" { @@ -401,6 +414,11 @@ func serverMain(c *cli.Context) { endpoints, err := parseStorageEndpoints(c.Args()) fatalIf(err, "Unable to parse storage endpoints %s", c.Args()) + // Should exit gracefully if none of the endpoints passed as command line argument is local to this server. 
+ if !isAnyEndpointLocal(endpoints) { + fatalIf(errors.New("No endpoint is local to this server"), "None of the disks supplied are local to this instance. Please check the disks supplied.") + } + storageDisks, err := initStorageDisks(endpoints) fatalIf(err, "Unable to initialize storage disk(s).") diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index 4d0463971..1f6a1997f 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -347,3 +347,32 @@ func TestInitServerConfig(t *testing.T) { initServerConfig(ctx) } } + +// Tests isAnyEndpointLocal function with inputs such that it returns true and false respectively. +func TestIsAnyEndpointLocal(t *testing.T) { + testCases := []struct { + disks []string + result bool + }{ + { + disks: []string{"http://4.4.4.4/mnt/disk1", + "http://4.4.4.4/mnt/disk1"}, + result: false, + }, + { + disks: []string{"http://localhost/mnt/disk1", + "http://localhost/mnt/disk1"}, + result: true, + }, + } + for i, test := range testCases { + endpoints, err := parseStorageEndpoints(test.disks) + if err != nil { + t.Fatalf("Test %d - Failed to parse storage endpoints %v", i+1, err) + } + actual := isAnyEndpointLocal(endpoints) + if actual != test.result { + t.Errorf("Test %d - Expected %v but received %v", i+1, test.result, actual) + } + } +} From 3fe2d77b70c9d309eb9576b30fdde054edddf1c3 Mon Sep 17 00:00:00 2001 From: Karthic Rao Date: Wed, 14 Dec 2016 01:21:48 +0530 Subject: [PATCH 003/100] Adding functions for resetting globals. (#3421) --- cmd/test-utils_test.go | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 01746bd94..e620148e3 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -432,6 +432,39 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer { return testRPCServer } +// Sets the global config path to empty string. 
+func resetGlobalConfigPath() { + setGlobalConfigPath("") +} + +// sets globalObjectAPI to `nil`. +func resetGlobalObjectAPI() { + globalObjLayerMutex.Lock() + globalObjectAPI = nil + globalObjLayerMutex.Unlock() +} + +// reset the value of the Global server config. +// set it to `nil`. +func resetGlobalConfig() { + // hold the mutex lock before a new config is assigned. + serverConfigMu.Lock() + // Save the loaded config globally. + serverConfig = nil + serverConfigMu.Unlock() +} + +// Resets all the globals used modified in tests. +// Resetting ensures that the changes made to globals by one test doesn't affect others. +func resetTestGlobals() { + // set globalObjectAPI to `nil`. + resetGlobalObjectAPI() + // Reset config path set. + resetGlobalConfigPath() + // Reset Global server config. + resetGlobalConfig() +} + // Configure the server for the test run. func newTestConfig(bucketLocation string) (rootPath string, err error) { // Get test root. From b28ff501260d5b84779029ac22423fe88b3fcdc4 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 14 Dec 2016 08:02:32 -0800 Subject: [PATCH 004/100] lock/server: Check if the lock server itself is skewed back. (#3447) --- cmd/lock-rpc-server-common.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/lock-rpc-server-common.go b/cmd/lock-rpc-server-common.go index eee2f6246..716028de6 100644 --- a/cmd/lock-rpc-server-common.go +++ b/cmd/lock-rpc-server-common.go @@ -58,9 +58,11 @@ func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) boo } // Validate lock args. +// - validate time stamp. +// - validate jwt token. 
func (l *lockServer) validateLockArgs(args *LockArgs) error { curTime := time.Now().UTC() - if curTime.Sub(args.Timestamp) > globalMaxSkewTime { + if curTime.Sub(args.Timestamp) > globalMaxSkewTime || args.Timestamp.Sub(curTime) > globalMaxSkewTime { return errServerTimeMismatch } if !isRPCTokenValid(args.Token) { From d9fd6f9a9667d3dc81a87fe36e2cc4be93ac2596 Mon Sep 17 00:00:00 2001 From: koolhead17 Date: Thu, 15 Dec 2016 07:15:47 +0530 Subject: [PATCH 005/100] docs: Removed $/# from code block to make code copy easier. (#3448) --- README.md | 28 ++++++++++++++-------------- docs/FreeBSD.md | 30 +++++++++++++++--------------- docs/distributed/README.md | 12 ++++++------ docs/erasure/README.md | 2 +- 4 files changed, 36 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index 885df8b50..9abe0bec2 100644 --- a/README.md +++ b/README.md @@ -7,14 +7,14 @@ Minio server is light enough to be bundled with the application stack, similar t ## Docker Container ### Stable ```sh -$ docker pull minio/minio -$ docker run -p 9000:9000 minio/minio server /export +docker pull minio/minio +docker run -p 9000:9000 minio/minio server /export ``` ### Edge ```sh -$ docker pull minio/minio:edge -$ docker run -p 9000:9000 minio/minio:edge server /export +docker pull minio/minio:edge +docker run -p 9000:9000 minio/minio:edge server /export ``` Please visit Minio Docker quickstart guide for more [here](https://docs.minio.io/docs/minio-docker-quickstart-guide) @@ -23,8 +23,8 @@ Please visit Minio Docker quickstart guide for more [here](https://docs.minio.io Install minio packages using [Homebrew](http://brew.sh/) ```sh -$ brew install minio -$ minio server ~/Photos +brew install minio +minio server ~/Photos ``` ### Binary Download @@ -32,8 +32,8 @@ $ minio server ~/Photos | ----------| -------- | ------| |Apple OS X|64-bit Intel|https://dl.minio.io/server/minio/release/darwin-amd64/minio| ```sh -$ chmod 755 minio -$ ./minio server ~/Photos +chmod 755 minio +./minio server 
~/Photos ``` ## GNU/Linux @@ -44,8 +44,8 @@ $ ./minio server ~/Photos ||32-bit Intel|https://dl.minio.io/server/minio/release/linux-386/minio| ||32-bit ARM|https://dl.minio.io/server/minio/release/linux-arm/minio| ```sh -$ chmod +x minio -$ ./minio server ~/Photos +chmod +x minio +./minio server ~/Photos ``` ## Microsoft Windows @@ -55,7 +55,7 @@ $ ./minio server ~/Photos |Microsoft Windows|64-bit|https://dl.minio.io/server/minio/release/windows-amd64/minio.exe| ||32-bit|https://dl.minio.io/server/minio/release/windows-386/minio.exe| ```sh -C:\Users\Username\Downloads> minio.exe server D:\Photos +minio.exe server D:\Photos ``` ## FreeBSD @@ -64,8 +64,8 @@ C:\Users\Username\Downloads> minio.exe server D:\Photos | ----------| -------- | ------| |FreeBSD|64-bit|https://dl.minio.io/server/minio/release/freebsd-amd64/minio| ```sh -$ chmod 755 minio -$ ./minio server ~/Photos +chmod 755 minio +./minio server ~/Photos ``` Please visit official zfs FreeBSD guide for more details [here](https://www.freebsd.org/doc/handbook/zfs-quickstart.html) @@ -75,7 +75,7 @@ Source installation is only intended for developers and advanced users. If you d ```sh -$ go get -u github.com/minio/minio +go get -u github.com/minio/minio ``` diff --git a/docs/FreeBSD.md b/docs/FreeBSD.md index cddfe9f22..bd11c1eea 100644 --- a/docs/FreeBSD.md +++ b/docs/FreeBSD.md @@ -18,13 +18,13 @@ Start ZFS service ```sh -# service zfs start + service zfs start ``` ```sh -# dd if=/dev/zero of=/zfs bs=1M count=4000 + dd if=/dev/zero of=/zfs bs=1M count=4000 ``` @@ -32,7 +32,7 @@ Configure a loopback device on the `/zfs` file. 
```sh -# mdconfig -a -t vnode -f /zfs + mdconfig -a -t vnode -f /zfs ``` @@ -40,13 +40,13 @@ Create zfs pool ```sh -# zpool create minio-example /dev/md0 + zpool create minio-example /dev/md0 ``` ```sh -# df /minio-example + df /minio-example Filesystem 512-blocks Used Avail Capacity Mounted on minio-example 7872440 38 7872402 0% /minio-example @@ -56,8 +56,8 @@ Verify if it is writable ```sh -# touch /minio-example/testfile -# ls -l /minio-example/testfile +touch /minio-example/testfile + ls -l /minio-example/testfile -rw-r--r-- 1 root wheel 0 Apr 26 00:51 /minio-example/testfile ``` @@ -69,8 +69,8 @@ However, this pool is not taking advantage of any ZFS features, so let's create ```sh -# zfs create minio-example/compressed-objects -# zfs set compression=lz4 minio-example/compressed-objects + zfs create minio-example/compressed-objects + zfs set compression=lz4 minio-example/compressed-objects ``` @@ -78,7 +78,7 @@ To keep monitoring your pool use ```sh -# zpool status + zpool status pool: minio-example state: ONLINE scan: none requested @@ -98,7 +98,7 @@ Now start minio server on the ``/minio-example/compressed-objects``, change the ```sh -# chown -R minio-user:minio-user /minio-example/compressed-objects + chown -R minio-user:minio-user /minio-example/compressed-objects ``` @@ -106,9 +106,9 @@ Now login as ``minio-user`` and start minio server. ```sh -$ curl https://dl.minio.io/server/minio/release/freebsd-amd64/minio > minio -$ chmod 755 minio -$ ./minio server /minio-example/compressed-objects +curl https://dl.minio.io/server/minio/release/freebsd-amd64/minio > minio +chmod 755 minio +./minio server /minio-example/compressed-objects ``` @@ -126,7 +126,7 @@ It is possible to build the minio server from source on FreeBSD. 
To do this we We will need to install golang and GNU make: ```sh -$ sudo pkg install go gmake +sudo pkg install go gmake ``` Now we can proceed with the normal build process of minio server as found [here](https://github.com/minio/minio/blob/master/CONTRIBUTING.md). The only caveat is we need to specify gmake (GNU make) when building minio server as the current Makefile is not BSD make compatible: diff --git a/docs/distributed/README.md b/docs/distributed/README.md index 741fe7aea..749ca1693 100644 --- a/docs/distributed/README.md +++ b/docs/distributed/README.md @@ -41,9 +41,9 @@ Below examples will clarify further: Example 1: Start distributed Minio instance with 1 drive each on 8 nodes, by running this command on all the 8 nodes. ```shell -$ export MINIO_ACCESS_KEY= -$ export MINIO_SECRET_KEY= -$ minio server http://192.168.1.11/export1 http://192.168.1.12/export2 \ +export MINIO_ACCESS_KEY= +export MINIO_SECRET_KEY= +minio server http://192.168.1.11/export1 http://192.168.1.12/export2 \ http://192.168.1.13/export3 http://192.168.1.14/export4 \ http://192.168.1.15/export5 http://192.168.1.16/export6 \ http://192.168.1.17/export7 http://192.168.1.18/export8 @@ -54,9 +54,9 @@ $ minio server http://192.168.1.11/export1 http://192.168.1.12/export2 \ Example 2: Start distributed Minio instance with 4 drives each on 4 nodes, by running this command on all the 4 nodes. 
```shell -$ export MINIO_ACCESS_KEY= -$ export MINIO_SECRET_KEY= -$ minio server http://192.168.1.11/export1 http://192.168.1.11/export2 \ +export MINIO_ACCESS_KEY= +export MINIO_SECRET_KEY= +minio server http://192.168.1.11/export1 http://192.168.1.11/export2 \ http://192.168.1.11/export3 http://192.168.1.11/export4 \ http://192.168.1.12/export1 http://192.168.1.12/export2 \ http://192.168.1.12/export3 http://192.168.1.12/export4 \ diff --git a/docs/erasure/README.md b/docs/erasure/README.md index fe915d32b..7a72daf3a 100644 --- a/docs/erasure/README.md +++ b/docs/erasure/README.md @@ -49,7 +49,7 @@ Example: Start Minio server in a 12 drives setup. ```sh -$ minio server /mnt/export1/backend /mnt/export2/backend /mnt/export3/backend /mnt/export4/backend /mnt/export5/backend /mnt/export6/backend /mnt/export7/backend /mnt/export8/backend /mnt/export9/backend /mnt/export10/backend /mnt/export11/backend /mnt/export12/backend +minio server /mnt/export1/backend /mnt/export2/backend /mnt/export3/backend /mnt/export4/backend /mnt/export5/backend /mnt/export6/backend /mnt/export7/backend /mnt/export8/backend /mnt/export9/backend /mnt/export10/backend /mnt/export11/backend /mnt/export12/backend ``` From 664ff063a14b6ae0c38ff03e90bb68317dc32fc1 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 14 Dec 2016 20:42:19 -0800 Subject: [PATCH 006/100] server: checkEndpoints syntax properly. (#3451) --- cmd/server-main.go | 70 +++++++++++++++++++---------------------- cmd/server-main_test.go | 40 ++++++++++++++--------- 2 files changed, 59 insertions(+), 51 deletions(-) diff --git a/cmd/server-main.go b/cmd/server-main.go index 04b3ac667..fe41c5562 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -17,7 +17,6 @@ package cmd import ( - "errors" "fmt" "io/ioutil" "net" @@ -261,48 +260,36 @@ func isDistributedSetup(eps []*url.URL) bool { return false } -// We just exit for invalid endpoints. 
-func checkEndpointsSyntax(eps []*url.URL, disks []string) { +// Check if the endpoints are following expected syntax, i.e +// valid scheme, valid path across all platforms. +func checkEndpointsSyntax(eps []*url.URL, disks []string) error { for i, u := range eps { switch u.Scheme { - case "", "http", "https": - // Scheme is "" for FS and singlenode-XL, hence pass. + case "": + // "/" is not allowed. + if u.Path == "" || u.Path == "/" { + return fmt.Errorf("Root path is not allowed : %s (%s)", u.Path, disks[i]) + } + case "http", "https": + // "http://server1/" is not allowed + if u.Path == "" || u.Path == "/" || u.Path == "\\" { + return fmt.Errorf("Root path is not allowed : %s (%s)", u.Path, disks[i]) + } default: if runtime.GOOS == "windows" { // On windows for "C:\export" scheme will be "C" matched, err := regexp.MatchString("^[a-zA-Z]$", u.Scheme) - fatalIf(err, "Parsing scheme : %s (%s)", u.Scheme, disks[i]) + if err != nil { + return fmt.Errorf("Invalid scheme : %s (%s), ERROR %s", u.Scheme, disks[i], err) + } if matched { break } } - fatalIf(errInvalidArgument, "Invalid scheme : %s (%s)", u.Scheme, disks[i]) - } - if runtime.GOOS == "windows" { - if u.Scheme == "http" || u.Scheme == "https" { - // "http://server1/" is not allowed - if u.Path == "" || u.Path == "/" || u.Path == "\\" { - fatalIf(errInvalidArgument, "Empty path for %s", disks[i]) - } - } - } else { - if u.Scheme == "http" || u.Scheme == "https" { - // "http://server1/" is not allowed. - if u.Path == "" || u.Path == "/" { - fatalIf(errInvalidArgument, "Empty path for %s", disks[i]) - } - } else { - // "/" is not allowed. 
- if u.Path == "" || u.Path == "/" { - fatalIf(errInvalidArgument, "Empty path for %s", disks[i]) - } - } + return fmt.Errorf("Invalid scheme : %s (%s)", u.Scheme, disks[i]) } } - - if err := checkDuplicateEndpoints(eps); err != nil { - fatalIf(errInvalidArgument, "Duplicate entries in %s", strings.Join(disks, " ")) - } + return nil } // Make sure all the command line parameters are OK and exit in case of invalid parameters. @@ -315,11 +302,18 @@ func checkServerSyntax(c *cli.Context) { // Verify syntax for all the XL disks. disks := c.Args() endpoints, err := parseStorageEndpoints(disks) - fatalIf(err, "Unable to parse storage endpoints %s", disks) - checkEndpointsSyntax(endpoints, disks) + fatalIf(err, "Unable to parse storage endpoints %s", strings.Join(disks, " ")) + + // Validate if endpoints follow the expected syntax. + err = checkEndpointsSyntax(endpoints, disks) + fatalIf(err, "Invalid endpoints found %s", strings.Join(disks, " ")) + + // Validate for duplicate endpoints are supplied. + err = checkDuplicateEndpoints(endpoints) + fatalIf(err, "Duplicate entries in %s", strings.Join(disks, " ")) if len(endpoints) > 1 { - // For XL setup. + // Validate if we have sufficient disks for XL setup. err = checkSufficientDisks(endpoints) fatalIf(err, "Storage endpoint error.") } @@ -361,7 +355,7 @@ func checkServerSyntax(c *cli.Context) { } } -// Checks if any of the endpoints supplied is local to a server instance. +// Checks if any of the endpoints supplied is local to this server. func isAnyEndpointLocal(eps []*url.URL) bool { anyLocalEp := false for _, ep := range eps { @@ -385,6 +379,7 @@ func serverMain(c *cli.Context) { // Initialization routine, such as config loading, enable logging, .. minioInit() + // Check for minio updates from dl.minio.io checkUpdate() // Server address. 
@@ -414,9 +409,10 @@ func serverMain(c *cli.Context) { endpoints, err := parseStorageEndpoints(c.Args()) fatalIf(err, "Unable to parse storage endpoints %s", c.Args()) - // Should exit gracefully if none of the endpoints passed as command line argument is local to this server. + // Should exit gracefully if none of the endpoints passed + // as command line args are local to this server. if !isAnyEndpointLocal(endpoints) { - fatalIf(errors.New("No endpoint is local to this server"), "None of the disks supplied are local to this instance. Please check the disks supplied.") + fatalIf(errInvalidArgument, "None of the disks passed as command line args are local to this server.") } storageDisks, err := initStorageDisks(endpoints) diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index 1f6a1997f..5019f55ce 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -197,6 +197,8 @@ func TestParseStorageEndpoints(t *testing.T) { globalMinioHost = "" } +// Test check endpoints syntax function for syntax verification +// across various scenarios of inputs. func TestCheckEndpointsSyntax(t *testing.T) { var testCases []string if runtime.GOOS == "windows" { @@ -221,19 +223,36 @@ func TestCheckEndpointsSyntax(t *testing.T) { for _, disk := range testCases { eps, err := parseStorageEndpoints([]string{disk}) if err != nil { - t.Error(disk, err) - continue + t.Fatalf("Unable to parse %s, error %s", disk, err) } - // This will fatalIf() if endpoint is invalid. 
- checkEndpointsSyntax(eps, []string{disk}) + if err = checkEndpointsSyntax(eps, []string{disk}); err != nil { + t.Errorf("Invalid endpoints %s", err) + } + } + eps, err := parseStorageEndpoints([]string{"/"}) + if err != nil { + t.Fatalf("Unable to parse /, error %s", err) + } + if err = checkEndpointsSyntax(eps, []string{"/"}); err == nil { + t.Error("Should fail, passed instead") + } + eps, err = parseStorageEndpoints([]string{"http://localhost/"}) + if err != nil { + t.Fatalf("Unable to parse http://localhost/, error %s", err) + } + if err = checkEndpointsSyntax(eps, []string{"http://localhost/"}); err == nil { + t.Error("Should fail, passed instead") } } +// Tests check server syntax. func TestCheckServerSyntax(t *testing.T) { app := cli.NewApp() app.Commands = []cli.Command{serverCmd} serverFlagSet := flag.NewFlagSet("server", 0) - cli.NewContext(app, serverFlagSet, nil) + serverFlagSet.String("address", ":9000", "") + ctx := cli.NewContext(app, serverFlagSet, serverFlagSet) + disksGen := func(n int) []string { disks, err := getRandomDisks(n) if err != nil { @@ -247,21 +266,14 @@ func TestCheckServerSyntax(t *testing.T) { disksGen(8), disksGen(16), } + for i, disks := range testCases { err := serverFlagSet.Parse(disks) if err != nil { t.Errorf("Test %d failed to parse arguments %s", i+1, disks) } defer removeRoots(disks) - endpoints, err := parseStorageEndpoints(disks) - if err != nil { - t.Fatalf("Test %d : Unexpected error %s", i+1, err) - } - checkEndpointsSyntax(endpoints, disks) - _, err = initStorageDisks(endpoints) - if err != nil { - t.Errorf("Test %d : disk init failed : %s", i+1, err) - } + checkServerSyntax(ctx) } } From 8e6e9301ce85dd8da3db5e41ce9d895f580193ca Mon Sep 17 00:00:00 2001 From: Aditya Manthramurthy Date: Thu, 15 Dec 2016 21:53:48 +0530 Subject: [PATCH 007/100] Add support for Kafka as a notifications target (#2869) (#3439) --- cmd/bucket-notification-utils.go | 7 + cmd/config-migrate.go | 92 ++ cmd/config-migrate_test.go | 10 +- 
cmd/config-old.go | 37 + cmd/{config-v10.go => config-v11.go} | 89 +- ...{config-v10_test.go => config-v11_test.go} | 7 + cmd/event-notifier.go | 25 + cmd/globals.go | 2 +- cmd/notifiers.go | 21 + cmd/notify-kafka.go | 125 +++ vendor/github.com/davecgh/go-spew/LICENSE | 15 + .../github.com/davecgh/go-spew/spew/bypass.go | 152 +++ .../github.com/davecgh/go-spew/spew/common.go | 341 +++++++ .../github.com/davecgh/go-spew/spew/config.go | 306 ++++++ vendor/github.com/davecgh/go-spew/spew/doc.go | 211 ++++ .../github.com/davecgh/go-spew/spew/dump.go | 509 ++++++++++ .../github.com/davecgh/go-spew/spew/format.go | 419 ++++++++ .../github.com/davecgh/go-spew/spew/spew.go | 148 +++ .../github.com/eapache/go-resiliency/LICENSE | 22 + .../eapache/go-resiliency/breaker/README.md | 34 + .../eapache/go-resiliency/breaker/breaker.go | 161 ++++ .../eapache/go-xerial-snappy/LICENSE | 21 + .../eapache/go-xerial-snappy/README.md | 13 + .../eapache/go-xerial-snappy/snappy.go | 43 + vendor/github.com/eapache/queue/LICENSE | 21 + vendor/github.com/eapache/queue/README.md | 16 + vendor/github.com/eapache/queue/queue.go | 102 ++ vendor/github.com/golang/snappy/AUTHORS | 15 + vendor/github.com/golang/snappy/CONTRIBUTORS | 37 + vendor/github.com/golang/snappy/LICENSE | 27 + vendor/github.com/golang/snappy/README | 107 +++ vendor/github.com/golang/snappy/decode.go | 237 +++++ .../github.com/golang/snappy/decode_amd64.go | 14 + .../github.com/golang/snappy/decode_amd64.s | 490 ++++++++++ vendor/github.com/golang/snappy/encode.go | 285 ++++++ .../github.com/golang/snappy/encode_amd64.go | 29 + .../github.com/golang/snappy/encode_amd64.s | 730 ++++++++++++++ vendor/github.com/golang/snappy/snappy.go | 87 ++ vendor/github.com/klauspost/crc32/LICENSE | 28 + vendor/github.com/klauspost/crc32/README.md | 87 ++ vendor/github.com/klauspost/crc32/crc32.go | 207 ++++ .../github.com/klauspost/crc32/crc32_amd64.go | 230 +++++ .../github.com/klauspost/crc32/crc32_amd64.s | 319 +++++++ 
.../klauspost/crc32/crc32_amd64p32.go | 43 + .../klauspost/crc32/crc32_amd64p32.s | 67 ++ .../klauspost/crc32/crc32_generic.go | 89 ++ .../klauspost/crc32/crc32_otherarch.go | 15 + .../github.com/klauspost/crc32/crc32_s390x.go | 91 ++ .../github.com/klauspost/crc32/crc32_s390x.s | 249 +++++ .../gopkg.in/Shopify/sarama.v1/CHANGELOG.md | 323 +++++++ vendor/gopkg.in/Shopify/sarama.v1/MIT-LICENSE | 20 + vendor/gopkg.in/Shopify/sarama.v1/Makefile | 21 + vendor/gopkg.in/Shopify/sarama.v1/README.md | 36 + vendor/gopkg.in/Shopify/sarama.v1/Vagrantfile | 19 + .../Shopify/sarama.v1/api_versions_request.go | 24 + .../sarama.v1/api_versions_response.go | 86 ++ .../Shopify/sarama.v1/async_producer.go | 903 ++++++++++++++++++ vendor/gopkg.in/Shopify/sarama.v1/broker.go | 526 ++++++++++ vendor/gopkg.in/Shopify/sarama.v1/client.go | 733 ++++++++++++++ vendor/gopkg.in/Shopify/sarama.v1/config.go | 399 ++++++++ vendor/gopkg.in/Shopify/sarama.v1/consumer.go | 715 ++++++++++++++ .../sarama.v1/consumer_group_members.go | 94 ++ .../sarama.v1/consumer_metadata_request.go | 26 + .../sarama.v1/consumer_metadata_response.go | 85 ++ .../gopkg.in/Shopify/sarama.v1/crc32_field.go | 36 + .../sarama.v1/describe_groups_request.go | 30 + .../sarama.v1/describe_groups_response.go | 174 ++++ vendor/gopkg.in/Shopify/sarama.v1/dev.yml | 13 + .../Shopify/sarama.v1/encoder_decoder.go | 84 ++ vendor/gopkg.in/Shopify/sarama.v1/errors.go | 194 ++++ .../Shopify/sarama.v1/fetch_request.go | 136 +++ .../Shopify/sarama.v1/fetch_response.go | 210 ++++ .../Shopify/sarama.v1/heartbeat_request.go | 47 + .../Shopify/sarama.v1/heartbeat_response.go | 32 + .../Shopify/sarama.v1/join_group_request.go | 108 +++ .../Shopify/sarama.v1/join_group_response.go | 114 +++ .../Shopify/sarama.v1/leave_group_request.go | 40 + .../Shopify/sarama.v1/leave_group_response.go | 32 + .../Shopify/sarama.v1/length_field.go | 29 + .../Shopify/sarama.v1/list_groups_request.go | 24 + .../Shopify/sarama.v1/list_groups_response.go | 68 ++ 
vendor/gopkg.in/Shopify/sarama.v1/message.go | 163 ++++ .../gopkg.in/Shopify/sarama.v1/message_set.go | 89 ++ .../Shopify/sarama.v1/metadata_request.go | 52 + .../Shopify/sarama.v1/metadata_response.go | 239 +++++ .../gopkg.in/Shopify/sarama.v1/mockbroker.go | 300 ++++++ .../Shopify/sarama.v1/mockresponses.go | 455 +++++++++ .../sarama.v1/offset_commit_request.go | 190 ++++ .../sarama.v1/offset_commit_response.go | 85 ++ .../Shopify/sarama.v1/offset_fetch_request.go | 81 ++ .../sarama.v1/offset_fetch_response.go | 143 +++ .../Shopify/sarama.v1/offset_manager.go | 542 +++++++++++ .../Shopify/sarama.v1/offset_request.go | 117 +++ .../Shopify/sarama.v1/offset_response.go | 142 +++ .../Shopify/sarama.v1/packet_decoder.go | 45 + .../Shopify/sarama.v1/packet_encoder.go | 42 + .../gopkg.in/Shopify/sarama.v1/partitioner.go | 123 +++ .../Shopify/sarama.v1/prep_encoder.go | 110 +++ .../Shopify/sarama.v1/produce_request.go | 157 +++ .../Shopify/sarama.v1/produce_response.go | 158 +++ .../gopkg.in/Shopify/sarama.v1/produce_set.go | 166 ++++ .../Shopify/sarama.v1/real_decoder.go | 259 +++++ .../Shopify/sarama.v1/real_encoder.go | 115 +++ vendor/gopkg.in/Shopify/sarama.v1/request.go | 117 +++ .../Shopify/sarama.v1/response_header.go | 21 + vendor/gopkg.in/Shopify/sarama.v1/sarama.go | 58 ++ .../sarama.v1/sasl_handshake_request.go | 33 + .../sarama.v1/sasl_handshake_response.go | 38 + .../Shopify/sarama.v1/sync_group_request.go | 100 ++ .../Shopify/sarama.v1/sync_group_response.go | 40 + .../Shopify/sarama.v1/sync_producer.go | 140 +++ vendor/gopkg.in/Shopify/sarama.v1/utils.go | 150 +++ vendor/vendor.json | 42 + 113 files changed, 16290 insertions(+), 35 deletions(-) rename cmd/{config-v10.go => config-v11.go} (71%) rename cmd/{config-v10_test.go => config-v11_test.go} (91%) create mode 100644 cmd/notify-kafka.go create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 
vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 vendor/github.com/eapache/go-resiliency/LICENSE create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/README.md create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker.go create mode 100644 vendor/github.com/eapache/go-xerial-snappy/LICENSE create mode 100644 vendor/github.com/eapache/go-xerial-snappy/README.md create mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy.go create mode 100644 vendor/github.com/eapache/queue/LICENSE create mode 100644 vendor/github.com/eapache/queue/README.md create mode 100644 vendor/github.com/eapache/queue/queue.go create mode 100644 vendor/github.com/golang/snappy/AUTHORS create mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS create mode 100644 vendor/github.com/golang/snappy/LICENSE create mode 100644 vendor/github.com/golang/snappy/README create mode 100644 vendor/github.com/golang/snappy/decode.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s create mode 100644 vendor/github.com/golang/snappy/encode.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s create mode 100644 vendor/github.com/golang/snappy/snappy.go create mode 100644 vendor/github.com/klauspost/crc32/LICENSE create mode 100644 vendor/github.com/klauspost/crc32/README.md create mode 100644 vendor/github.com/klauspost/crc32/crc32.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64.go create mode 100644 
vendor/github.com/klauspost/crc32/crc32_amd64.s create mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64p32.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_amd64p32.s create mode 100644 vendor/github.com/klauspost/crc32/crc32_generic.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_otherarch.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_s390x.go create mode 100644 vendor/github.com/klauspost/crc32/crc32_s390x.s create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/CHANGELOG.md create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/MIT-LICENSE create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/Makefile create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/README.md create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/Vagrantfile create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/api_versions_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/api_versions_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/async_producer.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/broker.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/client.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/config.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/consumer.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/consumer_group_members.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/crc32_field.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/describe_groups_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/describe_groups_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/dev.yml create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/encoder_decoder.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/errors.go create mode 100644 
vendor/gopkg.in/Shopify/sarama.v1/fetch_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/fetch_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/heartbeat_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/heartbeat_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/join_group_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/join_group_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/leave_group_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/leave_group_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/length_field.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/list_groups_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/list_groups_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/message.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/message_set.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/metadata_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/metadata_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/mockbroker.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/mockresponses.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/offset_commit_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/offset_commit_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/offset_fetch_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/offset_fetch_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/offset_manager.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/offset_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/offset_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/packet_decoder.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/packet_encoder.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/partitioner.go create mode 100644 
vendor/gopkg.in/Shopify/sarama.v1/prep_encoder.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/produce_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/produce_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/produce_set.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/real_decoder.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/real_encoder.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/response_header.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/sarama.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/sasl_handshake_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/sasl_handshake_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/sync_group_request.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/sync_group_response.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/sync_producer.go create mode 100644 vendor/gopkg.in/Shopify/sarama.v1/utils.go diff --git a/cmd/bucket-notification-utils.go b/cmd/bucket-notification-utils.go index d322d7d53..ff25e9d51 100644 --- a/cmd/bucket-notification-utils.go +++ b/cmd/bucket-notification-utils.go @@ -147,6 +147,10 @@ func isValidQueueID(queueARN string) bool { pgN := serverConfig.GetPostgreSQLNotifyByID(sqsARN.AccountID) // Postgres can work with only default conn. info. 
return pgN.Enable + } else if isKafkaQueue(sqsARN) { + kafkaN := serverConfig.GetKafkaNotifyByID(sqsARN.AccountID) + return (kafkaN.Enable && len(kafkaN.Brokers) > 0 && + kafkaN.Topic != "") } return false } @@ -236,6 +240,7 @@ func validateNotificationConfig(nConfig notificationConfig) APIErrorCode { // - elasticsearch // - redis // - postgresql +// - kafka func unmarshalSqsARN(queueARN string) (mSqs arnSQS) { mSqs = arnSQS{} if !strings.HasPrefix(queueARN, minioSqs+serverConfig.GetRegion()+":") { @@ -253,6 +258,8 @@ func unmarshalSqsARN(queueARN string) (mSqs arnSQS) { mSqs.Type = queueTypeRedis case strings.HasSuffix(sqsType, queueTypePostgreSQL): mSqs.Type = queueTypePostgreSQL + case strings.HasSuffix(sqsType, queueTypeKafka): + mSqs.Type = queueTypeKafka } // Add more queues here. mSqs.AccountID = strings.TrimSuffix(sqsType, ":"+mSqs.Type) return mSqs diff --git a/cmd/config-migrate.go b/cmd/config-migrate.go index 0633c51a9..0107a3d8c 100644 --- a/cmd/config-migrate.go +++ b/cmd/config-migrate.go @@ -62,6 +62,10 @@ func migrateConfig() error { if err := migrateV9ToV10(); err != nil { return err } + // Migrate version '10' to '11'. + if err := migrateV10ToV11(); err != nil { + return err + } return nil } @@ -633,3 +637,91 @@ func migrateV9ToV10() error { ) return nil } + +// Version '10' to '11' migration. Add support for Kafka +// notifications. +func migrateV10ToV11() error { + cv10, err := loadConfigV10() + if err != nil { + if os.IsNotExist(err) { + return nil + } + return fmt.Errorf("Unable to load config version ‘10’. %v", err) + } + if cv10.Version != "10" { + return nil + } + + // Copy over fields from V10 into V11 config struct + srvConfig := &serverConfigV11{} + srvConfig.Version = "11" + srvConfig.Credential = cv10.Credential + srvConfig.Region = cv10.Region + if srvConfig.Region == "" { + // Region needs to be set for AWS Signature Version 4. 
+ srvConfig.Region = "us-east-1" + } + srvConfig.Logger.Console = cv10.Logger.Console + srvConfig.Logger.File = cv10.Logger.File + + // check and set notifiers config + if len(cv10.Notify.AMQP) == 0 { + srvConfig.Notify.AMQP = make(map[string]amqpNotify) + srvConfig.Notify.AMQP["1"] = amqpNotify{} + } else { + srvConfig.Notify.AMQP = cv10.Notify.AMQP + } + if len(cv10.Notify.NATS) == 0 { + srvConfig.Notify.NATS = make(map[string]natsNotify) + srvConfig.Notify.NATS["1"] = natsNotify{} + } else { + srvConfig.Notify.NATS = cv10.Notify.NATS + } + if len(cv10.Notify.ElasticSearch) == 0 { + srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) + srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + } else { + srvConfig.Notify.ElasticSearch = cv10.Notify.ElasticSearch + } + if len(cv10.Notify.Redis) == 0 { + srvConfig.Notify.Redis = make(map[string]redisNotify) + srvConfig.Notify.Redis["1"] = redisNotify{} + } else { + srvConfig.Notify.Redis = cv10.Notify.Redis + } + if len(cv10.Notify.PostgreSQL) == 0 { + srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) + srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + } else { + srvConfig.Notify.PostgreSQL = cv10.Notify.PostgreSQL + } + // V10 will not have a Kafka config. So we initialize one here. + srvConfig.Notify.Kafka = make(map[string]kafkaNotify) + srvConfig.Notify.Kafka["1"] = kafkaNotify{} + + qc, err := quick.New(srvConfig) + if err != nil { + return fmt.Errorf("Unable to initialize the quick config. %v", + err) + } + configFile, err := getConfigFile() + if err != nil { + return fmt.Errorf("Unable to get config file. %v", err) + } + + err = qc.Save(configFile) + if err != nil { + return fmt.Errorf( + "Failed to migrate config from ‘"+ + cv10.Version+"’ to ‘"+srvConfig.Version+ + "’ failed. 
%v", err, + ) + } + + console.Println( + "Migration from version ‘" + + cv10.Version + "’ to ‘" + srvConfig.Version + + "’ completed successfully.", + ) + return nil +} diff --git a/cmd/config-migrate_test.go b/cmd/config-migrate_test.go index 1955f0720..2e0a1ce41 100644 --- a/cmd/config-migrate_test.go +++ b/cmd/config-migrate_test.go @@ -97,10 +97,13 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) { if err := migrateV9ToV10(); err != nil { t.Fatal("migrate v9 to v10 should succeed when no config file is found") } + if err := migrateV10ToV11(); err != nil { + t.Fatal("migrate v10 to v11 should succeed when no config file is found") + } } -// Test if a config migration from v2 to v10 is successfully done -func TestServerConfigMigrateV2toV10(t *testing.T) { +// Test if a config migration from v2 to v11 is successfully done +func TestServerConfigMigrateV2toV11(t *testing.T) { rootPath, err := newTestConfig("us-east-1") if err != nil { t.Fatalf("Init Test config failed") @@ -200,4 +203,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) { if err := migrateV9ToV10(); err == nil { t.Fatal("migrateConfigV9ToV10() should fail with a corrupted json") } + if err := migrateV10ToV11(); err == nil { + t.Fatal("migrateConfigV10ToV11() should fail with a corrupted json") + } } diff --git a/cmd/config-old.go b/cmd/config-old.go index 6e03cf8ba..5f85873ce 100644 --- a/cmd/config-old.go +++ b/cmd/config-old.go @@ -443,3 +443,40 @@ func loadConfigV9() (*serverConfigV9, error) { } return srvCfg, nil } + +// serverConfigV10 server configuration version '10' which is like +// version '9' except it drops support of syslog config, and makes the +// RWMutex global (so it does not exist in this struct). +type serverConfigV10 struct { + Version string `json:"version"` + + // S3 API configuration. + Credential credential `json:"credential"` + Region string `json:"region"` + + // Additional error logging configuration. 
+ Logger logger `json:"logger"` + + // Notification queue configuration. + Notify notifier `json:"notify"` +} + +func loadConfigV10() (*serverConfigV10, error) { + configFile, err := getConfigFile() + if err != nil { + return nil, err + } + if _, err = os.Stat(configFile); err != nil { + return nil, err + } + srvCfg := &serverConfigV10{} + srvCfg.Version = "10" + qc, err := quick.New(srvCfg) + if err != nil { + return nil, err + } + if err := qc.Load(configFile); err != nil { + return nil, err + } + return srvCfg, nil +} diff --git a/cmd/config-v10.go b/cmd/config-v11.go similarity index 71% rename from cmd/config-v10.go rename to cmd/config-v11.go index 8d633c7f7..9bf425b26 100644 --- a/cmd/config-v10.go +++ b/cmd/config-v11.go @@ -26,9 +26,9 @@ import ( // Read Write mutex for safe access to ServerConfig. var serverConfigMu sync.RWMutex -// serverConfigV10 server configuration version '10' which is like version '9' -// except it drops support of syslog config. -type serverConfigV10 struct { +// serverConfigV11 server configuration version '11' which is like +// version '10' except it adds support for Kafka notifications. +type serverConfigV11 struct { Version string `json:"version"` // S3 API configuration. @@ -42,11 +42,12 @@ type serverConfigV10 struct { Notify notifier `json:"notify"` } -// initConfig - initialize server config and indicate if we are creating a new file or we are just loading +// initConfig - initialize server config and indicate if we are +// creating a new file or we are just loading func initConfig() (bool, error) { if !isConfigFileExists() { // Initialize server config. 
- srvCfg := &serverConfigV10{} + srvCfg := &serverConfigV11{} srvCfg.Version = globalMinioConfigVersion srvCfg.Region = "us-east-1" srvCfg.Credential = mustGenAccessKeys() @@ -68,6 +69,8 @@ func initConfig() (bool, error) { srvCfg.Notify.NATS["1"] = natsNotify{} srvCfg.Notify.PostgreSQL = make(map[string]postgreSQLNotify) srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{} + srvCfg.Notify.Kafka = make(map[string]kafkaNotify) + srvCfg.Notify.Kafka["1"] = kafkaNotify{} // Create config path. err := createConfigPath() @@ -91,7 +94,7 @@ func initConfig() (bool, error) { if _, err = os.Stat(configFile); err != nil { return false, err } - srvCfg := &serverConfigV10{} + srvCfg := &serverConfigV11{} srvCfg.Version = globalMinioConfigVersion qc, err := quick.New(srvCfg) if err != nil { @@ -113,10 +116,10 @@ func initConfig() (bool, error) { } // serverConfig server config. -var serverConfig *serverConfigV10 +var serverConfig *serverConfigV11 // GetVersion get current config version. -func (s serverConfigV10) GetVersion() string { +func (s serverConfigV11) GetVersion() string { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -125,14 +128,14 @@ func (s serverConfigV10) GetVersion() string { /// Logger related. -func (s *serverConfigV10) SetAMQPNotifyByID(accountID string, amqpn amqpNotify) { +func (s *serverConfigV11) SetAMQPNotifyByID(accountID string, amqpn amqpNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.AMQP[accountID] = amqpn } -func (s serverConfigV10) GetAMQP() map[string]amqpNotify { +func (s serverConfigV11) GetAMQP() map[string]amqpNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -140,7 +143,7 @@ func (s serverConfigV10) GetAMQP() map[string]amqpNotify { } // GetAMQPNotify get current AMQP logger. 
-func (s serverConfigV10) GetAMQPNotifyByID(accountID string) amqpNotify { +func (s serverConfigV11) GetAMQPNotifyByID(accountID string) amqpNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -148,35 +151,35 @@ func (s serverConfigV10) GetAMQPNotifyByID(accountID string) amqpNotify { } // -func (s *serverConfigV10) SetNATSNotifyByID(accountID string, natsn natsNotify) { +func (s *serverConfigV11) SetNATSNotifyByID(accountID string, natsn natsNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.NATS[accountID] = natsn } -func (s serverConfigV10) GetNATS() map[string]natsNotify { +func (s serverConfigV11) GetNATS() map[string]natsNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.NATS } // GetNATSNotify get current NATS logger. -func (s serverConfigV10) GetNATSNotifyByID(accountID string) natsNotify { +func (s serverConfigV11) GetNATSNotifyByID(accountID string) natsNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.NATS[accountID] } -func (s *serverConfigV10) SetElasticSearchNotifyByID(accountID string, esNotify elasticSearchNotify) { +func (s *serverConfigV11) SetElasticSearchNotifyByID(accountID string, esNotify elasticSearchNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.ElasticSearch[accountID] = esNotify } -func (s serverConfigV10) GetElasticSearch() map[string]elasticSearchNotify { +func (s serverConfigV11) GetElasticSearch() map[string]elasticSearchNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -184,21 +187,21 @@ func (s serverConfigV10) GetElasticSearch() map[string]elasticSearchNotify { } // GetElasticSearchNotify get current ElasicSearch logger. 
-func (s serverConfigV10) GetElasticSearchNotifyByID(accountID string) elasticSearchNotify { +func (s serverConfigV11) GetElasticSearchNotifyByID(accountID string) elasticSearchNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.ElasticSearch[accountID] } -func (s *serverConfigV10) SetRedisNotifyByID(accountID string, rNotify redisNotify) { +func (s *serverConfigV11) SetRedisNotifyByID(accountID string, rNotify redisNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.Redis[accountID] = rNotify } -func (s serverConfigV10) GetRedis() map[string]redisNotify { +func (s serverConfigV11) GetRedis() map[string]redisNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -206,36 +209,58 @@ func (s serverConfigV10) GetRedis() map[string]redisNotify { } // GetRedisNotify get current Redis logger. -func (s serverConfigV10) GetRedisNotifyByID(accountID string) redisNotify { +func (s serverConfigV11) GetRedisNotifyByID(accountID string) redisNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.Redis[accountID] } -func (s *serverConfigV10) SetPostgreSQLNotifyByID(accountID string, pgn postgreSQLNotify) { +func (s *serverConfigV11) SetPostgreSQLNotifyByID(accountID string, pgn postgreSQLNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.PostgreSQL[accountID] = pgn } -func (s serverConfigV10) GetPostgreSQL() map[string]postgreSQLNotify { +func (s serverConfigV11) GetPostgreSQL() map[string]postgreSQLNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.PostgreSQL } -func (s serverConfigV10) GetPostgreSQLNotifyByID(accountID string) postgreSQLNotify { +func (s serverConfigV11) GetPostgreSQLNotifyByID(accountID string) postgreSQLNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.PostgreSQL[accountID] } +// Kafka related functions +func (s *serverConfigV11) SetKafkaNotifyByID(accountID string, kn kafkaNotify) { + 
serverConfigMu.Lock() + defer serverConfigMu.Unlock() + + s.Notify.Kafka[accountID] = kn +} + +func (s serverConfigV11) GetKafka() map[string]kafkaNotify { + serverConfigMu.RLock() + defer serverConfigMu.RUnlock() + + return s.Notify.Kafka +} + +func (s serverConfigV11) GetKafkaNotifyByID(accountID string) kafkaNotify { + serverConfigMu.RLock() + defer serverConfigMu.RUnlock() + + return s.Notify.Kafka[accountID] +} + // SetFileLogger set new file logger. -func (s *serverConfigV10) SetFileLogger(flogger fileLogger) { +func (s *serverConfigV11) SetFileLogger(flogger fileLogger) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -243,7 +268,7 @@ func (s *serverConfigV10) SetFileLogger(flogger fileLogger) { } // GetFileLogger get current file logger. -func (s serverConfigV10) GetFileLogger() fileLogger { +func (s serverConfigV11) GetFileLogger() fileLogger { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -251,7 +276,7 @@ func (s serverConfigV10) GetFileLogger() fileLogger { } // SetConsoleLogger set new console logger. -func (s *serverConfigV10) SetConsoleLogger(clogger consoleLogger) { +func (s *serverConfigV11) SetConsoleLogger(clogger consoleLogger) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -259,7 +284,7 @@ func (s *serverConfigV10) SetConsoleLogger(clogger consoleLogger) { } // GetConsoleLogger get current console logger. -func (s serverConfigV10) GetConsoleLogger() consoleLogger { +func (s serverConfigV11) GetConsoleLogger() consoleLogger { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -267,7 +292,7 @@ func (s serverConfigV10) GetConsoleLogger() consoleLogger { } // SetRegion set new region. -func (s *serverConfigV10) SetRegion(region string) { +func (s *serverConfigV11) SetRegion(region string) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -275,7 +300,7 @@ func (s *serverConfigV10) SetRegion(region string) { } // GetRegion get current region. 
-func (s serverConfigV10) GetRegion() string { +func (s serverConfigV11) GetRegion() string { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -283,7 +308,7 @@ func (s serverConfigV10) GetRegion() string { } // SetCredentials set new credentials. -func (s *serverConfigV10) SetCredential(creds credential) { +func (s *serverConfigV11) SetCredential(creds credential) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -291,7 +316,7 @@ func (s *serverConfigV10) SetCredential(creds credential) { } // GetCredentials get current credentials. -func (s serverConfigV10) GetCredential() credential { +func (s serverConfigV11) GetCredential() credential { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -299,7 +324,7 @@ func (s serverConfigV10) GetCredential() credential { } // Save config. -func (s serverConfigV10) Save() error { +func (s serverConfigV11) Save() error { serverConfigMu.RLock() defer serverConfigMu.RUnlock() diff --git a/cmd/config-v10_test.go b/cmd/config-v11_test.go similarity index 91% rename from cmd/config-v10_test.go rename to cmd/config-v11_test.go index 1f3d31623..755976d43 100644 --- a/cmd/config-v10_test.go +++ b/cmd/config-v11_test.go @@ -60,6 +60,13 @@ func TestServerConfig(t *testing.T) { t.Errorf("Expecting Redis config %#v found %#v", redisNotify{}, savedNotifyCfg3) } + // Set new kafka notification id. + serverConfig.SetKafkaNotifyByID("2", kafkaNotify{}) + savedNotifyCfg4 := serverConfig.GetKafkaNotifyByID("2") + if !reflect.DeepEqual(savedNotifyCfg4, kafkaNotify{}) { + t.Errorf("Expecting Kafka config %#v found %#v", kafkaNotify{}, savedNotifyCfg4) + } + // Set new console logger. 
serverConfig.SetConsoleLogger(consoleLogger{ Enable: true, diff --git a/cmd/event-notifier.go b/cmd/event-notifier.go index b8d697d3f..07865e168 100644 --- a/cmd/event-notifier.go +++ b/cmd/event-notifier.go @@ -659,6 +659,31 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) { } queueTargets[queueARN] = pgLog } + // Load Kafka targets, initialize their respective loggers. + for accountID, kafkaN := range serverConfig.GetKafka() { + if !kafkaN.Enable { + continue + } + // Construct the queue ARN for Kafka. + queueARN := minioSqs + serverConfig.GetRegion() + ":" + accountID + ":" + queueTypeKafka + _, ok := queueTargets[queueARN] + if ok { + continue + } + // Using accountID initialize a new Kafka logrus instance. + kafkaLog, err := newKafkaNotify(accountID) + if err != nil { + // Encapsulate network error to be more informative. + if _, ok := err.(net.Error); ok { + return nil, &net.OpError{ + Op: "Connecting to " + queueARN, Net: "tcp", + Err: err, + } + } + return nil, err + } + queueTargets[queueARN] = kafkaLog + } // Successfully initialized queue targets. return queueTargets, nil diff --git a/cmd/globals.go b/cmd/globals.go index bc7aa5b36..96ae4a9e7 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -36,7 +36,7 @@ const ( // minio configuration related constants. const ( - globalMinioConfigVersion = "10" + globalMinioConfigVersion = "11" globalMinioConfigDir = ".minio" globalMinioCertsDir = "certs" globalMinioCertsCADir = "CAs" diff --git a/cmd/notifiers.go b/cmd/notifiers.go index ce56312ff..f40190421 100644 --- a/cmd/notifiers.go +++ b/cmd/notifiers.go @@ -38,6 +38,8 @@ const ( queueTypeRedis = "redis" // Static string indicating queue type 'postgresql'. queueTypePostgreSQL = "postgresql" + // Static string indicating queue type 'kafka'. + queueTypeKafka = "kafka" ) // Topic type. 
@@ -58,6 +60,7 @@ type notifier struct { ElasticSearch map[string]elasticSearchNotify `json:"elasticsearch"` Redis map[string]redisNotify `json:"redis"` PostgreSQL map[string]postgreSQLNotify `json:"postgresql"` + Kafka map[string]kafkaNotify `json:"kafka"` // Add new notification queues. } @@ -154,6 +157,24 @@ func isPostgreSQLQueue(sqsArn arnSQS) bool { return true } +// Returns true if queueArn is for Kafka. +func isKafkaQueue(sqsArn arnSQS) bool { + if sqsArn.Type != queueTypeKafka { + return false + } + kafkaNotifyCfg := serverConfig.GetKafkaNotifyByID(sqsArn.AccountID) + if !kafkaNotifyCfg.Enable { + return false + } + kafkaC, err := dialKafka(kafkaNotifyCfg) + if err != nil { + errorIf(err, "Unable to dial Kafka server %#v", kafkaNotifyCfg) + return false + } + defer kafkaC.Close() + return true +} + // Match function matches wild cards in 'pattern' for events. func eventMatch(eventType string, events []string) (ok bool) { for _, event := range events { diff --git a/cmd/notify-kafka.go b/cmd/notify-kafka.go new file mode 100644 index 000000000..d6e5d1f66 --- /dev/null +++ b/cmd/notify-kafka.go @@ -0,0 +1,125 @@ +/* + * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "fmt" + "io/ioutil" + + "github.com/Sirupsen/logrus" + + sarama "gopkg.in/Shopify/sarama.v1" +) + +// kafkaNotify holds the configuration of the Kafka server/cluster to +// send notifications to. 
+type kafkaNotify struct { + // Flag to enable/disable this notification from configuration + // file. + Enable bool `json:"enable"` + + // List of Kafka brokers in `addr:host` format. + Brokers []string `json:"brokers"` + + // Topic to which event notifications should be sent. + Topic string `json:"topic"` +} + +// kafkaConn contains the active connection to the Kafka cluster and +// the topic to send event notifications to. +type kafkaConn struct { + producer sarama.SyncProducer + topic string +} + +func dialKafka(kn kafkaNotify) (kafkaConn, error) { + if !kn.Enable { + return kafkaConn{}, errNotifyNotEnabled + } + + if kn.Topic == "" { + return kafkaConn{}, fmt.Errorf( + "Kafka Notifier Error: Topic was not specified in configuration") + } + + config := sarama.NewConfig() + // Wait for all in-sync replicas to ack the message + config.Producer.RequiredAcks = sarama.WaitForAll + // Retry up to 10 times to produce the message + config.Producer.Retry.Max = 10 + config.Producer.Return.Successes = true + + p, err := sarama.NewSyncProducer(kn.Brokers, config) + if err != nil { + return kafkaConn{}, fmt.Errorf( + "Kafka Notifier Error: Failed to start producer: %v", + err, + ) + } + + return kafkaConn{p, kn.Topic}, nil +} + +func newKafkaNotify(accountID string) (*logrus.Logger, error) { + kafkaNotifyCfg := serverConfig.GetKafkaNotifyByID(accountID) + + // Try connecting to the configured Kafka broker(s). + kc, err := dialKafka(kafkaNotifyCfg) + if err != nil { + return nil, err + } + + // Configure kafkaConn object as a Hook in logrus. 
+ kafkaLog := logrus.New() + kafkaLog.Out = ioutil.Discard + kafkaLog.Formatter = new(logrus.JSONFormatter) + kafkaLog.Hooks.Add(kc) + + return kafkaLog, nil +} + +func (kC kafkaConn) Close() { + _ = kC.producer.Close() +} + +// Fire - to implement logrus.Hook interface +func (kC kafkaConn) Fire(entry *logrus.Entry) error { + body, err := entry.Reader() + if err != nil { + return err + } + + // Construct message to send to Kafka + msg := sarama.ProducerMessage{ + Topic: kC.topic, + Value: sarama.ByteEncoder(body.Bytes()), + } + + // Attempt sending the message to Kafka + _, _, err = kC.producer.SendMessage(&msg) + if err != nil { + return fmt.Errorf("Error sending event to Kafka - %v", err) + } + return nil +} + +// Levels - to implement logrus.Hook interface +func (kC kafkaConn) Levels() []logrus.Level { + return []logrus.Level{ + logrus.InfoLevel, + } +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 000000000..c83641619 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 000000000..8a4a6589a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. 
The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. + // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. + flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. + // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field. When it does, the offsets are updated accordingly. + vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. 
+ upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. + if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. 
+ switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 000000000..7c519ff47 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. 
+var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. 
However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. 
+func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. 
+func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 000000000..2e3d22f31 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. 
+ DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. 
Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. 
See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. 
It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. 
The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. 
+ +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 000000000..aacaac6f1 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) 
+ spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. 
+ + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. 
+ +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 000000000..df1d582a7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. 
+ cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. 
+ nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. 
+ case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. 
It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. 
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) 
+} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 000000000..c49875bac --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. 
+ for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. + if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. 
It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 000000000..32c0e3388 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE new file mode 100644 index 000000000..698a3f513 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md new file mode 100644 index 000000000..2d1b3d932 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md @@ -0,0 +1,34 @@ +circuit-breaker +=============== + +[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) +[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +The circuit-breaker resiliency pattern for golang. + +Creating a breaker takes three parameters: +- error threshold (for opening the breaker) +- success threshold (for closing the breaker) +- timeout (how long to keep the breaker open) + +```go +b := breaker.New(3, 1, 5*time.Second) + +for { + result := b.Run(func() error { + // communicate with some external service and + // return an error if the communication failed + return nil + }) + + switch result { + case nil: + // success! + case breaker.ErrBreakerOpen: + // our function wasn't run because the breaker was open + default: + // some other error + } +} +``` diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go new file mode 100644 index 000000000..f88ca7248 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go @@ -0,0 +1,161 @@ +// Package breaker implements the circuit-breaker resiliency pattern for Go. +package breaker + +import ( + "errors" + "sync" + "sync/atomic" + "time" +) + +// ErrBreakerOpen is the error returned from Run() when the function is not executed +// because the breaker is currently open. 
+var ErrBreakerOpen = errors.New("circuit breaker is open") + +const ( + closed uint32 = iota + open + halfOpen +) + +// Breaker implements the circuit-breaker resiliency pattern +type Breaker struct { + errorThreshold, successThreshold int + timeout time.Duration + + lock sync.Mutex + state uint32 + errors, successes int + lastError time.Time +} + +// New constructs a new circuit-breaker that starts closed. +// From closed, the breaker opens if "errorThreshold" errors are seen +// without an error-free period of at least "timeout". From open, the +// breaker half-closes after "timeout". From half-open, the breaker closes +// after "successThreshold" consecutive successes, or opens on a single error. +func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker { + return &Breaker{ + errorThreshold: errorThreshold, + successThreshold: successThreshold, + timeout: timeout, + } +} + +// Run will either return ErrBreakerOpen immediately if the circuit-breaker is +// already open, or it will run the given function and pass along its return +// value. It is safe to call Run concurrently on the same Breaker. +func (b *Breaker) Run(work func() error) error { + state := atomic.LoadUint32(&b.state) + + if state == open { + return ErrBreakerOpen + } + + return b.doWork(state, work) +} + +// Go will either return ErrBreakerOpen immediately if the circuit-breaker is +// already open, or it will run the given function in a separate goroutine. +// If the function is run, Go will return nil immediately, and will *not* return +// the return value of the function. It is safe to call Go concurrently on the +// same Breaker. 
+func (b *Breaker) Go(work func() error) error { + state := atomic.LoadUint32(&b.state) + + if state == open { + return ErrBreakerOpen + } + + // errcheck complains about ignoring the error return value, but + // that's on purpose; if you want an error from a goroutine you have to + // get it over a channel or something + go b.doWork(state, work) + + return nil +} + +func (b *Breaker) doWork(state uint32, work func() error) error { + var panicValue interface{} + + result := func() error { + defer func() { + panicValue = recover() + }() + return work() + }() + + if result == nil && panicValue == nil && state == closed { + // short-circuit the normal, success path without contending + // on the lock + return nil + } + + // oh well, I guess we have to contend on the lock + b.processResult(result, panicValue) + + if panicValue != nil { + // as close as Go lets us come to a "rethrow" although unfortunately + // we lose the original panicing location + panic(panicValue) + } + + return result +} + +func (b *Breaker) processResult(result error, panicValue interface{}) { + b.lock.Lock() + defer b.lock.Unlock() + + if result == nil && panicValue == nil { + if b.state == halfOpen { + b.successes++ + if b.successes == b.successThreshold { + b.closeBreaker() + } + } + } else { + if b.errors > 0 { + expiry := b.lastError.Add(b.timeout) + if time.Now().After(expiry) { + b.errors = 0 + } + } + + switch b.state { + case closed: + b.errors++ + if b.errors == b.errorThreshold { + b.openBreaker() + } else { + b.lastError = time.Now() + } + case halfOpen: + b.openBreaker() + } + } +} + +func (b *Breaker) openBreaker() { + b.changeState(open) + go b.timer() +} + +func (b *Breaker) closeBreaker() { + b.changeState(closed) +} + +func (b *Breaker) timer() { + time.Sleep(b.timeout) + + b.lock.Lock() + defer b.lock.Unlock() + + b.changeState(halfOpen) +} + +func (b *Breaker) changeState(newState uint32) { + b.errors = 0 + b.successes = 0 + atomic.StoreUint32(&b.state, newState) +} diff --git 
a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE new file mode 100644 index 000000000..5bf3688d9 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md new file mode 100644 index 000000000..3f2695c72 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/README.md @@ -0,0 +1,13 @@ +# go-xerial-snappy + +[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy) + +Xerial-compatible Snappy framing support for golang. + +Packages using Xerial for snappy encoding use a framing format incompatible with +basically everything else in existence. This package wraps Go's built-in snappy +package to support it. 
+ +Apps that use this format include Apache Kafka (see +https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for +details). diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go new file mode 100644 index 000000000..b8f8b51fc --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go @@ -0,0 +1,43 @@ +package snappy + +import ( + "bytes" + "encoding/binary" + + master "github.com/golang/snappy" +) + +var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0} + +// Encode encodes data as snappy with no framing header. +func Encode(src []byte) []byte { + return master.Encode(nil, src) +} + +// Decode decodes snappy data whether it is traditional unframed +// or includes the xerial framing format. +func Decode(src []byte) ([]byte, error) { + if !bytes.Equal(src[:8], xerialHeader) { + return master.Decode(nil, src) + } + + var ( + pos = uint32(16) + max = uint32(len(src)) + dst = make([]byte, 0, len(src)) + chunk []byte + err error + ) + for pos < max { + size := binary.BigEndian.Uint32(src[pos : pos+4]) + pos += 4 + + chunk, err = master.Decode(chunk, src[pos:pos+size]) + if err != nil { + return nil, err + } + pos += size + dst = append(dst, chunk...) 
+ } + return dst, nil +} diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE new file mode 100644 index 000000000..d5f36dbca --- /dev/null +++ b/vendor/github.com/eapache/queue/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md new file mode 100644 index 000000000..8e782335c --- /dev/null +++ b/vendor/github.com/eapache/queue/README.md @@ -0,0 +1,16 @@ +Queue +===== + +[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue) +[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue) +[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is in part because it is *not* thread-safe. + +Follows semantic versioning using https://gopkg.in/ - import from +[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1) +for guaranteed API stability. diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go new file mode 100644 index 000000000..71d1acdf2 --- /dev/null +++ b/vendor/github.com/eapache/queue/queue.go @@ -0,0 +1,102 @@ +/* +Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. +*/ +package queue + +// minQueueLen is smallest capacity that queue may have. +// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). +const minQueueLen = 16 + +// Queue represents a single instance of the queue data structure. 
+type Queue struct { + buf []interface{} + head, tail, count int +} + +// New constructs and returns a new Queue. +func New() *Queue { + return &Queue{ + buf: make([]interface{}, minQueueLen), + } +} + +// Length returns the number of elements currently stored in the queue. +func (q *Queue) Length() int { + return q.count +} + +// resizes the queue to fit exactly twice its current contents +// this can result in shrinking if the queue is less than half-full +func (q *Queue) resize() { + newBuf := make([]interface{}, q.count<<1) + + if q.tail > q.head { + copy(newBuf, q.buf[q.head:q.tail]) + } else { + n := copy(newBuf, q.buf[q.head:]) + copy(newBuf[n:], q.buf[:q.tail]) + } + + q.head = 0 + q.tail = q.count + q.buf = newBuf +} + +// Add puts an element on the end of the queue. +func (q *Queue) Add(elem interface{}) { + if q.count == len(q.buf) { + q.resize() + } + + q.buf[q.tail] = elem + // bitwise modulus + q.tail = (q.tail + 1) & (len(q.buf) - 1) + q.count++ +} + +// Peek returns the element at the head of the queue. This call panics +// if the queue is empty. +func (q *Queue) Peek() interface{} { + if q.count <= 0 { + panic("queue: Peek() called on empty queue") + } + return q.buf[q.head] +} + +// Get returns the element at index i in the queue. If the index is +// invalid, the call will panic. This method accepts both positive and +// negative index values. Index 0 refers to the first element, and +// index -1 refers to the last. +func (q *Queue) Get(i int) interface{} { + // If indexing backwards, convert to positive index. + if i < 0 { + i += q.count + } + if i < 0 || i >= q.count { + panic("queue: Get() called with index out of range") + } + // bitwise modulus + return q.buf[(q.head+i)&(len(q.buf)-1)] +} + +// Remove removes and returns the element from the front of the queue. If the +// queue is empty, the call will panic. 
+func (q *Queue) Remove() interface{} { + if q.count <= 0 { + panic("queue: Remove() called on empty queue") + } + ret := q.buf[q.head] + q.buf[q.head] = nil + // bitwise modulus + q.head = (q.head + 1) & (len(q.buf) - 1) + q.count-- + // Resize down if buffer 1/4 full. + if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { + q.resize() + } + return ret +} diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 000000000..bcfa19520 --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 000000000..931ae3160 --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). 
+# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 000000000..6050c10f4 --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 000000000..cea12879a --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." 
+ +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s 
gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 000000000..72efb0353 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. 
+func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 000000000..fcd192b84 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. 
+// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 000000000..e6179f65e --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. 
+ + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
+ ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. 
We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! 
 If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------]           d-offset
+ //         [------]   d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------]           d-offset
+ //   [------]         d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! 
As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". 
+ // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 000000000..874968906 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. 
That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. 
+func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. 
Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer than can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 000000000..2a56fb504 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. 
+// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) \ No newline at end of file diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 000000000..adfd979fe --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. 
+// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. 
+ MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. 
The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. 
+TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! 
offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". 
+ ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 000000000..0cf5e379c --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at https://github.com/google/snappy +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. 
+ - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/crc32/LICENSE b/vendor/github.com/klauspost/crc32/LICENSE new file mode 100644 index 000000000..4fd5963e3 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2015 Klaus Post + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/crc32/README.md b/vendor/github.com/klauspost/crc32/README.md new file mode 100644 index 000000000..029625d36 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/README.md @@ -0,0 +1,87 @@ +# crc32 +CRC32 hash with x64 optimizations + +This package is a drop-in replacement for the standard library `hash/crc32` package, that features SSE 4.2 optimizations on x64 platforms, for a 10x speedup. + +[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32) + +# usage + +Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer. + +Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go. + +# changes +* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match. +* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable. + + +# performance + +For *Go 1.7* performance is equivalent to the standard library. So if you use this package for Go 1.7 you can switch back. 
+ + +For IEEE tables (the most common), there is approximately a factor 10 speedup with "CLMUL" (Carryless multiplication) instruction: +``` +benchmark old ns/op new ns/op delta +BenchmarkCrc32KB 99955 10258 -89.74% + +benchmark old MB/s new MB/s speedup +BenchmarkCrc32KB 327.83 3194.20 9.74x +``` + +For other tables and "CLMUL" capable machines the performance is the same as the standard library. + +Here are some detailed benchmarks, comparing to go 1.5 standard library with and without assembler enabled. + +``` +Std: Standard Go 1.5 library +Crc: Indicates IEEE type CRC. +40B: Size of each slice encoded. +NoAsm: Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine). +Castagnoli: Castagnoli CRC type. + +BenchmarkStdCrc40B-4 10000000 158 ns/op 252.88 MB/s +BenchmarkCrc40BNoAsm-4 20000000 105 ns/op 377.38 MB/s (slice8) +BenchmarkCrc40B-4 20000000 105 ns/op 378.77 MB/s (slice8) + +BenchmarkStdCrc1KB-4 500000 3604 ns/op 284.10 MB/s +BenchmarkCrc1KBNoAsm-4 1000000 1463 ns/op 699.79 MB/s (slice8) +BenchmarkCrc1KB-4 3000000 396 ns/op 2583.69 MB/s (asm) + +BenchmarkStdCrc8KB-4 200000 11417 ns/op 717.48 MB/s (slice8) +BenchmarkCrc8KBNoAsm-4 200000 11317 ns/op 723.85 MB/s (slice8) +BenchmarkCrc8KB-4 500000 2919 ns/op 2805.73 MB/s (asm) + +BenchmarkStdCrc32KB-4 30000 45749 ns/op 716.24 MB/s (slice8) +BenchmarkCrc32KBNoAsm-4 30000 45109 ns/op 726.42 MB/s (slice8) +BenchmarkCrc32KB-4 100000 11497 ns/op 2850.09 MB/s (asm) + +BenchmarkStdNoAsmCastagnol40B-4 10000000 161 ns/op 246.94 MB/s +BenchmarkStdCastagnoli40B-4 50000000 28.4 ns/op 1410.69 MB/s (asm) +BenchmarkCastagnoli40BNoAsm-4 20000000 100 ns/op 398.01 MB/s (slice8) +BenchmarkCastagnoli40B-4 50000000 28.2 ns/op 1419.54 MB/s (asm) + +BenchmarkStdNoAsmCastagnoli1KB-4 500000 3622 ns/op 282.67 MB/s +BenchmarkStdCastagnoli1KB-4 10000000 144 ns/op 7099.78 MB/s (asm) +BenchmarkCastagnoli1KBNoAsm-4 1000000 1475 ns/op 694.14 MB/s (slice8) +BenchmarkCastagnoli1KB-4 10000000 146 ns/op 6993.35 MB/s (asm) + 
+BenchmarkStdNoAsmCastagnoli8KB-4 50000 28781 ns/op 284.63 MB/s
+BenchmarkStdCastagnoli8KB-4 1000000 1029 ns/op 7957.89 MB/s (asm)
+BenchmarkCastagnoli8KBNoAsm-4 200000 11410 ns/op 717.94 MB/s (slice8)
+BenchmarkCastagnoli8KB-4 1000000 1000 ns/op 8188.71 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnoli32KB-4 10000 115426 ns/op 283.89 MB/s
+BenchmarkStdCastagnoli32KB-4 300000 4065 ns/op 8059.13 MB/s (asm)
+BenchmarkCastagnoli32KBNoAsm-4 30000 45171 ns/op 725.41 MB/s (slice8)
+BenchmarkCastagnoli32KB-4 500000 4077 ns/op 8035.89 MB/s (asm)
+```
+
+The IEEE assembler optimizations have been submitted and will be part of the Go 1.6 standard library.
+
+However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7.
+
+# license
+
+Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions.
diff --git a/vendor/github.com/klauspost/crc32/crc32.go b/vendor/github.com/klauspost/crc32/crc32.go
new file mode 100644
index 000000000..8aa91b17e
--- /dev/null
+++ b/vendor/github.com/klauspost/crc32/crc32.go
@@ -0,0 +1,207 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32,
+// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for
+// information.
+//
+// Polynomials are represented in LSB-first form also known as reversed representation.
+//
+// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials
+// for information.
+package crc32
+
+import (
+ "hash"
+ "sync"
+)
+
+// The size of a CRC-32 checksum in bytes.
+const Size = 4
+
+// Predefined polynomials.
+const (
+ // IEEE is by far and away the most common CRC-32 polynomial.
+ // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ...
+ IEEE = 0xedb88320
+
+ // Castagnoli's polynomial, used in iSCSI.
+ // Has better error detection characteristics than IEEE.
+ // http://dx.doi.org/10.1109/26.231911
+ Castagnoli = 0x82f63b78
+
+ // Koopman's polynomial.
+ // Also has better error detection characteristics than IEEE.
+ // http://dx.doi.org/10.1109/DSN.2002.1028931
+ Koopman = 0xeb31d82e
+)
+
+// Table is a 256-word table representing the polynomial for efficient processing.
+type Table [256]uint32
+
+// This file makes use of functions implemented in architecture-specific files.
+// The interface that they implement is as follows:
+//
+// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE
+// // algorithm is available.
+// archAvailableIEEE() bool
+//
+// // archInitIEEE initializes the architecture-specific CRC32-IEEE algorithm.
+// // It can only be called if archAvailableIEEE() returns true.
+// archInitIEEE()
+//
+// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if
+// // archInitIEEE() was previously called.
+// archUpdateIEEE(crc uint32, p []byte) uint32
+//
+// // archAvailableCastagnoli reports whether an architecture-specific
+// // CRC32-C algorithm is available.
+// archAvailableCastagnoli() bool
+//
+// // archInitCastagnoli initializes the architecture-specific CRC32-C
+// // algorithm. It can only be called if archAvailableCastagnoli() returns
+// // true.
+// archInitCastagnoli()
+//
+// // archUpdateCastagnoli updates the given CRC32-C. It can only be called
+// // if archInitCastagnoli() was previously called.
+// archUpdateCastagnoli(crc uint32, p []byte) uint32
+
+// castagnoliTable points to a lazily initialized Table for the Castagnoli
+// polynomial. MakeTable will always return this value when asked to make a
+// Castagnoli table so we can compare against it to find when the caller is
+// using this polynomial.
+var castagnoliTable *Table +var castagnoliTable8 *slicing8Table +var castagnoliArchImpl bool +var updateCastagnoli func(crc uint32, p []byte) uint32 +var castagnoliOnce sync.Once + +func castagnoliInit() { + castagnoliTable = simpleMakeTable(Castagnoli) + castagnoliArchImpl = archAvailableCastagnoli() + + if castagnoliArchImpl { + archInitCastagnoli() + updateCastagnoli = archUpdateCastagnoli + } else { + // Initialize the slicing-by-8 table. + castagnoliTable8 = slicingMakeTable(Castagnoli) + updateCastagnoli = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, castagnoliTable8, p) + } + } +} + +// IEEETable is the table for the IEEE polynomial. +var IEEETable = simpleMakeTable(IEEE) + +// ieeeTable8 is the slicing8Table for IEEE +var ieeeTable8 *slicing8Table +var ieeeArchImpl bool +var updateIEEE func(crc uint32, p []byte) uint32 +var ieeeOnce sync.Once + +func ieeeInit() { + ieeeArchImpl = archAvailableIEEE() + + if ieeeArchImpl { + archInitIEEE() + updateIEEE = archUpdateIEEE + } else { + // Initialize the slicing-by-8 table. + ieeeTable8 = slicingMakeTable(IEEE) + updateIEEE = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, ieeeTable8, p) + } + } +} + +// MakeTable returns a Table constructed from the specified polynomial. +// The contents of this Table must not be modified. +func MakeTable(poly uint32) *Table { + switch poly { + case IEEE: + ieeeOnce.Do(ieeeInit) + return IEEETable + case Castagnoli: + castagnoliOnce.Do(castagnoliInit) + return castagnoliTable + } + return simpleMakeTable(poly) +} + +// digest represents the partial evaluation of a checksum. +type digest struct { + crc uint32 + tab *Table +} + +// New creates a new hash.Hash32 computing the CRC-32 checksum +// using the polynomial represented by the Table. +// Its Sum method will lay the value out in big-endian byte order. 
+func New(tab *Table) hash.Hash32 { + if tab == IEEETable { + ieeeOnce.Do(ieeeInit) + } + return &digest{0, tab} +} + +// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum +// using the IEEE polynomial. +// Its Sum method will lay the value out in big-endian byte order. +func NewIEEE() hash.Hash32 { return New(IEEETable) } + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return 1 } + +func (d *digest) Reset() { d.crc = 0 } + +// Update returns the result of adding the bytes in p to the crc. +func Update(crc uint32, tab *Table, p []byte) uint32 { + switch tab { + case castagnoliTable: + return updateCastagnoli(crc, p) + case IEEETable: + // Unfortunately, because IEEETable is exported, IEEE may be used without a + // call to MakeTable. We have to make sure it gets initialized in that case. + ieeeOnce.Do(ieeeInit) + return updateIEEE(crc, p) + default: + return simpleUpdate(crc, tab, p) + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + switch d.tab { + case castagnoliTable: + d.crc = updateCastagnoli(d.crc, p) + case IEEETable: + // We only create digest objects through New() which takes care of + // initialization in this case. + d.crc = updateIEEE(d.crc, p) + default: + d.crc = simpleUpdate(d.crc, d.tab, p) + } + return len(p), nil +} + +func (d *digest) Sum32() uint32 { return d.crc } + +func (d *digest) Sum(in []byte) []byte { + s := d.Sum32() + return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// Checksum returns the CRC-32 checksum of data +// using the polynomial represented by the Table. +func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) } + +// ChecksumIEEE returns the CRC-32 checksum of data +// using the IEEE polynomial. 
+func ChecksumIEEE(data []byte) uint32 { + ieeeOnce.Do(ieeeInit) + return updateIEEE(0, data) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.go b/vendor/github.com/klauspost/crc32/crc32_amd64.go new file mode 100644 index 000000000..af2a0b844 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.go @@ -0,0 +1,230 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!gccgo + +// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a +// description of the interface that each architecture-specific file +// implements. + +package crc32 + +import "unsafe" + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// and IEEE CRC. + +// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and use +// CPUID to test for SSE 4.1, 4.2 and CLMUL support. +func haveSSE41() bool +func haveSSE42() bool +func haveCLMUL() bool + +// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42Triple( + crcA, crcB, crcC uint32, + a, b, c []byte, + rounds uint32, +) (retA uint32, retB uint32, retC uint32) + +// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. 
+//go:noescape +func ieeeCLMUL(crc uint32, p []byte) uint32 + +var sse42 = haveSSE42() +var useFastIEEE = haveCLMUL() && haveSSE41() + +const castagnoliK1 = 168 +const castagnoliK2 = 1344 + +type sse42Table [4]Table + +var castagnoliSSE42TableK1 *sse42Table +var castagnoliSSE42TableK2 *sse42Table + +func archAvailableCastagnoli() bool { + return sse42 +} + +func archInitCastagnoli() { + if !sse42 { + panic("arch-specific Castagnoli not available") + } + castagnoliSSE42TableK1 = new(sse42Table) + castagnoliSSE42TableK2 = new(sse42Table) + // See description in updateCastagnoli. + // t[0][i] = CRC(i000, O) + // t[1][i] = CRC(0i00, O) + // t[2][i] = CRC(00i0, O) + // t[3][i] = CRC(000i, O) + // where O is a sequence of K zeros. + var tmp [castagnoliK2]byte + for b := 0; b < 4; b++ { + for i := 0; i < 256; i++ { + val := uint32(i) << uint32(b*8) + castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1]) + castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:]) + } + } +} + +// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the +// table given) with the given initial crc value. This corresponds to +// CRC(crc, O) in the description in updateCastagnoli. +func castagnoliShift(table *sse42Table, crc uint32) uint32 { + return table[3][crc>>24] ^ + table[2][(crc>>16)&0xFF] ^ + table[1][(crc>>8)&0xFF] ^ + table[0][crc&0xFF] +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !sse42 { + panic("not available") + } + + // This method is inspired from the algorithm in Intel's white paper: + // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction" + // The same strategy of splitting the buffer in three is used but the + // combining calculation is different; the complete derivation is explained + // below. + // + // -- The basic idea -- + // + // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a + // time. 
In recent Intel architectures the instruction takes 3 cycles; + // however the processor can pipeline up to three instructions if they + // don't depend on each other. + // + // Roughly this means that we can process three buffers in about the same + // time we can process one buffer. + // + // The idea is then to split the buffer in three, CRC the three pieces + // separately and then combine the results. + // + // Combining the results requires precomputed tables, so we must choose a + // fixed buffer length to optimize. The longer the length, the faster; but + // only buffers longer than this length will use the optimization. We choose + // two cutoffs and compute tables for both: + // - one around 512: 168*3=504 + // - one around 4KB: 1344*3=4032 + // + // -- The nitty gritty -- + // + // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with + // initial non-inverted CRC I). This function has the following properties: + // (a) CRC(I, AB) = CRC(CRC(I, A), B) + // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B) + // + // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of + // K bytes each, where K is a fixed constant. Let O be the sequence of K zero + // bytes. + // + // CRC(I, ABC) = CRC(I, ABO xor C) + // = CRC(I, ABO) xor CRC(0, C) + // = CRC(CRC(I, AB), O) xor CRC(0, C) + // = CRC(CRC(I, AO xor B), O) xor CRC(0, C) + // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C) + // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C) + // + // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B), + // and CRC(0, C) efficiently. We just need to find a way to quickly compute + // CRC(uvwx, O) given a 4-byte initial value uvwx. 
We can precompute these + // values; since we can't have a 32-bit table, we break it up into four + // 8-bit tables: + // + // CRC(uvwx, O) = CRC(u000, O) xor + // CRC(0v00, O) xor + // CRC(00w0, O) xor + // CRC(000x, O) + // + // We can compute tables corresponding to the four terms for all 8-bit + // values. + + crc = ^crc + + // If a buffer is long enough to use the optimization, process the first few + // bytes to align the buffer to an 8 byte boundary (if necessary). + if len(p) >= castagnoliK1*3 { + delta := int(uintptr(unsafe.Pointer(&p[0])) & 7) + if delta != 0 { + delta = 8 - delta + crc = castagnoliSSE42(crc, p[:delta]) + p = p[delta:] + } + } + + // Process 3*K2 at a time. + for len(p) >= castagnoliK2*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). + crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK2:], p[castagnoliK2*2:], + castagnoliK2/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC + p = p[castagnoliK2*3:] + } + + // Process 3*K1 at a time. + for len(p) >= castagnoliK1*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). + crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK1:], p[castagnoliK1*2:], + castagnoliK1/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC + p = p[castagnoliK1*3:] + } + + // Use the simple implementation for what's left. + crc = castagnoliSSE42(crc, p) + return ^crc +} + +func archAvailableIEEE() bool { + return useFastIEEE +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !useFastIEEE { + panic("not available") + } + // We still use slicing-by-8 for small buffers. 
+ archIeeeTable8 = slicingMakeTable(IEEE) +} + +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !useFastIEEE { + panic("not available") + } + + if len(p) >= 64 { + left := len(p) & 15 + do := len(p) - left + crc = ^ieeeCLMUL(^crc, p[:do]) + p = p[do:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.s b/vendor/github.com/klauspost/crc32/crc32_amd64.s new file mode 100644 index 000000000..e8a7941ce --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.s @@ -0,0 +1,319 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +#define NOSPLIT 4 +#define RODATA 8 + +// castagnoliSSE42 updates the (non-inverted) crc with the given buffer. +// +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + // If there are fewer than 8 bytes to process, skip alignment. + CMPQ CX, $8 + JL less_than_8 + + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + // Process the first few bytes to 8-byte align the input. + + // BX = 8 - BX. We need to process this many bytes to align. + SUBQ $1, BX + XORQ $7, BX + + BTQ $0, BX + JNC align_2 + + CRC32B (SI), AX + DECQ CX + INCQ SI + +align_2: + BTQ $1, BX + JNC align_4 + + // CRC32W (SI), AX + BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + + SUBQ $2, CX + ADDQ $2, SI + +align_4: + BTQ $2, BX + JNC aligned + + // CRC32L (SI), AX + BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + + SUBQ $4, CX + ADDQ $4, SI + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. 
+ CMPQ CX, $8 + JL less_than_8 + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +less_than_8: + // We may have some bytes left over; process 4 bytes, then 2, then 1. + BTQ $2, CX + JNC less_than_4 + + // CRC32L (SI), AX + BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + ADDQ $4, SI + +less_than_4: + BTQ $1, CX + JNC less_than_2 + + // CRC32W (SI), AX + BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + ADDQ $2, SI + +less_than_2: + BTQ $0, CX + JNC done + + CRC32B (SI), AX + +done: + MOVL AX, ret+32(FP) + RET + +// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds) +// bytes from each buffer. +// +// func castagnoliSSE42Triple( +// crc1, crc2, crc3 uint32, +// a, b, c []byte, +// rounds uint32, +// ) (retA uint32, retB uint32, retC uint32) +TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0 + MOVL crcA+0(FP), AX + MOVL crcB+4(FP), CX + MOVL crcC+8(FP), DX + + MOVQ a+16(FP), R8 // data pointer + MOVQ b+40(FP), R9 // data pointer + MOVQ c+64(FP), R10 // data pointer + + MOVL rounds+88(FP), R11 + +loop: + CRC32Q (R8), AX + CRC32Q (R9), CX + CRC32Q (R10), DX + + CRC32Q 8(R8), AX + CRC32Q 8(R9), CX + CRC32Q 8(R10), DX + + CRC32Q 16(R8), AX + CRC32Q 16(R9), CX + CRC32Q 16(R10), DX + + ADDQ $24, R8 + ADDQ $24, R9 + ADDQ $24, R10 + + DECQ R11 + JNZ loop + + MOVL AX, retA+96(FP) + MOVL CX, retB+100(FP) + MOVL DX, retC+104(FP) + RET + +// func haveSSE42() bool +TEXT ·haveSSE42(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $20, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// func haveCLMUL() bool +TEXT ·haveCLMUL(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $1, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// func haveSSE41() bool +TEXT ·haveSSE41(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $19, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// CRC32 polynomial data +// +// These constants are lifted from the +// Linux kernel, since they avoid the costly +// PSHUFB 16 byte 
reversal proposed in the +// original Intel paper. +DATA r2r1kp<>+0(SB)/8, $0x154442bd4 +DATA r2r1kp<>+8(SB)/8, $0x1c6e41596 +DATA r4r3kp<>+0(SB)/8, $0x1751997d0 +DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e +DATA rupolykp<>+0(SB)/8, $0x1db710641 +DATA rupolykp<>+8(SB)/8, $0x1f7011641 +DATA r5kp<>+0(SB)/8, $0x163cd6124 + +GLOBL r2r1kp<>(SB), RODATA, $16 +GLOBL r4r3kp<>(SB), RODATA, $16 +GLOBL rupolykp<>(SB), RODATA, $16 +GLOBL r5kp<>(SB), RODATA, $8 + +// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 64, and must be a multiple of 16. + +// func ieeeCLMUL(crc uint32, p []byte) uint32 +TEXT ·ieeeCLMUL(SB), NOSPLIT, $0 + MOVL crc+0(FP), X0 // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + MOVOU (SI), X1 + MOVOU 16(SI), X2 + MOVOU 32(SI), X3 + MOVOU 48(SI), X4 + PXOR X0, X1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left + JB remain64 + + MOVOA r2r1kp<>+0(SB), X0 + +loopback64: + MOVOA X1, X5 + MOVOA X2, X6 + MOVOA X3, X7 + MOVOA X4, X8 + + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0, X0, X2 + PCLMULQDQ $0, X0, X3 + PCLMULQDQ $0, X0, X4 + + // Load next early + MOVOU (SI), X11 + MOVOU 16(SI), X12 + MOVOU 32(SI), X13 + MOVOU 48(SI), X14 + + PCLMULQDQ $0x11, X0, X5 + PCLMULQDQ $0x11, X0, X6 + PCLMULQDQ $0x11, X0, X7 + PCLMULQDQ $0x11, X0, X8 + + PXOR X5, X1 + PXOR X6, X2 + PXOR X7, X3 + PXOR X8, X4 + + PXOR X11, X1 + PXOR X12, X2 + PXOR X13, X3 + PXOR X14, X4 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? 
+ JGE loopback64 + + // Fold result into a single register (X1) +remain64: + MOVOA r4r3kp<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // If there is less than 16 bytes left we are done + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5kp<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. + PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOA rupolykp<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + // PEXTRD $1, X1, AX (SSE 4.1) + BYTE $0x66; BYTE $0x0f; BYTE $0x3a + BYTE $0x16; BYTE $0xc8; BYTE $0x01 + MOVL AX, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.go b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go new file mode 100644 index 000000000..3222b06a5 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go @@ -0,0 +1,43 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!gccgo + +package crc32 + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// CRC. + +// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2 +// support. 
+func haveSSE42() bool + +// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +var sse42 = haveSSE42() + +func archAvailableCastagnoli() bool { + return sse42 +} + +func archInitCastagnoli() { + if !sse42 { + panic("not available") + } + // No initialization necessary. +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !sse42 { + panic("not available") + } + return castagnoliSSE42(crc, p) +} + +func archAvailableIEEE() bool { return false } +func archInitIEEE() { panic("not available") } +func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.s b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s new file mode 100644 index 000000000..a578d685c --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s @@ -0,0 +1,67 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +#define NOSPLIT 4 +#define RODATA 8 + +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVL p+4(FP), SI // data pointer + MOVL p_len+8(FP), CX // len(p) + + NOTL AX + + // If there's less than 8 bytes to process, we do it byte-by-byte. + CMPQ CX, $8 + JL cleanup + + // Process individual bytes until the input is 8-byte aligned. +startup: + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + CRC32B (SI), AX + DECQ CX + INCQ SI + JMP startup + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + CMPQ CX, $8 + JL cleanup + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +cleanup: + // We may have some bytes left over that we process one at a time. 
+ CMPQ CX, $0 + JE done + + CRC32B (SI), AX + INCQ SI + DECQ CX + JMP cleanup + +done: + NOTL AX + MOVL AX, ret+16(FP) + RET + +// func haveSSE42() bool +TEXT ·haveSSE42(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $20, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + diff --git a/vendor/github.com/klauspost/crc32/crc32_generic.go b/vendor/github.com/klauspost/crc32/crc32_generic.go new file mode 100644 index 000000000..abacbb663 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_generic.go @@ -0,0 +1,89 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains CRC32 algorithms that are not specific to any architecture +// and don't use hardware acceleration. +// +// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table. +// +// The slicing-by-8 algorithm is a faster implementation that uses a bigger +// table (8*256*4 bytes). + +package crc32 + +// simpleMakeTable allocates and constructs a Table for the specified +// polynomial. The table is suitable for use with the simple algorithm +// (simpleUpdate). +func simpleMakeTable(poly uint32) *Table { + t := new(Table) + simplePopulateTable(poly, t) + return t +} + +// simplePopulateTable constructs a Table for the specified polynomial, suitable +// for use with simpleUpdate. +func simplePopulateTable(poly uint32, t *Table) { + for i := 0; i < 256; i++ { + crc := uint32(i) + for j := 0; j < 8; j++ { + if crc&1 == 1 { + crc = (crc >> 1) ^ poly + } else { + crc >>= 1 + } + } + t[i] = crc + } +} + +// simpleUpdate uses the simple algorithm to update the CRC, given a table that +// was previously computed using simpleMakeTable. +func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 { + crc = ^crc + for _, v := range p { + crc = tab[byte(crc)^v] ^ (crc >> 8) + } + return ^crc +} + +// Use slicing-by-8 when payload >= this value. 
+const slicing8Cutoff = 16 + +// slicing8Table is array of 8 Tables, used by the slicing-by-8 algorithm. +type slicing8Table [8]Table + +// slicingMakeTable constructs a slicing8Table for the specified polynomial. The +// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate). +func slicingMakeTable(poly uint32) *slicing8Table { + t := new(slicing8Table) + simplePopulateTable(poly, &t[0]) + for i := 0; i < 256; i++ { + crc := t[0][i] + for j := 1; j < 8; j++ { + crc = t[0][crc&0xFF] ^ (crc >> 8) + t[j][i] = crc + } + } + return t +} + +// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a +// table that was previously computed using slicingMakeTable. +func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 { + if len(p) >= slicing8Cutoff { + crc = ^crc + for len(p) > 8 { + crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^ + tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^ + tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF] + p = p[8:] + } + crc = ^crc + } + if len(p) == 0 { + return crc + } + return simpleUpdate(crc, &tab[0], p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_otherarch.go b/vendor/github.com/klauspost/crc32/crc32_otherarch.go new file mode 100644 index 000000000..cc960764b --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_otherarch.go @@ -0,0 +1,15 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64,!amd64p32,!s390x + +package crc32 + +func archAvailableIEEE() bool { return false } +func archInitIEEE() { panic("not available") } +func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } + +func archAvailableCastagnoli() bool { return false } +func archInitCastagnoli() { panic("not available") } +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.go b/vendor/github.com/klauspost/crc32/crc32_s390x.go new file mode 100644 index 000000000..ce96f0328 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.go @@ -0,0 +1,91 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x + +package crc32 + +const ( + vxMinLen = 64 + vxAlignMask = 15 // align to 16 bytes +) + +// hasVectorFacility reports whether the machine has the z/Architecture +// vector facility installed and enabled. +func hasVectorFacility() bool + +var hasVX = hasVectorFacility() + +// vectorizedCastagnoli implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +//go:noescape +func vectorizedCastagnoli(crc uint32, p []byte) uint32 + +// vectorizedIEEE implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +//go:noescape +func vectorizedIEEE(crc uint32, p []byte) uint32 + +func archAvailableCastagnoli() bool { + return hasVX +} + +var archCastagnoliTable8 *slicing8Table + +func archInitCastagnoli() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archCastagnoliTable8 = slicingMakeTable(Castagnoli) +} + +// archUpdateCastagnoli calculates the checksum of p using +// vectorizedCastagnoli. 
+func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedCastagnoli(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archCastagnoliTable8, p) +} + +func archAvailableIEEE() bool { + return hasVX +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +// archUpdateIEEE calculates the checksum of p using vectorizedIEEE. +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedIEEE(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.s b/vendor/github.com/klauspost/crc32/crc32_s390x.s new file mode 100644 index 000000000..e980ca29d --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.s @@ -0,0 +1,249 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x + +#include "textflag.h" + +// Vector register range containing CRC-32 constants + +#define CONST_PERM_LE2BE V9 +#define CONST_R2R1 V10 +#define CONST_R4R3 V11 +#define CONST_R5 V12 +#define CONST_RU_POLY V13 +#define CONST_CRC_POLY V14 + +// The CRC-32 constant block contains reduction constants to fold and +// process particular chunks of the input data stream in parallel. 
+// +// Note that the constant definitions below are extended in order to compute +// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. +// The rightmost doubleword can be 0 to prevent contribution to the result or +// can be multiplied by 1 to perform an XOR without the need for a separate +// VECTOR EXCLUSIVE OR instruction. +// +// The polynomials used are bit-reflected: +// +// IEEE: P'(x) = 0x0edb88320 +// Castagnoli: P'(x) = 0x082f63b78 + +// IEEE polynomial constants +DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crcleconskp+8(SB)/8, $0x0706050403020100 +DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2 +DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1 +DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4 +DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3 +DATA ·crcleconskp+48(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5 +DATA ·crcleconskp+64(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u' +DATA ·crcleconskp+80(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1 + +GLOBL ·crcleconskp(SB), RODATA, $144 + +// Castagonli Polynomial constants +DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crccleconskp+8(SB)/8, $0x0706050403020100 +DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2 +DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1 +DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4 +DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3 +DATA ·crccleconskp+48(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5 +DATA ·crccleconskp+64(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u' +DATA ·crccleconskp+80(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1 + +GLOBL ·crccleconskp(SB), RODATA, $144 + +// func hasVectorFacility() bool +TEXT 
·hasVectorFacility(SB), NOSPLIT, $24-1 + MOVD $x-24(SP), R1 + XC $24, 0(R1), 0(R1) // clear the storage + MOVD $2, R0 // R0 is the number of double words stored -1 + WORD $0xB2B01000 // STFLE 0(R1) + XOR R0, R0 // reset the value of R0 + MOVBZ z-8(SP), R1 + AND $0x40, R1 + BEQ novector + +vectorinstalled: + // check if the vector instruction has been enabled + VLEIB $0, $0xF, V16 + VLGVB $0, V16, R1 + CMPBNE R1, $0xF, novector + MOVB $1, ret+0(FP) // have vx + RET + +novector: + MOVB $0, ret+0(FP) // no vx + RET + +// The CRC-32 function(s) use these calling conventions: +// +// Parameters: +// +// R2: Initial CRC value, typically ~0; and final CRC (return) value. +// R3: Input buffer pointer, performance might be improved if the +// buffer is on a doubleword boundary. +// R4: Length of the buffer, must be 64 bytes or greater. +// +// Register usage: +// +// R5: CRC-32 constant pool base pointer. +// V0: Initial CRC value and intermediate constants and results. +// V1..V4: Data for CRC computation. +// V5..V8: Next data chunks that are fetched from the input buffer. +// +// V9..V14: CRC-32 constants. 
+ +// func vectorizedIEEE(crc uint32, p []byte) uint32 +TEXT ·vectorizedIEEE(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + MOVD $·crcleconskp(SB), R5 + BR vectorizedBody<>(SB) + +// func vectorizedCastagnoli(crc uint32, p []byte) uint32 +TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + // R5: crc-32 constant pool base pointer, constant is used to reduce crc + MOVD $·crccleconskp(SB), R5 + BR vectorizedBody<>(SB) + +TEXT vectorizedBody<>(SB), NOSPLIT, $0 + XOR $0xffffffff, R2 // NOTW R2 + VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY + + // Load the initial CRC value into the rightmost word of V0 + VZERO V0 + VLVGF $3, R2, V0 + + // Crash if the input size is less than 64-bytes. + CMP R4, $64 + BLT crash + + // Load a 64-byte data chunk and XOR with CRC + VLM 0(R3), V1, V4 // 64-bytes into V1..V4 + + // Reflect the data if the CRC operation is in the bit-reflected domain + VPERM V1, V1, CONST_PERM_LE2BE, V1 + VPERM V2, V2, CONST_PERM_LE2BE, V2 + VPERM V3, V3, CONST_PERM_LE2BE, V3 + VPERM V4, V4, CONST_PERM_LE2BE, V4 + + VX V0, V1, V1 // V1 ^= CRC + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 + + // Check remaining buffer size and jump to proper folding method + CMP R4, $64 + BLT less_than_64bytes + +fold_64bytes_loop: + // Load the next 64-byte data chunk into V5 to V8 + VLM 0(R3), V5, V8 + VPERM V5, V5, CONST_PERM_LE2BE, V5 + VPERM V6, V6, CONST_PERM_LE2BE, V6 + VPERM V7, V7, CONST_PERM_LE2BE, V7 + VPERM V8, V8, CONST_PERM_LE2BE, V8 + + // Perform a GF(2) multiplication of the doublewords in V1 with + // the reduction constants in V0. The intermediate result is + // then folded (accumulated) with the next data chunk in V5 and + // stored in V1. Repeat this step for the register contents + // in V2, V3, and V4 respectively. 
+ + VGFMAG CONST_R2R1, V1, V5, V1 + VGFMAG CONST_R2R1, V2, V6, V2 + VGFMAG CONST_R2R1, V3, V7, V3 + VGFMAG CONST_R2R1, V4, V8, V4 + + // Adjust buffer pointer and length for next loop + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 // LEN = LEN - 64 + + CMP R4, $64 + BGE fold_64bytes_loop + +less_than_64bytes: + // Fold V1 to V4 into a single 128-bit value in V1 + VGFMAG CONST_R4R3, V1, V2, V1 + VGFMAG CONST_R4R3, V1, V3, V1 + VGFMAG CONST_R4R3, V1, V4, V1 + + // Check whether to continue with 64-bit folding + CMP R4, $16 + BLT final_fold + +fold_16bytes_loop: + VL 0(R3), V2 // Load next data chunk + VPERM V2, V2, CONST_PERM_LE2BE, V2 + + VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk + + // Adjust buffer pointer and size for folding next data chunk + ADD $16, R3 + ADD $-16, R4 + + // Process remaining data chunks + CMP R4, $16 + BGE fold_16bytes_loop + +final_fold: + VLEIB $7, $0x40, V9 + VSRLB V9, CONST_R4R3, V0 + VLEIG $0, $1, V0 + + VGFMG V0, V1, V1 + + VLEIB $7, $0x20, V9 // Shift by words + VSRLB V9, V1, V2 // Store remaining bits in V2 + VUPLLF V1, V1 // Split rightmost doubleword + VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2 + + // The input values to the Barret reduction are the degree-63 polynomial + // in V1 (R(x)), degree-32 generator polynomial, and the reduction + // constant u. The Barret reduction result is the CRC value of R(x) mod + // P(x). + // + // The Barret reduction algorithm is defined as: + // + // 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u + // 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) + // 3. C(x) = R(x) XOR T2(x) mod x^32 + // + // Note: To compensate the division by x^32, use the vector unpack + // instruction to move the leftmost word into the leftmost doubleword + // of the vector register. The rightmost doubleword is multiplied + // with zero to not contribute to the intermedate results. 
+ + // T1(x) = floor( R(x) / x^32 ) GF2MUL u + VUPLLF V1, V2 + VGFMG CONST_RU_POLY, V2, V2 + + // Compute the GF(2) product of the CRC polynomial in VO with T1(x) in + // V2 and XOR the intermediate result, T2(x), with the value in V1. + // The final result is in the rightmost word of V2. + + VUPLLF V2, V2 + VGFMAG CONST_CRC_POLY, V2, V1, V2 + +done: + VLGVF $2, V2, R2 + XOR $0xffffffff, R2 // NOTW R2 + MOVWZ R2, ret + 32(FP) + RET + +crash: + MOVD $0, (R0) // input size is less than 64-bytes diff --git a/vendor/gopkg.in/Shopify/sarama.v1/CHANGELOG.md b/vendor/gopkg.in/Shopify/sarama.v1/CHANGELOG.md new file mode 100644 index 000000000..49ff92165 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/CHANGELOG.md @@ -0,0 +1,323 @@ +# Changelog + +#### Version 1.10.1 (2016-08-30) + +Bug Fixes: + - Fix the documentation for `HashPartitioner` which was incorrect + ([#717](https://github.com/Shopify/sarama/pull/717)). + - Permit client creation even when it is limited by ACLs + ([#722](https://github.com/Shopify/sarama/pull/722)). + - Several fixes to the consumer timer optimization code, regressions introduced + in v1.10.0. Go's timers are finicky + ([#730](https://github.com/Shopify/sarama/pull/730), + [#733](https://github.com/Shopify/sarama/pull/733), + [#734](https://github.com/Shopify/sarama/pull/734)). + - Handle consuming compressed relative offsets with Kafka 0.10 + ([#735](https://github.com/Shopify/sarama/pull/735)). + +#### Version 1.10.0 (2016-08-02) + +_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of +Kafka you are running against (via the `config.Version` value) in order to use +features that may not be compatible with old Kafka versions. If you don't +specify this value it will default to 0.8.2 (the minimum supported), and trying +to use more recent features (like the offset manager) will fail with an error. 
+ +_Also:_ The offset-manager's behaviour has been changed to match the upstream +java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and +[#713](https://github.com/Shopify/sarama/pull/713)). If you use the +offset-manager, please ensure that you are committing one *greater* than the +last consumed message offset or else you may end up consuming duplicate +messages. + +New Features: + - Support for Kafka 0.10 + ([#672](https://github.com/Shopify/sarama/pull/672), + [#678](https://github.com/Shopify/sarama/pull/678), + [#681](https://github.com/Shopify/sarama/pull/681), and others). + - Support for configuring the target Kafka version + ([#676](https://github.com/Shopify/sarama/pull/676)). + - Batch producing support in the SyncProducer + ([#677](https://github.com/Shopify/sarama/pull/677)). + - Extend producer mock to allow setting expectations on message contents + ([#667](https://github.com/Shopify/sarama/pull/667)). + +Improvements: + - Support `nil` compressed messages for deleting in compacted topics + ([#634](https://github.com/Shopify/sarama/pull/634)). + - Pre-allocate decoding errors, greatly reducing heap usage and GC time against + misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). + - Re-use consumer expiry timers, removing one allocation per consumed message + ([#707](https://github.com/Shopify/sarama/pull/707)). + +Bug Fixes: + - Actually default the client ID to "sarama" like we say we do + ([#664](https://github.com/Shopify/sarama/pull/664)). + - Fix a rare issue where `Client.Leader` could return the wrong error + ([#685](https://github.com/Shopify/sarama/pull/685)). + - Fix a possible tight loop in the consumer + ([#693](https://github.com/Shopify/sarama/pull/693)). + - Match upstream's offset-tracking behaviour + ([#705](https://github.com/Shopify/sarama/pull/705)). + - Report UnknownTopicOrPartition errors from the offset manager + ([#706](https://github.com/Shopify/sarama/pull/706)). 
+ - Fix possible negative partition value from the HashPartitioner + ([#709](https://github.com/Shopify/sarama/pull/709)). + +#### Version 1.9.0 (2016-05-16) + +New Features: + - Add support for custom offset manager retention durations + ([#602](https://github.com/Shopify/sarama/pull/602)). + - Publish low-level mocks to enable testing of third-party producer/consumer + implementations ([#570](https://github.com/Shopify/sarama/pull/570)). + - Declare support for Golang 1.6 + ([#611](https://github.com/Shopify/sarama/pull/611)). + - Support for SASL plain-text auth + ([#648](https://github.com/Shopify/sarama/pull/648)). + +Improvements: + - Simplified broker locking scheme slightly + ([#604](https://github.com/Shopify/sarama/pull/604)). + - Documentation cleanup + ([#605](https://github.com/Shopify/sarama/pull/605), + [#621](https://github.com/Shopify/sarama/pull/621), + [#654](https://github.com/Shopify/sarama/pull/654)). + +Bug Fixes: + - Fix race condition shutting down the OffsetManager + ([#658](https://github.com/Shopify/sarama/pull/658)). + +#### Version 1.8.0 (2016-02-01) + +New Features: + - Full support for Kafka 0.9: + - All protocol messages and fields + ([#586](https://github.com/Shopify/sarama/pull/586), + [#588](https://github.com/Shopify/sarama/pull/588), + [#590](https://github.com/Shopify/sarama/pull/590)). + - Verified that TLS support works + ([#581](https://github.com/Shopify/sarama/pull/581)). + - Fixed the OffsetManager compatibility + ([#585](https://github.com/Shopify/sarama/pull/585)). + +Improvements: + - Optimize for fewer system calls when reading from the network + ([#584](https://github.com/Shopify/sarama/pull/584)). + - Automatically retry `InvalidMessage` errors to match upstream behaviour + ([#589](https://github.com/Shopify/sarama/pull/589)). + +#### Version 1.7.0 (2015-12-11) + +New Features: + - Preliminary support for Kafka 0.9 + ([#572](https://github.com/Shopify/sarama/pull/572)). 
This comes with several + caveats: + - Protocol-layer support is mostly in place + ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 + renamed some messages and fields, which we did not in order to preserve API + compatibility. + - The producer and consumer work against 0.9, but the offset manager does + not ([#573](https://github.com/Shopify/sarama/pull/573)). + - TLS support may or may not work + ([#581](https://github.com/Shopify/sarama/pull/581)). + +Improvements: + - Don't wait for request timeouts on dead brokers, greatly speeding recovery + when the TCP connection is left hanging + ([#548](https://github.com/Shopify/sarama/pull/548)). + - Refactored part of the producer. The new version provides a much more elegant + solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also + slightly more efficient, and much more precise in calculating batch sizes + when compression is used + ([#549](https://github.com/Shopify/sarama/pull/549), + [#550](https://github.com/Shopify/sarama/pull/550), + [#551](https://github.com/Shopify/sarama/pull/551)). + +Bug Fixes: + - Fix race condition in consumer test mock + ([#553](https://github.com/Shopify/sarama/pull/553)). + +#### Version 1.6.1 (2015-09-25) + +Bug Fixes: + - Fix panic that could occur if a user-supplied message value failed to encode + ([#449](https://github.com/Shopify/sarama/pull/449)). + +#### Version 1.6.0 (2015-09-04) + +New Features: + - Implementation of a consumer offset manager using the APIs introduced in + Kafka 0.8.2. The API is designed mainly for integration into a future + high-level consumer, not for direct use, although it is *possible* to use it + directly. + ([#461](https://github.com/Shopify/sarama/pull/461)). + +Improvements: + - CRC32 calculation is much faster on machines with SSE4.2 instructions, + removing a major hotspot from most profiles + ([#255](https://github.com/Shopify/sarama/pull/255)). 
+ +Bug Fixes: + - Make protocol decoding more robust against some malformed packets generated + by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), + [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways + ([#528](https://github.com/Shopify/sarama/pull/528)). + - Fix a potential race condition panic in the consumer on shutdown + ([#529](https://github.com/Shopify/sarama/pull/529)). + +#### Version 1.5.0 (2015-08-17) + +New Features: + - TLS-encrypted network connections are now supported. This feature is subject + to change when Kafka releases built-in TLS support, but for now this is + enough to work with TLS-terminating proxies + ([#154](https://github.com/Shopify/sarama/pull/154)). + +Improvements: + - The consumer will not block if a single partition is not drained by the user; + all other partitions will continue to consume normally + ([#485](https://github.com/Shopify/sarama/pull/485)). + - Formatting of error strings has been much improved + ([#495](https://github.com/Shopify/sarama/pull/495)). + - Internal refactoring of the producer for code cleanliness and to enable + future work ([#300](https://github.com/Shopify/sarama/pull/300)). + +Bug Fixes: + - Fix a potential deadlock in the consumer on shutdown + ([#475](https://github.com/Shopify/sarama/pull/475)). + +#### Version 1.4.3 (2015-07-21) + +Bug Fixes: + - Don't include the partitioner in the producer's "fetch partitions" + circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). + - Don't retry messages until the broker is closed when abandoning a broker in + the producer ([#468](https://github.com/Shopify/sarama/pull/468)). + - Update the import path for snappy-go, it has moved again and the API has + changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). + +#### Version 1.4.2 (2015-05-27) + +Bug Fixes: + - Update the import path for snappy-go, it has moved from google code to github + ([#456](https://github.com/Shopify/sarama/pull/456)). 
+ +#### Version 1.4.1 (2015-05-25) + +Improvements: + - Optimizations when decoding snappy messages, thanks to John Potocny + ([#446](https://github.com/Shopify/sarama/pull/446)). + +Bug Fixes: + - Fix hypothetical race conditions on producer shutdown + ([#450](https://github.com/Shopify/sarama/pull/450), + [#451](https://github.com/Shopify/sarama/pull/451)). + +#### Version 1.4.0 (2015-05-01) + +New Features: + - The consumer now implements `Topics()` and `Partitions()` methods to enable + users to dynamically choose what topics/partitions to consume without + instantiating a full client + ([#431](https://github.com/Shopify/sarama/pull/431)). + - The partition-consumer now exposes the high water mark offset value returned + by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). + - Added a `kafka-console-consumer` tool capable of handling multiple + partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` + ([#439](https://github.com/Shopify/sarama/pull/439), + [#442](https://github.com/Shopify/sarama/pull/442)). + +Improvements: + - The producer's logging during retry scenarios is more consistent, more + useful, and slightly less verbose + ([#429](https://github.com/Shopify/sarama/pull/429)). + - The client now shuffles its initial list of seed brokers in order to prevent + thundering herd on the first broker in the list + ([#441](https://github.com/Shopify/sarama/pull/441)). + +Bug Fixes: + - The producer now correctly manages its state if retries occur when it is + shutting down, fixing several instances of confusing behaviour and at least + one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). + - The consumer now handles messages for different partitions asynchronously, + making it much more resilient to specific user code ordering + ([#325](https://github.com/Shopify/sarama/pull/325)). 
+ +#### Version 1.3.0 (2015-04-16) + +New Features: + - The client now tracks consumer group coordinators using + ConsumerMetadataRequests similar to how it tracks partition leadership using + regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). + This adds two methods to the client API: + - `Coordinator(consumerGroup string) (*Broker, error)` + - `RefreshCoordinator(consumerGroup string) error` + +Improvements: + - ConsumerMetadataResponses now automatically create a Broker object out of the + ID/address/port combination for the Coordinator; accessing the fields + individually has been deprecated + ([#413](https://github.com/Shopify/sarama/pull/413)). + - Much improved handling of `OffsetOutOfRange` errors in the consumer. + Consumers will fail to start if the provided offset is out of range + ([#418](https://github.com/Shopify/sarama/pull/418)) + and they will automatically shut down if the offset falls out of range + ([#424](https://github.com/Shopify/sarama/pull/424)). + - Small performance improvement in encoding and decoding protocol messages + ([#427](https://github.com/Shopify/sarama/pull/427)). + +Bug Fixes: + - Fix a rare race condition in the client's background metadata refresher if + it happens to be activated while the client is being closed + ([#422](https://github.com/Shopify/sarama/pull/422)). + +#### Version 1.2.0 (2015-04-07) + +Improvements: + - The producer's behaviour when `Flush.Frequency` is set is now more intuitive + ([#389](https://github.com/Shopify/sarama/pull/389)). + - The producer is now somewhat more memory-efficient during and after retrying + messages due to an improved queue implementation + ([#396](https://github.com/Shopify/sarama/pull/396)). + - The consumer produces much more useful logging output when leadership + changes ([#385](https://github.com/Shopify/sarama/pull/385)). 
+ - The client's `GetOffset` method will now automatically refresh metadata and + retry once in the event of stale information or similar + ([#394](https://github.com/Shopify/sarama/pull/394)). + - Broker connections now have support for using TCP keepalives + ([#407](https://github.com/Shopify/sarama/issues/407)). + +Bug Fixes: + - The OffsetCommitRequest message now correctly implements all three possible + API versions ([#390](https://github.com/Shopify/sarama/pull/390), + [#400](https://github.com/Shopify/sarama/pull/400)). + +#### Version 1.1.0 (2015-03-20) + +Improvements: + - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly + broken topics don't choke throughput + ([#373](https://github.com/Shopify/sarama/pull/373)). + +Bug Fixes: + - Fix the producer's internal reference counting in certain unusual scenarios + ([#367](https://github.com/Shopify/sarama/pull/367)). + - Fix the consumer's internal reference counting in certain unusual scenarios + ([#369](https://github.com/Shopify/sarama/pull/369)). + - Fix a condition where the producer's internal control messages could have + gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). + - Fix an issue where invalid partition lists would be cached when asking for + metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)). + + +#### Version 1.0.0 (2015-03-17) + +Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: + +- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. +- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. 
+- The main types of Sarama are now interfaces to make depedency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. +- For most uses cases, it is no longer necessary to open a `Client`; this will be done for you. +- All the configuration values have been unified in the `Config` struct. +- Much improved test suite. diff --git a/vendor/gopkg.in/Shopify/sarama.v1/MIT-LICENSE b/vendor/gopkg.in/Shopify/sarama.v1/MIT-LICENSE new file mode 100644 index 000000000..8121b63b1 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/MIT-LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/Makefile b/vendor/gopkg.in/Shopify/sarama.v1/Makefile new file mode 100644 index 000000000..626b09a54 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/Makefile @@ -0,0 +1,21 @@ +default: fmt vet errcheck test + +test: + go test -v -timeout 60s -race ./... + +vet: + go vet ./... + +errcheck: + errcheck github.com/Shopify/sarama/... + +fmt: + @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi + +install_dependencies: install_errcheck get + +install_errcheck: + go get github.com/kisielk/errcheck + +get: + go get -t diff --git a/vendor/gopkg.in/Shopify/sarama.v1/README.md b/vendor/gopkg.in/Shopify/sarama.v1/README.md new file mode 100644 index 000000000..bcbd3e9c1 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/README.md @@ -0,0 +1,36 @@ +sarama +====== + +[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama) +[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) + +Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). + +### Getting started + +- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama). +- Mocks for testing are available in the [mocks](./mocks) subpackage. +- The [examples](./examples) directory contains more elaborate example applications. +- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. + +### Compatibility and API stability + +Sarama provides a "2 releases + 2 months" compatibility guarantee: we support +the two latest stable releases of Kafka and Go, and we provide a two month +grace period for older releases. This means we currently officially support +Go 1.6 and 1.5, and Kafka 0.10.0, 0.9.0 and 0.8.2, although older releases are +still likely to work. 
+ +Sarama follows semantic versioning and provides API stability via the gopkg.in service. +You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. +A changelog is available [here](CHANGELOG.md). + +### Contributing + +* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/CONTRIBUTING.md). +* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more + technical and design details. +* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) + contains a wealth of useful information. +* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. +* If you have any questions, just ask! diff --git a/vendor/gopkg.in/Shopify/sarama.v1/Vagrantfile b/vendor/gopkg.in/Shopify/sarama.v1/Vagrantfile new file mode 100644 index 000000000..4586d9ae8 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/Vagrantfile @@ -0,0 +1,19 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
+VAGRANTFILE_API_VERSION = "2" + +MEMORY = 3072 + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "ubuntu/trusty64" + + config.vm.provision :shell, path: "vagrant/provision.sh" + + config.vm.network "private_network", ip: "192.168.100.67" + + config.vm.provider "virtualbox" do |v| + v.memory = MEMORY + end +end diff --git a/vendor/gopkg.in/Shopify/sarama.v1/api_versions_request.go b/vendor/gopkg.in/Shopify/sarama.v1/api_versions_request.go new file mode 100644 index 000000000..ab65f01cc --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/api_versions_request.go @@ -0,0 +1,24 @@ +package sarama + +type ApiVersionsRequest struct { +} + +func (r *ApiVersionsRequest) encode(pe packetEncoder) error { + return nil +} + +func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (r *ApiVersionsRequest) key() int16 { + return 18 +} + +func (r *ApiVersionsRequest) version() int16 { + return 0 +} + +func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/api_versions_response.go b/vendor/gopkg.in/Shopify/sarama.v1/api_versions_response.go new file mode 100644 index 000000000..16d62db2d --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/api_versions_response.go @@ -0,0 +1,86 @@ +package sarama + +type ApiVersionsResponseBlock struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { + pe.putInt16(b.ApiKey) + pe.putInt16(b.MinVersion) + pe.putInt16(b.MaxVersion) + return nil +} + +func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { + var err error + + if b.ApiKey, err = pd.getInt16(); err != nil { + return err + } + + if b.MinVersion, err = pd.getInt16(); err != nil { + return err + } + + if b.MaxVersion, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +type ApiVersionsResponse struct { + Err KError + 
ApiVersions []*ApiVersionsResponseBlock +} + +func (r *ApiVersionsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := pe.putArrayLength(len(r.ApiVersions)); err != nil { + return err + } + for _, apiVersion := range r.ApiVersions { + if err := apiVersion.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { + if kerr, err := pd.getInt16(); err != nil { + return err + } else { + r.Err = KError(kerr) + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) + for i := 0; i < numBlocks; i++ { + block := new(ApiVersionsResponseBlock) + if err := block.decode(pd); err != nil { + return err + } + r.ApiVersions[i] = block + } + + return nil +} + +func (r *ApiVersionsResponse) key() int16 { + return 18 +} + +func (r *ApiVersionsResponse) version() int16 { + return 0 +} + +func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/async_producer.go b/vendor/gopkg.in/Shopify/sarama.v1/async_producer.go new file mode 100644 index 000000000..e1ae5b0da --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/async_producer.go @@ -0,0 +1,903 @@ +package sarama + +import ( + "fmt" + "sync" + "time" + + "github.com/eapache/go-resiliency/breaker" + "github.com/eapache/queue" +) + +// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages +// to the correct broker for the provided topic-partition, refreshing metadata as appropriate, +// and parses responses for errors. You must read from the Errors() channel or the +// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid +// leaks: it will not be garbage-collected automatically when it passes out of +// scope. 
+type AsyncProducer interface { + + // AsyncClose triggers a shutdown of the producer, flushing any messages it may + // have buffered. The shutdown has completed when both the Errors and Successes + // channels have been closed. When calling AsyncClose, you *must* continue to + // read from those channels in order to drain the results of any messages in + // flight. + AsyncClose() + + // Close shuts down the producer and flushes any messages it may have buffered. + // You must call this function before a producer object passes out of scope, as + // it may otherwise leak memory. You must call this before calling Close on the + // underlying client. + Close() error + + // Input is the input channel for the user to write messages to that they + // wish to send. + Input() chan<- *ProducerMessage + + // Successes is the success output channel back to the user when AckSuccesses is + // enabled. If Return.Successes is true, you MUST read from this channel or the + // Producer will deadlock. It is suggested that you send and read messages + // together in a single select statement. + Successes() <-chan *ProducerMessage + + // Errors is the error output channel back to the user. You MUST read from this + // channel or the Producer will deadlock when the channel is full. Alternatively, + // you can set Producer.Return.Errors in your config to false, which prevents + // errors to be returned. + Errors() <-chan *ProducerError +} + +type asyncProducer struct { + client Client + conf *Config + ownClient bool + + errors chan *ProducerError + input, successes, retries chan *ProducerMessage + inFlight sync.WaitGroup + + brokers map[*Broker]chan<- *ProducerMessage + brokerRefs map[chan<- *ProducerMessage]int + brokerLock sync.Mutex +} + +// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. 
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { + client, err := NewClient(addrs, conf) + if err != nil { + return nil, err + } + + p, err := NewAsyncProducerFromClient(client) + if err != nil { + return nil, err + } + p.(*asyncProducer).ownClient = true + return p, nil +} + +// NewAsyncProducerFromClient creates a new Producer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this producer. +func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + p := &asyncProducer{ + client: client, + conf: client.Config(), + errors: make(chan *ProducerError), + input: make(chan *ProducerMessage), + successes: make(chan *ProducerMessage), + retries: make(chan *ProducerMessage), + brokers: make(map[*Broker]chan<- *ProducerMessage), + brokerRefs: make(map[chan<- *ProducerMessage]int), + } + + // launch our singleton dispatchers + go withRecover(p.dispatcher) + go withRecover(p.retryHandler) + + return p, nil +} + +type flagSet int8 + +const ( + syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer + fin // final message from partitionProducer to brokerProducer and back + shutdown // start the shutdown process +) + +// ProducerMessage is the collection of elements passed to the Producer in order to send a message. +type ProducerMessage struct { + Topic string // The Kafka topic for this message. + // The partitioning key for this message. Pre-existing Encoders include + // StringEncoder and ByteEncoder. + Key Encoder + // The actual message to store in Kafka. Pre-existing Encoders include + // StringEncoder and ByteEncoder. + Value Encoder + + // This field is used to hold arbitrary data you wish to include so it + // will be available when receiving on the Successes and Errors channels. 
+ // Sarama completely ignores this field and is only to be used for + // pass-through data. + Metadata interface{} + + // Below this point are filled in by the producer as the message is processed + + // Offset is the offset of the message stored on the broker. This is only + // guaranteed to be defined if the message was successfully delivered and + // RequiredAcks is not NoResponse. + Offset int64 + // Partition is the partition that the message was sent to. This is only + // guaranteed to be defined if the message was successfully delivered. + Partition int32 + // Timestamp is the timestamp assigned to the message by the broker. This + // is only guaranteed to be defined if the message was successfully + // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at + // least version 0.10.0. + Timestamp time.Time + + retries int + flags flagSet +} + +const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. + +func (m *ProducerMessage) byteSize() int { + size := producerMessageOverhead + if m.Key != nil { + size += m.Key.Length() + } + if m.Value != nil { + size += m.Value.Length() + } + return size +} + +func (m *ProducerMessage) clear() { + m.flags = 0 + m.retries = 0 +} + +// ProducerError is the type of error generated when the producer fails to deliver a message. +// It contains the original ProducerMessage as well as the actual error value. +type ProducerError struct { + Msg *ProducerMessage + Err error +} + +func (pe ProducerError) Error() string { + return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) +} + +// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. +// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel +// when closing a producer. 
+type ProducerErrors []*ProducerError + +func (pe ProducerErrors) Error() string { + return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) +} + +func (p *asyncProducer) Errors() <-chan *ProducerError { + return p.errors +} + +func (p *asyncProducer) Successes() <-chan *ProducerMessage { + return p.successes +} + +func (p *asyncProducer) Input() chan<- *ProducerMessage { + return p.input +} + +func (p *asyncProducer) Close() error { + p.AsyncClose() + + if p.conf.Producer.Return.Successes { + go withRecover(func() { + for _ = range p.successes { + } + }) + } + + var errors ProducerErrors + if p.conf.Producer.Return.Errors { + for event := range p.errors { + errors = append(errors, event) + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (p *asyncProducer) AsyncClose() { + go withRecover(p.shutdown) +} + +// singleton +// dispatches messages by topic +func (p *asyncProducer) dispatcher() { + handlers := make(map[string]chan<- *ProducerMessage) + shuttingDown := false + + for msg := range p.input { + if msg == nil { + Logger.Println("Something tried to send a nil message, it was ignored.") + continue + } + + if msg.flags&shutdown != 0 { + shuttingDown = true + p.inFlight.Done() + continue + } else if msg.retries == 0 { + if shuttingDown { + // we can't just call returnError here because that decrements the wait group, + // which hasn't been incremented yet for this message, and shouldn't be + pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} + if p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + continue + } + p.inFlight.Add(1) + } + + if msg.byteSize() > p.conf.Producer.MaxMessageBytes { + p.returnError(msg, ErrMessageSizeTooLarge) + continue + } + + handler := handlers[msg.Topic] + if handler == nil { + handler = p.newTopicProducer(msg.Topic) + handlers[msg.Topic] = handler + } + + handler <- msg + } + + for _, handler := range handlers { + close(handler) + } +} + +// one per 
topic +// partitions messages, then dispatches them by partition +type topicProducer struct { + parent *asyncProducer + topic string + input <-chan *ProducerMessage + + breaker *breaker.Breaker + handlers map[int32]chan<- *ProducerMessage + partitioner Partitioner +} + +func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + tp := &topicProducer{ + parent: p, + topic: topic, + input: input, + breaker: breaker.New(3, 1, 10*time.Second), + handlers: make(map[int32]chan<- *ProducerMessage), + partitioner: p.conf.Producer.Partitioner(topic), + } + go withRecover(tp.dispatch) + return input +} + +func (tp *topicProducer) dispatch() { + for msg := range tp.input { + if msg.retries == 0 { + if err := tp.partitionMessage(msg); err != nil { + tp.parent.returnError(msg, err) + continue + } + } + + handler := tp.handlers[msg.Partition] + if handler == nil { + handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) + tp.handlers[msg.Partition] = handler + } + + handler <- msg + } + + for _, handler := range tp.handlers { + close(handler) + } +} + +func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { + var partitions []int32 + + err := tp.breaker.Run(func() (err error) { + if tp.partitioner.RequiresConsistency() { + partitions, err = tp.parent.client.Partitions(msg.Topic) + } else { + partitions, err = tp.parent.client.WritablePartitions(msg.Topic) + } + return + }) + + if err != nil { + return err + } + + numPartitions := int32(len(partitions)) + + if numPartitions == 0 { + return ErrLeaderNotAvailable + } + + choice, err := tp.partitioner.Partition(msg, numPartitions) + + if err != nil { + return err + } else if choice < 0 || choice >= numPartitions { + return ErrInvalidPartition + } + + msg.Partition = partitions[choice] + + return nil +} + +// one per partition per topic +// dispatches messages to the appropriate broker +// also responsible for 
maintaining message order during retries +type partitionProducer struct { + parent *asyncProducer + topic string + partition int32 + input <-chan *ProducerMessage + + leader *Broker + breaker *breaker.Breaker + output chan<- *ProducerMessage + + // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, + // all other messages get buffered in retryState[msg.retries].buf to preserve ordering + // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and + // therefore whether our buffer is complete and safe to flush) + highWatermark int + retryState []partitionRetryState +} + +type partitionRetryState struct { + buf []*ProducerMessage + expectChaser bool +} + +func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + pp := &partitionProducer{ + parent: p, + topic: topic, + partition: partition, + input: input, + + breaker: breaker.New(3, 1, 10*time.Second), + retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), + } + go withRecover(pp.dispatch) + return input +} + +func (pp *partitionProducer) dispatch() { + // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` + // on the first message + pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) + if pp.leader != nil { + pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + } + + for msg := range pp.input { + if msg.retries > pp.highWatermark { + // a new, higher, retry level; handle it and then back off + pp.newHighWatermark(msg.retries) + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + } else if pp.highWatermark > 0 { + // we are retrying 
something (else highWatermark would be 0) but this message is not a *new* retry level + if msg.retries < pp.highWatermark { + // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin) + if msg.flags&fin == fin { + pp.retryState[msg.retries].expectChaser = false + pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected + } else { + pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) + } + continue + } else if msg.flags&fin == fin { + // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set, + // meaning this retry level is done and we can go down (at least) one level and flush that + pp.retryState[pp.highWatermark].expectChaser = false + pp.flushRetryBuffers() + pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected + continue + } + } + + // if we made it this far then the current msg contains real data, and can be sent to the next goroutine + // without breaking any of our ordering guarantees + + if pp.output == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnError(msg, err) + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + continue + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + pp.output <- msg + } + + if pp.output != nil { + pp.parent.unrefBrokerProducer(pp.leader, pp.output) + } +} + +func (pp *partitionProducer) newHighWatermark(hwm int) { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm) + pp.highWatermark = hwm + + // send off a fin so that we know when everything "in between" has made it + // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) + pp.retryState[pp.highWatermark].expectChaser = true + pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still 
inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} + + // a new HWM means that our current broker selection is out of date + Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + pp.parent.unrefBrokerProducer(pp.leader, pp.output) + pp.output = nil +} + +func (pp *partitionProducer) flushRetryBuffers() { + Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark) + for { + pp.highWatermark-- + + if pp.output == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) + goto flushDone + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + for _, msg := range pp.retryState[pp.highWatermark].buf { + pp.output <- msg + } + + flushDone: + pp.retryState[pp.highWatermark].buf = nil + if pp.retryState[pp.highWatermark].expectChaser { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark) + break + } else if pp.highWatermark == 0 { + Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition) + break + } + } +} + +func (pp *partitionProducer) updateLeader() error { + return pp.breaker.Run(func() (err error) { + if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil { + return err + } + + if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { + return err + } + + pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + + return nil + }) +} + +// one per broker; also constructs an associated flusher +func (p *asyncProducer) newBrokerProducer(broker *Broker) 
chan<- *ProducerMessage { + var ( + input = make(chan *ProducerMessage) + bridge = make(chan *produceSet) + responses = make(chan *brokerProducerResponse) + ) + + bp := &brokerProducer{ + parent: p, + broker: broker, + input: input, + output: bridge, + responses: responses, + buffer: newProduceSet(p), + currentRetries: make(map[string]map[int32]error), + } + go withRecover(bp.run) + + // minimal bridge to make the network response `select`able + go withRecover(func() { + for set := range bridge { + request := set.buildRequest() + + response, err := broker.Produce(request) + + responses <- &brokerProducerResponse{ + set: set, + err: err, + res: response, + } + } + close(responses) + }) + + return input +} + +type brokerProducerResponse struct { + set *produceSet + err error + res *ProduceResponse +} + +// groups messages together into appropriately-sized batches for sending to the broker +// handles state related to retries etc +type brokerProducer struct { + parent *asyncProducer + broker *Broker + + input <-chan *ProducerMessage + output chan<- *produceSet + responses <-chan *brokerProducerResponse + + buffer *produceSet + timer <-chan time.Time + timerFired bool + + closing error + currentRetries map[string]map[int32]error +} + +func (bp *brokerProducer) run() { + var output chan<- *produceSet + Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID()) + + for { + select { + case msg := <-bp.input: + if msg == nil { + bp.shutdown() + return + } + + if msg.flags&syn == syn { + Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n", + bp.broker.ID(), msg.Topic, msg.Partition) + if bp.currentRetries[msg.Topic] == nil { + bp.currentRetries[msg.Topic] = make(map[int32]error) + } + bp.currentRetries[msg.Topic][msg.Partition] = nil + bp.parent.inFlight.Done() + continue + } + + if reason := bp.needsRetry(msg); reason != nil { + bp.parent.retryMessage(msg, reason) + + if bp.closing == nil && msg.flags&fin == fin { + // we were retrying this 
partition but we can start processing again + delete(bp.currentRetries[msg.Topic], msg.Partition) + Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n", + bp.broker.ID(), msg.Topic, msg.Partition) + } + + continue + } + + if bp.buffer.wouldOverflow(msg) { + if err := bp.waitForSpace(msg); err != nil { + bp.parent.retryMessage(msg, err) + continue + } + } + + if err := bp.buffer.add(msg); err != nil { + bp.parent.returnError(msg, err) + continue + } + + if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { + bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency) + } + case <-bp.timer: + bp.timerFired = true + case output <- bp.buffer: + bp.rollOver() + case response := <-bp.responses: + bp.handleResponse(response) + } + + if bp.timerFired || bp.buffer.readyToFlush() { + output = bp.output + } else { + output = nil + } + } +} + +func (bp *brokerProducer) shutdown() { + for !bp.buffer.empty() { + select { + case response := <-bp.responses: + bp.handleResponse(response) + case bp.output <- bp.buffer: + bp.rollOver() + } + } + close(bp.output) + for response := range bp.responses { + bp.handleResponse(response) + } + + Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) +} + +func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { + if bp.closing != nil { + return bp.closing + } + + return bp.currentRetries[msg.Topic][msg.Partition] +} + +func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { + Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) + + for { + select { + case response := <-bp.responses: + bp.handleResponse(response) + // handling a response can change our state, so re-check some things + if reason := bp.needsRetry(msg); reason != nil { + return reason + } else if !bp.buffer.wouldOverflow(msg) { + return nil + } + case bp.output <- bp.buffer: + bp.rollOver() + return nil + } + } +} + +func (bp *brokerProducer) rollOver() { + 
bp.timer = nil + bp.timerFired = false + bp.buffer = newProduceSet(bp.parent) +} + +func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { + if response.err != nil { + bp.handleError(response.set, response.err) + } else { + bp.handleSuccess(response.set, response.res) + } + + if bp.buffer.empty() { + bp.rollOver() // this can happen if the response invalidated our buffer + } +} + +func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { + // we iterate through the blocks in the request set, not the response, so that we notice + // if the response is missing a block completely + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + if response == nil { + // this only happens when RequiredAcks is NoResponse, so we have to assume success + bp.parent.returnSuccesses(msgs) + return + } + + block := response.GetBlock(topic, partition) + if block == nil { + bp.parent.returnErrors(msgs, ErrIncompleteResponse) + return + } + + switch block.Err { + // Success + case ErrNoError: + if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { + for _, msg := range msgs { + msg.Timestamp = block.Timestamp + } + } + for i, msg := range msgs { + msg.Offset = block.Offset + int64(i) + } + bp.parent.returnSuccesses(msgs) + // Retriable errors + case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", + bp.broker.ID(), topic, partition, block.Err) + bp.currentRetries[topic][partition] = block.Err + bp.parent.retryMessages(msgs, block.Err) + bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) + // Other non-retriable errors + default: + bp.parent.returnErrors(msgs, block.Err) + } + }) +} + +func (bp *brokerProducer) handleError(sent *produceSet, err error) { 
+ switch err.(type) { + case PacketEncodingError: + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.returnErrors(msgs, err) + }) + default: + Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) + bp.parent.abandonBrokerConnection(bp.broker) + _ = bp.broker.Close() + bp.closing = err + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.retryMessages(msgs, err) + }) + bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.retryMessages(msgs, err) + }) + bp.rollOver() + } +} + +// singleton +// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock +// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel +func (p *asyncProducer) retryHandler() { + var msg *ProducerMessage + buf := queue.New() + + for { + if buf.Length() == 0 { + msg = <-p.retries + } else { + select { + case msg = <-p.retries: + case p.input <- buf.Peek().(*ProducerMessage): + buf.Remove() + continue + } + } + + if msg == nil { + return + } + + buf.Add(msg) + } +} + +// utility functions + +func (p *asyncProducer) shutdown() { + Logger.Println("Producer shutting down.") + p.inFlight.Add(1) + p.input <- &ProducerMessage{flags: shutdown} + + p.inFlight.Wait() + + if p.ownClient { + err := p.client.Close() + if err != nil { + Logger.Println("producer/shutdown failed to close the embedded client:", err) + } + } + + close(p.input) + close(p.retries) + close(p.errors) + close(p.successes) +} + +func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { + msg.clear() + pErr := &ProducerError{Msg: msg, Err: err} + if p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + p.inFlight.Done() +} + +func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) { + for _, msg := range batch { + p.returnError(msg, err) + } +} + 
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) { + for _, msg := range batch { + if p.conf.Producer.Return.Successes { + msg.clear() + p.successes <- msg + } + p.inFlight.Done() + } +} + +func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) { + if msg.retries >= p.conf.Producer.Retry.Max { + p.returnError(msg, err) + } else { + msg.retries++ + p.retries <- msg + } +} + +func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { + for _, msg := range batch { + p.retryMessage(msg, err) + } +} + +func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + bp := p.brokers[broker] + + if bp == nil { + bp = p.newBrokerProducer(broker) + p.brokers[broker] = bp + p.brokerRefs[bp] = 0 + } + + p.brokerRefs[bp]++ + + return bp +} + +func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + p.brokerRefs[bp]-- + if p.brokerRefs[bp] == 0 { + close(bp) + delete(p.brokerRefs, bp) + + if p.brokers[broker] == bp { + delete(p.brokers, broker) + } + } +} + +func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + delete(p.brokers, broker) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/broker.go b/vendor/gopkg.in/Shopify/sarama.v1/broker.go new file mode 100644 index 000000000..bfcb82f37 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/broker.go @@ -0,0 +1,526 @@ +package sarama + +import ( + "crypto/tls" + "encoding/binary" + "fmt" + "io" + "net" + "strconv" + "sync" + "sync/atomic" + "time" +) + +// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. 
+type Broker struct { + id int32 + addr string + + conf *Config + correlationID int32 + conn net.Conn + connErr error + lock sync.Mutex + opened int32 + + responses chan responsePromise + done chan bool +} + +type responsePromise struct { + correlationID int32 + packets chan []byte + errors chan error +} + +// NewBroker creates and returns a Broker targetting the given host:port address. +// This does not attempt to actually connect, you have to call Open() for that. +func NewBroker(addr string) *Broker { + return &Broker{id: -1, addr: addr} +} + +// Open tries to connect to the Broker if it is not already connected or connecting, but does not block +// waiting for the connection to complete. This means that any subsequent operations on the broker will +// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call, +// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or +// AlreadyConnected. If conf is nil, the result of NewConfig() is used. 
+func (b *Broker) Open(conf *Config) error { + if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { + return ErrAlreadyConnected + } + + if conf == nil { + conf = NewConfig() + } + + err := conf.Validate() + if err != nil { + return err + } + + b.lock.Lock() + + go withRecover(func() { + defer b.lock.Unlock() + + dialer := net.Dialer{ + Timeout: conf.Net.DialTimeout, + KeepAlive: conf.Net.KeepAlive, + } + + if conf.Net.TLS.Enable { + b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) + } else { + b.conn, b.connErr = dialer.Dial("tcp", b.addr) + } + if b.connErr != nil { + Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + b.conn = newBufConn(b.conn) + + b.conf = conf + + if conf.Net.SASL.Enable { + b.connErr = b.sendAndReceiveSASLPlainAuth() + if b.connErr != nil { + err = b.conn.Close() + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + } + + b.done = make(chan bool) + b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) + + if b.id >= 0 { + Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) + } else { + Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) + } + go withRecover(b.responseReceiver) + }) + + return nil +} + +// Connected returns true if the broker is connected and false otherwise. If the broker is not +// connected but it had tried to connect, the error from that connection attempt is also returned. 
+func (b *Broker) Connected() (bool, error) { + b.lock.Lock() + defer b.lock.Unlock() + + return b.conn != nil, b.connErr +} + +func (b *Broker) Close() error { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + return ErrNotConnected + } + + close(b.responses) + <-b.done + + err := b.conn.Close() + + b.conn = nil + b.connErr = nil + b.done = nil + b.responses = nil + + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + + atomic.StoreInt32(&b.opened, 0) + + return err +} + +// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. +func (b *Broker) ID() int32 { + return b.id +} + +// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. +func (b *Broker) Addr() string { + return b.addr +} + +func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { + response := new(MetadataResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { + response := new(ConsumerMetadataResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { + response := new(OffsetResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { + var response *ProduceResponse + var err error + + if request.RequiredAcks == NoResponse { + err = b.sendAndReceive(request, nil) + } else { + response = new(ProduceResponse) + err = b.sendAndReceive(request, response) + } + + if err != nil { + return 
nil, err + } + + return response, nil +} + +func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { + response := new(FetchResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { + response := new(OffsetCommitResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { + response := new(OffsetFetchResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { + response := new(JoinGroupResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) { + response := new(SyncGroupResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { + response := new(LeaveGroupResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) { + response := new(HeartbeatResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { + response := new(ListGroupsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) 
DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { + response := new(DescribeGroupsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + if b.connErr != nil { + return nil, b.connErr + } + return nil, ErrNotConnected + } + + if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { + return nil, ErrUnsupportedVersion + } + + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req) + if err != nil { + return nil, err + } + + err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) + if err != nil { + return nil, err + } + + _, err = b.conn.Write(buf) + if err != nil { + return nil, err + } + b.correlationID++ + + if !promiseResponse { + return nil, nil + } + + promise := responsePromise{req.correlationID, make(chan []byte), make(chan error)} + b.responses <- promise + + return &promise, nil +} + +func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { + promise, err := b.send(req, res != nil) + + if err != nil { + return err + } + + if promise == nil { + return nil + } + + select { + case buf := <-promise.packets: + return versionedDecode(buf, res, req.version()) + case err = <-promise.errors: + return err + } +} + +func (b *Broker) decode(pd packetDecoder) (err error) { + b.id, err = pd.getInt32() + if err != nil { + return err + } + + host, err := pd.getString() + if err != nil { + return err + } + + port, err := pd.getInt32() + if err != nil { + return err + } + + b.addr = net.JoinHostPort(host, fmt.Sprint(port)) + if _, _, err := net.SplitHostPort(b.addr); err != nil { + return err + } + + return nil +} + +func (b *Broker) encode(pe packetEncoder) (err error) { + + host, portstr, err := net.SplitHostPort(b.addr) + if err 
!= nil { + return err + } + port, err := strconv.Atoi(portstr) + if err != nil { + return err + } + + pe.putInt32(b.id) + + err = pe.putString(host) + if err != nil { + return err + } + + pe.putInt32(int32(port)) + + return nil +} + +func (b *Broker) responseReceiver() { + var dead error + header := make([]byte, 8) + for response := range b.responses { + if dead != nil { + response.errors <- dead + continue + } + + err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)) + if err != nil { + dead = err + response.errors <- err + continue + } + + _, err = io.ReadFull(b.conn, header) + if err != nil { + dead = err + response.errors <- err + continue + } + + decodedHeader := responseHeader{} + err = decode(header, &decodedHeader) + if err != nil { + dead = err + response.errors <- err + continue + } + if decodedHeader.correlationID != response.correlationID { + // TODO if decoded ID < cur ID, discard until we catch up + // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response + dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} + response.errors <- dead + continue + } + + buf := make([]byte, decodedHeader.length-4) + _, err = io.ReadFull(b.conn, buf) + if err != nil { + dead = err + response.errors <- err + continue + } + + response.packets <- buf + } + close(b.done) +} + +// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149) +// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9 +// +// In SASL Plain, Kafka expects the auth header to be in the following format +// Message format (from https://tools.ietf.org/html/rfc4616): +// +// message = [authzid] UTF8NUL authcid UTF8NUL passwd +// authcid = 1*SAFE ; MUST accept up to 255 octets +// authzid = 1*SAFE ; MUST accept up to 255 octets +// passwd = 1*SAFE ; MUST accept up to 255 octets +// UTF8NUL = 
%x00 ; UTF-8 encoded NUL character +// +// SAFE = UTF1 / UTF2 / UTF3 / UTF4 +// ;; any UTF-8 encoded Unicode character except NUL +// +// When credentials are valid, Kafka returns a 4 byte array of null characters. +// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way +// of responding to bad credentials but thats how its being done today. +func (b *Broker) sendAndReceiveSASLPlainAuth() error { + length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) + authBytes := make([]byte, length+4) //4 byte length header + auth data + binary.BigEndian.PutUint32(authBytes, uint32(length)) + copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)) + + err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) + if err != nil { + Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error()) + return err + } + + _, err = b.conn.Write(authBytes) + if err != nil { + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + header := make([]byte, 4) + n, err := io.ReadFull(b.conn, header) + // If the credentials are valid, we would get a 4 byte response filled with null characters. + // Otherwise, the broker closes the connection and we get an EOF + if err != nil { + Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + + Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/client.go b/vendor/gopkg.in/Shopify/sarama.v1/client.go new file mode 100644 index 000000000..e9a9ea77e --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/client.go @@ -0,0 +1,733 @@ +package sarama + +import ( + "math/rand" + "sort" + "sync" + "time" +) + +// Client is a generic Kafka client. 
It manages connections to one or more Kafka brokers. +// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected +// automatically when it passes out of scope. It is safe to share a client amongst many +// users, however Kafka will process requests from a single client strictly in serial, +// so it is generally more efficient to use the default one client per producer/consumer. +type Client interface { + // Config returns the Config struct of the client. This struct should not be + // altered after it has been created. + Config() *Config + + // Topics returns the set of available topics as retrieved from cluster metadata. + Topics() ([]string, error) + + // Partitions returns the sorted list of all partition IDs for the given topic. + Partitions(topic string) ([]int32, error) + + // WritablePartitions returns the sorted list of all writable partition IDs for + // the given topic, where "writable" means "having a valid leader accepting + // writes". + WritablePartitions(topic string) ([]int32, error) + + // Leader returns the broker object that is the leader of the current + // topic/partition, as determined by querying the cluster metadata. + Leader(topic string, partitionID int32) (*Broker, error) + + // Replicas returns the set of all replica IDs for the given partition. + Replicas(topic string, partitionID int32) ([]int32, error) + + // RefreshMetadata takes a list of topics and queries the cluster to refresh the + // available metadata for those topics. If no topics are provided, it will refresh + // metadata for all topics. + RefreshMetadata(topics ...string) error + + // GetOffset queries the cluster to get the most recent available offset at the + // given time on the topic/partition combination. Time should be OffsetOldest for + // the earliest available offset, OffsetNewest for the offset of the message that + // will be produced next, or a time. 
+ GetOffset(topic string, partitionID int32, time int64) (int64, error) + + // Coordinator returns the coordinating broker for a consumer group. It will + // return a locally cached value if it's available. You can call + // RefreshCoordinator to update the cached value. This function only works on + // Kafka 0.8.2 and higher. + Coordinator(consumerGroup string) (*Broker, error) + + // RefreshCoordinator retrieves the coordinator for a consumer group and stores it + // in local cache. This function only works on Kafka 0.8.2 and higher. + RefreshCoordinator(consumerGroup string) error + + // Close shuts down all broker connections managed by this client. It is required + // to call this function before a client object passes out of scope, as it will + // otherwise leak memory. You must close any Producers or Consumers using a client + // before you close the client. + Close() error + + // Closed returns true if the client has already had Close called on it + Closed() bool +} + +const ( + // OffsetNewest stands for the log head offset, i.e. the offset that will be + // assigned to the next message that will be produced to the partition. You + // can send this to a client's GetOffset method to get this offset, or when + // calling ConsumePartition to start consuming new messages. + OffsetNewest int64 = -1 + // OffsetOldest stands for the oldest offset available on the broker for a + // partition. You can send this to a client's GetOffset method to get this + // offset, or when calling ConsumePartition to start consuming from the + // oldest offset that is still available on the broker. + OffsetOldest int64 = -2 +) + +type client struct { + conf *Config + closer, closed chan none // for shutting down background metadata updater + + // the broker addresses given to us through the constructor are not guaranteed to be returned in + // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) 
+ // so we store them separately + seedBrokers []*Broker + deadSeeds []*Broker + + brokers map[int32]*Broker // maps broker ids to brokers + metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata + coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + + // If the number of partitions is large, we can get some churn calling cachedPartitions, + // so the result is cached. It is important to update this value whenever metadata is changed + cachedPartitionsResults map[string][maxPartitionIndex][]int32 + + lock sync.RWMutex // protects access to the maps that hold cluster state. +} + +// NewClient creates a new Client. It connects to one of the given broker addresses +// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot +// be retrieved from any of the given broker addresses, the client is not created. +func NewClient(addrs []string, conf *Config) (Client, error) { + Logger.Println("Initializing new client") + + if conf == nil { + conf = NewConfig() + } + + if err := conf.Validate(); err != nil { + return nil, err + } + + if len(addrs) < 1 { + return nil, ConfigurationError("You must provide at least one broker address") + } + + client := &client{ + conf: conf, + closer: make(chan none), + closed: make(chan none), + brokers: make(map[int32]*Broker), + metadata: make(map[string]map[int32]*PartitionMetadata), + cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), + coordinators: make(map[string]int32), + } + + random := rand.New(rand.NewSource(time.Now().UnixNano())) + for _, index := range random.Perm(len(addrs)) { + client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) + } + + // do an initial fetch of all cluster metadata by specifing an empty list of topics + err := client.RefreshMetadata() + switch err { + case nil: + break + case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, 
ErrClusterAuthorizationFailed: + // indicates that maybe part of the cluster is down, but is not fatal to creating the client + Logger.Println(err) + default: + close(client.closed) // we haven't started the background updater yet, so we have to do this manually + _ = client.Close() + return nil, err + } + go withRecover(client.backgroundMetadataUpdater) + + Logger.Println("Successfully initialized new client") + + return client, nil +} + +func (client *client) Config() *Config { + return client.conf +} + +func (client *client) Close() error { + if client.Closed() { + // Chances are this is being called from a defer() and the error will go unobserved + // so we go ahead and log the event in this case. + Logger.Printf("Close() called on already closed client") + return ErrClosedClient + } + + // shutdown and wait for the background thread before we take the lock, to avoid races + close(client.closer) + <-client.closed + + client.lock.Lock() + defer client.lock.Unlock() + Logger.Println("Closing Client") + + for _, broker := range client.brokers { + safeAsyncClose(broker) + } + + for _, broker := range client.seedBrokers { + safeAsyncClose(broker) + } + + client.brokers = nil + client.metadata = nil + + return nil +} + +func (client *client) Closed() bool { + return client.brokers == nil +} + +func (client *client) Topics() ([]string, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.lock.RLock() + defer client.lock.RUnlock() + + ret := make([]string, 0, len(client.metadata)) + for topic := range client.metadata { + ret = append(ret, topic) + } + + return ret, nil +} + +func (client *client) Partitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, allPartitions) + + if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, allPartitions) + } + + if 
partitions == nil { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) WritablePartitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, writablePartitions) + + // len==0 catches when it's nil (no such topic) and the odd case when every single + // partition is undergoing leader election simultaneously. Callers have to be able to handle + // this function returning an empty slice (which is a valid return value) but catching it + // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers + // a metadata refresh as a nicety so callers can just try again and don't have to manually + // trigger a refresh (otherwise they'd just keep getting a stale cached copy). + if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, writablePartitions) + } + + if partitions == nil { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return nil, metadata.Err + } + return dupeAndSort(metadata.Replicas), nil +} + +func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + leader, err := client.cachedLeader(topic, partitionID) + + if leader == nil { + err = client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + 
leader, err = client.cachedLeader(topic, partitionID) + } + + return leader, err +} + +func (client *client) RefreshMetadata(topics ...string) error { + if client.Closed() { + return ErrClosedClient + } + + // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper + // error. This handles the case by returning an error instead of sending it + // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 + for _, topic := range topics { + if len(topic) == 0 { + return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return + } + } + + return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max) +} + +func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { + if client.Closed() { + return -1, ErrClosedClient + } + + offset, err := client.getOffset(topic, partitionID, time) + + if err != nil { + if err := client.RefreshMetadata(topic); err != nil { + return -1, err + } + return client.getOffset(topic, partitionID, time) + } + + return offset, err +} + +func (client *client) Coordinator(consumerGroup string) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + coordinator := client.cachedCoordinator(consumerGroup) + + if coordinator == nil { + if err := client.RefreshCoordinator(consumerGroup); err != nil { + return nil, err + } + coordinator = client.cachedCoordinator(consumerGroup) + } + + if coordinator == nil { + return nil, ErrConsumerCoordinatorNotAvailable + } + + _ = coordinator.Open(client.conf) + return coordinator, nil +} + +func (client *client) RefreshCoordinator(consumerGroup string) error { + if client.Closed() { + return ErrClosedClient + } + + response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max) + if err != nil { + return err + } + + client.lock.Lock() + defer client.lock.Unlock() + client.registerBroker(response.Coordinator) + client.coordinators[consumerGroup] = 
response.Coordinator.ID() + return nil +} + +// private broker management helpers + +// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered +// in the brokers map. It returns the broker that is registered, which may be the provided broker, +// or a previously registered Broker instance. You must hold the write lock before calling this function. +func (client *client) registerBroker(broker *Broker) { + if client.brokers[broker.ID()] == nil { + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + } else if broker.Addr() != client.brokers[broker.ID()].Addr() { + safeAsyncClose(client.brokers[broker.ID()]) + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) + } +} + +// deregisterBroker removes a broker from the seedsBroker list, and if it's +// not the seedbroker, removes it from brokers map completely. +func (client *client) deregisterBroker(broker *Broker) { + client.lock.Lock() + defer client.lock.Unlock() + + if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { + client.deadSeeds = append(client.deadSeeds, broker) + client.seedBrokers = client.seedBrokers[1:] + } else { + // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, + // but we really shouldn't have to; once that loop is made better this case can be + // removed, and the function generally can be renamed from `deregisterBroker` to + // `nextSeedBroker` or something + Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) + delete(client.brokers, broker.ID()) + } +} + +func (client *client) resurrectDeadBrokers() { + client.lock.Lock() + defer client.lock.Unlock() + + Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds)) + client.seedBrokers = append(client.seedBrokers, client.deadSeeds...) 
+ client.deadSeeds = nil +} + +func (client *client) any() *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + + if len(client.seedBrokers) > 0 { + _ = client.seedBrokers[0].Open(client.conf) + return client.seedBrokers[0] + } + + // not guaranteed to be random *or* deterministic + for _, broker := range client.brokers { + _ = broker.Open(client.conf) + return broker + } + + return nil +} + +// private caching/lazy metadata helpers + +type partitionType int + +const ( + allPartitions partitionType = iota + writablePartitions + // If you add any more types, update the partition cache in update() + + // Ensure this is the last partition type value + maxPartitionIndex +) + +func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions := client.metadata[topic] + if partitions != nil { + return partitions[partitionID] + } + + return nil +} + +func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions, exists := client.cachedPartitionsResults[topic] + + if !exists { + return nil + } + return partitions[partitionSet] +} + +func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 { + partitions := client.metadata[topic] + + if partitions == nil { + return nil + } + + ret := make([]int32, 0, len(partitions)) + for _, partition := range partitions { + if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { + continue + } + ret = append(ret, partition.ID) + } + + sort.Sort(int32Slice(ret)) + return ret +} + +func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions := client.metadata[topic] + if partitions != nil { + metadata, ok := partitions[partitionID] + if ok { + if metadata.Err == ErrLeaderNotAvailable { + return 
nil, ErrLeaderNotAvailable + } + b := client.brokers[metadata.Leader] + if b == nil { + return nil, ErrLeaderNotAvailable + } + _ = b.Open(client.conf) + return b, nil + } + } + + return nil, ErrUnknownTopicOrPartition +} + +func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { + broker, err := client.Leader(topic, partitionID) + if err != nil { + return -1, err + } + + request := &OffsetRequest{} + request.AddBlock(topic, partitionID, time, 1) + + response, err := broker.GetAvailableOffsets(request) + if err != nil { + _ = broker.Close() + return -1, err + } + + block := response.GetBlock(topic, partitionID) + if block == nil { + _ = broker.Close() + return -1, ErrIncompleteResponse + } + if block.Err != ErrNoError { + return -1, block.Err + } + if len(block.Offsets) != 1 { + return -1, ErrOffsetOutOfRange + } + + return block.Offsets[0], nil +} + +// core metadata update logic + +func (client *client) backgroundMetadataUpdater() { + defer close(client.closed) + + if client.conf.Metadata.RefreshFrequency == time.Duration(0) { + return + } + + ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := client.RefreshMetadata(); err != nil { + Logger.Println("Client background metadata update:", err) + } + case <-client.closer: + return + } + } +} + +func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { + retry := func(err error) error { + if attemptsRemaining > 0 { + Logger.Printf("client/metadata retrying after %dms... 
(%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) + time.Sleep(client.conf.Metadata.Retry.Backoff) + return client.tryRefreshMetadata(topics, attemptsRemaining-1) + } + return err + } + + for broker := client.any(); broker != nil; broker = client.any() { + if len(topics) > 0 { + Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) + } else { + Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) + } + response, err := broker.GetMetadata(&MetadataRequest{Topics: topics}) + + switch err.(type) { + case nil: + // valid response, use it + if shouldRetry, err := client.updateMetadata(response); shouldRetry { + Logger.Println("client/metadata found some partitions to be leaderless") + return retry(err) // note: err can be nil + } else { + return err + } + + case PacketEncodingError: + // didn't even send, return the error + return err + default: + // some other error, remove that broker and try again + Logger.Println("client/metadata got error from broker while fetching metadata:", err) + _ = broker.Close() + client.deregisterBroker(broker) + } + } + + Logger.Println("client/metadata no available broker to send metadata request to") + client.resurrectDeadBrokers() + return retry(ErrOutOfBrokers) +} + +// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable +func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) { + client.lock.Lock() + defer client.lock.Unlock() + + // For all the brokers we received: + // - if it is a new ID, save it + // - if it is an existing ID, but the address we have is stale, discard the old one and save it + // - otherwise ignore it, replacing our existing one would just bounce the connection + for _, broker := range data.Brokers { + client.registerBroker(broker) + } + + for _, topic := range data.Topics { + delete(client.metadata, topic.Name) + 
delete(client.cachedPartitionsResults, topic.Name) + + switch topic.Err { + case ErrNoError: + break + case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results + err = topic.Err + continue + case ErrUnknownTopicOrPartition: // retry, do not store partial partition results + err = topic.Err + retry = true + continue + case ErrLeaderNotAvailable: // retry, but store partial partition results + retry = true + break + default: // don't retry, don't store partial results + Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err) + err = topic.Err + continue + } + + client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions)) + for _, partition := range topic.Partitions { + client.metadata[topic.Name][partition.ID] = partition + if partition.Err == ErrLeaderNotAvailable { + retry = true + } + } + + var partitionCache [maxPartitionIndex][]int32 + partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions) + partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions) + client.cachedPartitionsResults[topic.Name] = partitionCache + } + + return +} + +func (client *client) cachedCoordinator(consumerGroup string) *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + if coordinatorID, ok := client.coordinators[consumerGroup]; ok { + return client.brokers[coordinatorID] + } + return nil +} + +func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) { + retry := func(err error) (*ConsumerMetadataResponse, error) { + if attemptsRemaining > 0 { + Logger.Printf("client/coordinator retrying after %dms... 
(%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) + time.Sleep(client.conf.Metadata.Retry.Backoff) + return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) + } + return nil, err + } + + for broker := client.any(); broker != nil; broker = client.any() { + Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr()) + + request := new(ConsumerMetadataRequest) + request.ConsumerGroup = consumerGroup + + response, err := broker.GetConsumerMetadata(request) + + if err != nil { + Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err) + + switch err.(type) { + case PacketEncodingError: + return nil, err + default: + _ = broker.Close() + client.deregisterBroker(broker) + continue + } + } + + switch response.Err { + case ErrNoError: + Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) + return response, nil + + case ErrConsumerCoordinatorNotAvailable: + Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup) + + // This is very ugly, but this scenario will only happen once per cluster. + // The __consumer_offsets topic only has to be created one time. + // The number of partitions not configurable, but partition 0 should always exist. + if _, err := client.Leader("__consumer_offsets", 0); err != nil { + Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. 
Waiting 2 seconds...\n") + time.Sleep(2 * time.Second) + } + + return retry(ErrConsumerCoordinatorNotAvailable) + default: + return nil, response.Err + } + } + + Logger.Println("client/coordinator no available broker to send consumer metadata request to") + client.resurrectDeadBrokers() + return retry(ErrOutOfBrokers) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/config.go b/vendor/gopkg.in/Shopify/sarama.v1/config.go new file mode 100644 index 000000000..b61bf7ea4 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/config.go @@ -0,0 +1,399 @@ +package sarama + +import ( + "crypto/tls" + "regexp" + "time" +) + +const defaultClientID = "sarama" + +var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) + +// Config is used to pass multiple configuration options to Sarama's constructors. +type Config struct { + // Net is the namespace for network-level properties used by the Broker, and + // shared by the Client/Producer/Consumer. + Net struct { + // How many outstanding requests a connection is allowed to have before + // sending on it blocks (default 5). + MaxOpenRequests int + + // All three of the below configurations are similar to the + // `socket.timeout.ms` setting in JVM kafka. All of them default + // to 30 seconds. + DialTimeout time.Duration // How long to wait for the initial connection. + ReadTimeout time.Duration // How long to wait for a response. + WriteTimeout time.Duration // How long to wait for a transmit. + + TLS struct { + // Whether or not to use TLS when connecting to the broker + // (defaults to false). + Enable bool + // The TLS configuration to use for secure connections if + // enabled (defaults to nil). + Config *tls.Config + } + + // SASL based authentication with broker. While there are multiple SASL authentication methods + // the current implementation is limited to plaintext (SASL/PLAIN) authentication + SASL struct { + // Whether or not to use SASL authentication when connecting to the broker + // (defaults to false). 
+ Enable bool + //username and password for SASL/PLAIN authentication + User string + Password string + } + + // KeepAlive specifies the keep-alive period for an active network connection. + // If zero, keep-alives are disabled. (default is 0: disabled). + KeepAlive time.Duration + } + + // Metadata is the namespace for metadata management properties used by the + // Client, and shared by the Producer/Consumer. + Metadata struct { + Retry struct { + // The total number of times to retry a metadata request when the + // cluster is in the middle of a leader election (default 3). + Max int + // How long to wait for leader election to occur before retrying + // (default 250ms). Similar to the JVM's `retry.backoff.ms`. + Backoff time.Duration + } + // How frequently to refresh the cluster metadata in the background. + // Defaults to 10 minutes. Set to 0 to disable. Similar to + // `topic.metadata.refresh.interval.ms` in the JVM version. + RefreshFrequency time.Duration + } + + // Producer is the namespace for configuration related to producing messages, + // used by the Producer. + Producer struct { + // The maximum permitted size of a message (defaults to 1000000). Should be + // set equal to or smaller than the broker's `message.max.bytes`. + MaxMessageBytes int + // The level of acknowledgement reliability needed from the broker (defaults + // to WaitForLocal). Equivalent to the `request.required.acks` setting of the + // JVM producer. + RequiredAcks RequiredAcks + // The maximum duration the broker will wait the receipt of the number of + // RequiredAcks (defaults to 10 seconds). This is only relevant when + // RequiredAcks is set to WaitForAll or a number > 1. Only supports + // millisecond resolution, nanoseconds will be truncated. Equivalent to + // the JVM producer's `request.timeout.ms` setting. + Timeout time.Duration + // The type of compression to use on messages (defaults to no compression). + // Similar to `compression.codec` setting of the JVM producer. 
+ Compression CompressionCodec + // Generates partitioners for choosing the partition to send messages to + // (defaults to hashing the message key). Similar to the `partitioner.class` + // setting for the JVM producer. + Partitioner PartitionerConstructor + + // Return specifies what channels will be populated. If they are set to true, + // you must read from the respective channels to prevent deadlock. + Return struct { + // If enabled, successfully delivered messages will be returned on the + // Successes channel (default disabled). + Successes bool + + // If enabled, messages that failed to deliver will be returned on the + // Errors channel, including error (default enabled). + Errors bool + } + + // The following config options control how often messages are batched up and + // sent to the broker. By default, messages are sent as fast as possible, and + // all messages received while the current batch is in-flight are placed + // into the subsequent batch. + Flush struct { + // The best-effort number of bytes needed to trigger a flush. Use the + // global sarama.MaxRequestSize to set a hard upper limit. + Bytes int + // The best-effort number of messages needed to trigger a flush. Use + // `MaxMessages` to set a hard upper limit. + Messages int + // The best-effort frequency of flushes. Equivalent to + // `queue.buffering.max.ms` setting of JVM producer. + Frequency time.Duration + // The maximum number of messages the producer will send in a single + // broker request. Defaults to 0 for unlimited. Similar to + // `queue.buffering.max.messages` in the JVM producer. + MaxMessages int + } + + Retry struct { + // The total number of times to retry sending a message (default 3). + // Similar to the `message.send.max.retries` setting of the JVM producer. + Max int + // How long to wait for the cluster to settle between retries + // (default 100ms). Similar to the `retry.backoff.ms` setting of the + // JVM producer. 
+ Backoff time.Duration + } + } + + // Consumer is the namespace for configuration related to consuming messages, + // used by the Consumer. + // + // Note that Sarama's Consumer type does not currently support automatic + // consumer-group rebalancing and offset tracking. For Zookeeper-based + // tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka + // library builds on Sarama to add this support. For Kafka-based tracking + // (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library + // builds on Sarama to add this support. + Consumer struct { + Retry struct { + // How long to wait after a failing to read from a partition before + // trying again (default 2s). + Backoff time.Duration + } + + // Fetch is the namespace for controlling how many bytes are retrieved by any + // given request. + Fetch struct { + // The minimum number of message bytes to fetch in a request - the broker + // will wait until at least this many are available. The default is 1, + // as 0 causes the consumer to spin when no messages are available. + // Equivalent to the JVM's `fetch.min.bytes`. + Min int32 + // The default number of message bytes to fetch from the broker in each + // request (default 32768). This should be larger than the majority of + // your messages, or else the consumer will spend a lot of time + // negotiating sizes and not actually consuming. Similar to the JVM's + // `fetch.message.max.bytes`. + Default int32 + // The maximum number of message bytes to fetch from the broker in a + // single request. Messages larger than this will return + // ErrMessageTooLarge and will not be consumable, so you must be sure + // this is at least as large as your largest message. Defaults to 0 + // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The + // global `sarama.MaxResponseSize` still applies. 
+ Max int32 + } + // The maximum amount of time the broker will wait for Consumer.Fetch.Min + // bytes to become available before it returns fewer than that anyways. The + // default is 250ms, since 0 causes the consumer to spin when no events are + // available. 100-500ms is a reasonable range for most cases. Kafka only + // supports precision up to milliseconds; nanoseconds will be truncated. + // Equivalent to the JVM's `fetch.wait.max.ms`. + MaxWaitTime time.Duration + + // The maximum amount of time the consumer expects a message takes to process + // for the user. If writing to the Messages channel takes longer than this, + // that partition will stop fetching more messages until it can proceed again. + // Note that, since the Messages channel is buffered, the actual grace time is + // (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms. + MaxProcessingTime time.Duration + + // Return specifies what channels will be populated. If they are set to true, + // you must read from them to prevent deadlock. + Return struct { + // If enabled, any errors that occurred while consuming are returned on + // the Errors channel (default disabled). + Errors bool + } + + // Offsets specifies configuration for how and when to commit consumed + // offsets. This currently requires the manual use of an OffsetManager + // but will eventually be automated. + Offsets struct { + // How frequently to commit updated offsets. Defaults to 1s. + CommitInterval time.Duration + + // The initial offset to use if no offset was previously committed. + // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest. + Initial int64 + + // The retention duration for committed offsets. If zero, disabled + // (in which case the `offsets.retention.minutes` option on the + // broker will be used). Kafka only supports precision up to + // milliseconds; nanoseconds will be truncated. Requires Kafka + // broker version 0.9.0 or later. + // (default is 0: disabled). 
+ Retention time.Duration + } + } + + // A user-provided string sent with every request to the brokers for logging, + // debugging, and auditing purposes. Defaults to "sarama", but you should + // probably set it to something specific to your application. + ClientID string + // The number of events to buffer in internal and external channels. This + // permits the producer and consumer to continue processing some messages + // in the background while user code is working, greatly improving throughput. + // Defaults to 256. + ChannelBufferSize int + // The version of Kafka that Sarama will assume it is running against. + // Defaults to the oldest supported stable version. Since Kafka provides + // backwards-compatibility, setting it to a version older than you have + // will not break anything, although it may prevent you from using the + // latest features. Setting it to a version greater than you are actually + // running may lead to random breakage. + Version KafkaVersion +} + +// NewConfig returns a new configuration instance with sane defaults. 
+func NewConfig() *Config { + c := &Config{} + + c.Net.MaxOpenRequests = 5 + c.Net.DialTimeout = 30 * time.Second + c.Net.ReadTimeout = 30 * time.Second + c.Net.WriteTimeout = 30 * time.Second + + c.Metadata.Retry.Max = 3 + c.Metadata.Retry.Backoff = 250 * time.Millisecond + c.Metadata.RefreshFrequency = 10 * time.Minute + + c.Producer.MaxMessageBytes = 1000000 + c.Producer.RequiredAcks = WaitForLocal + c.Producer.Timeout = 10 * time.Second + c.Producer.Partitioner = NewHashPartitioner + c.Producer.Retry.Max = 3 + c.Producer.Retry.Backoff = 100 * time.Millisecond + c.Producer.Return.Errors = true + + c.Consumer.Fetch.Min = 1 + c.Consumer.Fetch.Default = 32768 + c.Consumer.Retry.Backoff = 2 * time.Second + c.Consumer.MaxWaitTime = 250 * time.Millisecond + c.Consumer.MaxProcessingTime = 100 * time.Millisecond + c.Consumer.Return.Errors = false + c.Consumer.Offsets.CommitInterval = 1 * time.Second + c.Consumer.Offsets.Initial = OffsetNewest + + c.ClientID = defaultClientID + c.ChannelBufferSize = 256 + c.Version = minVersion + + return c +} + +// Validate checks a Config instance. It will return a +// ConfigurationError if the specified values don't make sense. 
+func (c *Config) Validate() error { + // some configuration values should be warned on but not fail completely, do those first + if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { + Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") + } + if c.Net.SASL.Enable == false { + if c.Net.SASL.User != "" { + Logger.Println("Net.SASL is disabled but a non-empty username was provided.") + } + if c.Net.SASL.Password != "" { + Logger.Println("Net.SASL is disabled but a non-empty password was provided.") + } + } + if c.Producer.RequiredAcks > 1 { + Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") + } + if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { + Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.") + } + if c.Producer.Flush.Bytes >= int(MaxRequestSize) { + Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.") + } + if c.Producer.Timeout%time.Millisecond != 0 { + Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") + } + if c.Consumer.MaxWaitTime < 100*time.Millisecond { + Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. 
See documentation for details.") + } + if c.Consumer.MaxWaitTime%time.Millisecond != 0 { + Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Offsets.Retention%time.Millisecond != 0 { + Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.") + } + if c.ClientID == defaultClientID { + Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") + } + + // validate Net values + switch { + case c.Net.MaxOpenRequests <= 0: + return ConfigurationError("Net.MaxOpenRequests must be > 0") + case c.Net.DialTimeout <= 0: + return ConfigurationError("Net.DialTimeout must be > 0") + case c.Net.ReadTimeout <= 0: + return ConfigurationError("Net.ReadTimeout must be > 0") + case c.Net.WriteTimeout <= 0: + return ConfigurationError("Net.WriteTimeout must be > 0") + case c.Net.KeepAlive < 0: + return ConfigurationError("Net.KeepAlive must be >= 0") + case c.Net.SASL.Enable == true && c.Net.SASL.User == "": + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + case c.Net.SASL.Enable == true && c.Net.SASL.Password == "": + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + + // validate the Metadata values + switch { + case c.Metadata.Retry.Max < 0: + return ConfigurationError("Metadata.Retry.Max must be >= 0") + case c.Metadata.Retry.Backoff < 0: + return ConfigurationError("Metadata.Retry.Backoff must be >= 0") + case c.Metadata.RefreshFrequency < 0: + return ConfigurationError("Metadata.RefreshFrequency must be >= 0") + } + + // validate the Producer values + switch { + case c.Producer.MaxMessageBytes <= 0: + return ConfigurationError("Producer.MaxMessageBytes must be > 0") + case c.Producer.RequiredAcks < -1: + return ConfigurationError("Producer.RequiredAcks must be >= -1") + case c.Producer.Timeout <= 0: + return 
ConfigurationError("Producer.Timeout must be > 0") + case c.Producer.Partitioner == nil: + return ConfigurationError("Producer.Partitioner must not be nil") + case c.Producer.Flush.Bytes < 0: + return ConfigurationError("Producer.Flush.Bytes must be >= 0") + case c.Producer.Flush.Messages < 0: + return ConfigurationError("Producer.Flush.Messages must be >= 0") + case c.Producer.Flush.Frequency < 0: + return ConfigurationError("Producer.Flush.Frequency must be >= 0") + case c.Producer.Flush.MaxMessages < 0: + return ConfigurationError("Producer.Flush.MaxMessages must be >= 0") + case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: + return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set") + case c.Producer.Retry.Max < 0: + return ConfigurationError("Producer.Retry.Max must be >= 0") + case c.Producer.Retry.Backoff < 0: + return ConfigurationError("Producer.Retry.Backoff must be >= 0") + } + + // validate the Consumer values + switch { + case c.Consumer.Fetch.Min <= 0: + return ConfigurationError("Consumer.Fetch.Min must be > 0") + case c.Consumer.Fetch.Default <= 0: + return ConfigurationError("Consumer.Fetch.Default must be > 0") + case c.Consumer.Fetch.Max < 0: + return ConfigurationError("Consumer.Fetch.Max must be >= 0") + case c.Consumer.MaxWaitTime < 1*time.Millisecond: + return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms") + case c.Consumer.MaxProcessingTime <= 0: + return ConfigurationError("Consumer.MaxProcessingTime must be > 0") + case c.Consumer.Retry.Backoff < 0: + return ConfigurationError("Consumer.Retry.Backoff must be >= 0") + case c.Consumer.Offsets.CommitInterval <= 0: + return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0") + case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: + return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") + + } + + // 
validate misc shared values + switch { + case c.ChannelBufferSize < 0: + return ConfigurationError("ChannelBufferSize must be >= 0") + case !validID.MatchString(c.ClientID): + return ConfigurationError("ClientID is invalid") + } + + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/consumer.go b/vendor/gopkg.in/Shopify/sarama.v1/consumer.go new file mode 100644 index 000000000..5271e21de --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/consumer.go @@ -0,0 +1,715 @@ +package sarama + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// ConsumerMessage encapsulates a Kafka message returned by the consumer. +type ConsumerMessage struct { + Key, Value []byte + Topic string + Partition int32 + Offset int64 + Timestamp time.Time // only set if kafka is version 0.10+ +} + +// ConsumerError is what is provided to the user when an error occurs. +// It wraps an error and includes the topic and partition. +type ConsumerError struct { + Topic string + Partition int32 + Err error +} + +func (ce ConsumerError) Error() string { + return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) +} + +// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. +// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors +// when stopping. +type ConsumerErrors []*ConsumerError + +func (ce ConsumerErrors) Error() string { + return fmt.Sprintf("kafka: %d errors while consuming", len(ce)) +} + +// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() +// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of +// scope. +// +// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking. 
+// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library +// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the +// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. +type Consumer interface { + + // Topics returns the set of available topics as retrieved from the cluster + // metadata. This method is the same as Client.Topics(), and is provided for + // convenience. + Topics() ([]string, error) + + // Partitions returns the sorted list of all partition IDs for the given topic. + // This method is the same as Client.Partitions(), and is provided for convenience. + Partitions(topic string) ([]int32, error) + + // ConsumePartition creates a PartitionConsumer on the given topic/partition with + // the given offset. It will return an error if this Consumer is already consuming + // on the given topic/partition. Offset can be a literal offset, or OffsetNewest + // or OffsetOldest + ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) + + // Close shuts down the consumer. It must be called after all child + // PartitionConsumers have already been closed. + Close() error +} + +type consumer struct { + client Client + conf *Config + ownClient bool + + lock sync.Mutex + children map[string]map[int32]*partitionConsumer + brokerConsumers map[*Broker]*brokerConsumer +} + +// NewConsumer creates a new consumer using the given broker addresses and configuration. +func NewConsumer(addrs []string, config *Config) (Consumer, error) { + client, err := NewClient(addrs, config) + if err != nil { + return nil, err + } + + c, err := NewConsumerFromClient(client) + if err != nil { + return nil, err + } + c.(*consumer).ownClient = true + return c, nil +} + +// NewConsumerFromClient creates a new consumer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this consumer. 
+func NewConsumerFromClient(client Client) (Consumer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + c := &consumer{ + client: client, + conf: client.Config(), + children: make(map[string]map[int32]*partitionConsumer), + brokerConsumers: make(map[*Broker]*brokerConsumer), + } + + return c, nil +} + +func (c *consumer) Close() error { + if c.ownClient { + return c.client.Close() + } + return nil +} + +func (c *consumer) Topics() ([]string, error) { + return c.client.Topics() +} + +func (c *consumer) Partitions(topic string) ([]int32, error) { + return c.client.Partitions(topic) +} + +func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { + child := &partitionConsumer{ + consumer: c, + conf: c.conf, + topic: topic, + partition: partition, + messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), + errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), + feeder: make(chan *FetchResponse, 1), + trigger: make(chan none, 1), + dying: make(chan none), + fetchSize: c.conf.Consumer.Fetch.Default, + } + + if err := child.chooseStartingOffset(offset); err != nil { + return nil, err + } + + var leader *Broker + var err error + if leader, err = c.client.Leader(child.topic, child.partition); err != nil { + return nil, err + } + + if err := c.addChild(child); err != nil { + return nil, err + } + + go withRecover(child.dispatcher) + go withRecover(child.responseFeeder) + + child.broker = c.refBrokerConsumer(leader) + child.broker.input <- child + + return child, nil +} + +func (c *consumer) addChild(child *partitionConsumer) error { + c.lock.Lock() + defer c.lock.Unlock() + + topicChildren := c.children[child.topic] + if topicChildren == nil { + topicChildren = make(map[int32]*partitionConsumer) + c.children[child.topic] = topicChildren + } + + if topicChildren[child.partition] != nil { + return 
ConfigurationError("That topic/partition is already being consumed") + } + + topicChildren[child.partition] = child + return nil +} + +func (c *consumer) removeChild(child *partitionConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.children[child.topic], child.partition) +} + +func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { + c.lock.Lock() + defer c.lock.Unlock() + + bc := c.brokerConsumers[broker] + if bc == nil { + bc = c.newBrokerConsumer(broker) + c.brokerConsumers[broker] = bc + } + + bc.refs++ + + return bc +} + +func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + brokerWorker.refs-- + + if brokerWorker.refs == 0 { + close(brokerWorker.input) + if c.brokerConsumers[brokerWorker.broker] == brokerWorker { + delete(c.brokerConsumers, brokerWorker.broker) + } + } +} + +func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.brokerConsumers, brokerWorker.broker) +} + +// PartitionConsumer + +// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close() +// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically +// when it passes out of scope. +// +// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range +// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported +// as out of range by the brokers. In this case you should decide what you want to do (try a different offset, +// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying. 
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set +// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement +// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. +type PartitionConsumer interface { + + // AsyncClose initiates a shutdown of the PartitionConsumer. This method will + // return immediately, after which you should wait until the 'messages' and + // 'errors' channel are drained. It is required to call this function, or + // Close before a consumer object passes out of scope, as it will otherwise + // leak memory. You must call this before calling Close on the underlying client. + AsyncClose() + + // Close stops the PartitionConsumer from fetching messages. It is required to + // call this function (or AsyncClose) before a consumer object passes out of + // scope, as it will otherwise leak memory. You must call this before calling + // Close on the underlying client. + Close() error + + // Messages returns the read channel for the messages that are returned by + // the broker. + Messages() <-chan *ConsumerMessage + + // Errors returns a read channel of errors that occurred during consuming, if + // enabled. By default, errors are logged and not returned over this channel. + // If you want to implement any custom error handling, set your config's + // Consumer.Return.Errors setting to true, and read from this channel. + Errors() <-chan *ConsumerError + + // HighWaterMarkOffset returns the high water mark offset of the partition, + // i.e. the offset that will be used for the next message that will be produced. + // You can use this to determine how far behind the processing is. 
+ HighWaterMarkOffset() int64 +} + +type partitionConsumer struct { + consumer *consumer + conf *Config + topic string + partition int32 + + broker *brokerConsumer + messages chan *ConsumerMessage + errors chan *ConsumerError + feeder chan *FetchResponse + + trigger, dying chan none + responseResult error + + fetchSize int32 + offset int64 + highWaterMarkOffset int64 +} + +var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing + +func (child *partitionConsumer) sendError(err error) { + cErr := &ConsumerError{ + Topic: child.topic, + Partition: child.partition, + Err: err, + } + + if child.conf.Consumer.Return.Errors { + child.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (child *partitionConsumer) dispatcher() { + for _ = range child.trigger { + select { + case <-child.dying: + close(child.trigger) + case <-time.After(child.conf.Consumer.Retry.Backoff): + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + child.broker = nil + } + + Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) + if err := child.dispatch(); err != nil { + child.sendError(err) + child.trigger <- none{} + } + } + } + + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + } + child.consumer.removeChild(child) + close(child.feeder) +} + +func (child *partitionConsumer) dispatch() error { + if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { + return err + } + + var leader *Broker + var err error + if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { + return err + } + + child.broker = child.consumer.refBrokerConsumer(leader) + + child.broker.input <- child + + return nil +} + +func (child *partitionConsumer) chooseStartingOffset(offset int64) error { + newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) + if err != nil { + return err + } + oldestOffset, err := 
child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) + if err != nil { + return err + } + + switch { + case offset == OffsetNewest: + child.offset = newestOffset + case offset == OffsetOldest: + child.offset = oldestOffset + case offset >= oldestOffset && offset <= newestOffset: + child.offset = offset + default: + return ErrOffsetOutOfRange + } + + return nil +} + +func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { + return child.messages +} + +func (child *partitionConsumer) Errors() <-chan *ConsumerError { + return child.errors +} + +func (child *partitionConsumer) AsyncClose() { + // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes + // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and + // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will + // also just close itself) + close(child.dying) +} + +func (child *partitionConsumer) Close() error { + child.AsyncClose() + + go withRecover(func() { + for _ = range child.messages { + // drain + } + }) + + var errors ConsumerErrors + for err := range child.errors { + errors = append(errors, err) + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (child *partitionConsumer) HighWaterMarkOffset() int64 { + return atomic.LoadInt64(&child.highWaterMarkOffset) +} + +func (child *partitionConsumer) responseFeeder() { + var msgs []*ConsumerMessage + expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime) + expireTimedOut := false + +feederLoop: + for response := range child.feeder { + msgs, child.responseResult = child.parseResponse(response) + + for i, msg := range msgs { + if !expiryTimer.Stop() && !expireTimedOut { + // expiryTimer was expired; clear out the waiting msg + <-expiryTimer.C + } + expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime) + expireTimedOut = false + + select { + case 
child.messages <- msg: + case <-expiryTimer.C: + expireTimedOut = true + child.responseResult = errTimedOut + child.broker.acks.Done() + for _, msg = range msgs[i:] { + child.messages <- msg + } + child.broker.input <- child + continue feederLoop + } + } + + child.broker.acks.Done() + } + + close(child.messages) + close(child.errors) +} + +func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { + block := response.GetBlock(child.topic, child.partition) + if block == nil { + return nil, ErrIncompleteResponse + } + + if block.Err != ErrNoError { + return nil, block.Err + } + + if len(block.MsgSet.Messages) == 0 { + // We got no messages. If we got a trailing one then we need to ask for more data. + // Otherwise we just poll again and wait for one to be produced... + if block.MsgSet.PartialTrailingMessage { + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { + // we can't ask for more data, we've hit the configured limit + child.sendError(ErrMessageTooLarge) + child.offset++ // skip this one so we can keep processing future messages + } else { + child.fetchSize *= 2 + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { + child.fetchSize = child.conf.Consumer.Fetch.Max + } + } + } + + return nil, nil + } + + // we got messages, reset our fetch size in case it was increased for a previous request + child.fetchSize = child.conf.Consumer.Fetch.Default + atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) + + incomplete := false + prelude := true + var messages []*ConsumerMessage + for _, msgBlock := range block.MsgSet.Messages { + + for _, msg := range msgBlock.Messages() { + offset := msg.Offset + if msg.Msg.Version >= 1 { + baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset + offset += baseOffset + } + if prelude && offset < child.offset { + continue + } + prelude = false + + if offset >= 
child.offset { + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: msg.Msg.Key, + Value: msg.Msg.Value, + Offset: offset, + Timestamp: msg.Msg.Timestamp, + }) + child.offset = offset + 1 + } else { + incomplete = true + } + } + + } + + if incomplete || len(messages) == 0 { + return nil, ErrIncompleteResponse + } + return messages, nil +} + +// brokerConsumer + +type brokerConsumer struct { + consumer *consumer + broker *Broker + input chan *partitionConsumer + newSubscriptions chan []*partitionConsumer + wait chan none + subscriptions map[*partitionConsumer]none + acks sync.WaitGroup + refs int +} + +func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { + bc := &brokerConsumer{ + consumer: c, + broker: broker, + input: make(chan *partitionConsumer), + newSubscriptions: make(chan []*partitionConsumer), + wait: make(chan none), + subscriptions: make(map[*partitionConsumer]none), + refs: 0, + } + + go withRecover(bc.subscriptionManager) + go withRecover(bc.subscriptionConsumer) + + return bc +} + +func (bc *brokerConsumer) subscriptionManager() { + var buffer []*partitionConsumer + + // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer + // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks + // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give + // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, + // so the main goroutine can block waiting for work if it has none. 
+ for { + if len(buffer) > 0 { + select { + case event, ok := <-bc.input: + if !ok { + goto done + } + buffer = append(buffer, event) + case bc.newSubscriptions <- buffer: + buffer = nil + case bc.wait <- none{}: + } + } else { + select { + case event, ok := <-bc.input: + if !ok { + goto done + } + buffer = append(buffer, event) + case bc.newSubscriptions <- nil: + } + } + } + +done: + close(bc.wait) + if len(buffer) > 0 { + bc.newSubscriptions <- buffer + } + close(bc.newSubscriptions) +} + +func (bc *brokerConsumer) subscriptionConsumer() { + <-bc.wait // wait for our first piece of work + + // the subscriptionConsumer ensures we will get nil right away if no new subscriptions is available + for newSubscriptions := range bc.newSubscriptions { + bc.updateSubscriptions(newSubscriptions) + + if len(bc.subscriptions) == 0 { + // We're about to be shut down or we're about to receive more subscriptions. + // Either way, the signal just hasn't propagated to our goroutine yet. + <-bc.wait + continue + } + + response, err := bc.fetchNewMessages() + + if err != nil { + Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err) + bc.abort(err) + return + } + + bc.acks.Add(len(bc.subscriptions)) + for child := range bc.subscriptions { + child.feeder <- response + } + bc.acks.Wait() + bc.handleResponses() + } +} + +func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) { + for _, child := range newSubscriptions { + bc.subscriptions[child] = none{} + Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) + } + + for child := range bc.subscriptions { + select { + case <-child.dying: + Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) + close(child.trigger) + delete(bc.subscriptions, child) + default: + break + } + } +} + +func (bc *brokerConsumer) handleResponses() { + // 
handles the response codes left for us by our subscriptions, and abandons ones that have been closed + for child := range bc.subscriptions { + result := child.responseResult + child.responseResult = nil + + switch result { + case nil: + break + case errTimedOut: + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", + bc.broker.ID(), child.topic, child.partition) + delete(bc.subscriptions, child) + case ErrOffsetOutOfRange: + // there's no point in retrying this it will just fail the same way again + // shut it down and force the user to choose what to do + child.sendError(result) + Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) + close(child.trigger) + delete(bc.subscriptions, child) + case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable: + // not an error, but does need redispatching + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + default: + // dunno, tell the user and try redispatching + child.sendError(result) + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + } + } +} + +func (bc *brokerConsumer) abort(err error) { + bc.consumer.abandonBrokerConsumer(bc) + _ = bc.broker.Close() // we don't care about the error this might return, we already have one + + for child := range bc.subscriptions { + child.sendError(err) + child.trigger <- none{} + } + + for newSubscriptions := range bc.newSubscriptions { + if len(newSubscriptions) == 0 { + <-bc.wait + continue + } + for _, child := range newSubscriptions { + child.sendError(err) + child.trigger <- none{} + } + } +} + +func (bc *brokerConsumer) fetchNewMessages() 
(*FetchResponse, error) { + request := &FetchRequest{ + MinBytes: bc.consumer.conf.Consumer.Fetch.Min, + MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), + } + if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { + request.Version = 2 + } + + for child := range bc.subscriptions { + request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) + } + + return bc.broker.Fetch(request) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/consumer_group_members.go b/vendor/gopkg.in/Shopify/sarama.v1/consumer_group_members.go new file mode 100644 index 000000000..9d92d350a --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/consumer_group_members.go @@ -0,0 +1,94 @@ +package sarama + +type ConsumerGroupMemberMetadata struct { + Version int16 + Topics []string + UserData []byte +} + +func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { + pe.putInt16(m.Version) + + if err := pe.putStringArray(m.Topics); err != nil { + return err + } + + if err := pe.putBytes(m.UserData); err != nil { + return err + } + + return nil +} + +func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { + if m.Version, err = pd.getInt16(); err != nil { + return + } + + if m.Topics, err = pd.getStringArray(); err != nil { + return + } + + if m.UserData, err = pd.getBytes(); err != nil { + return + } + + return nil +} + +type ConsumerGroupMemberAssignment struct { + Version int16 + Topics map[string][]int32 + UserData []byte +} + +func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error { + pe.putInt16(m.Version) + + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + if err := pe.putBytes(m.UserData); err != nil { + return err + } + + return nil +} + +func (m *ConsumerGroupMemberAssignment) decode(pd 
packetDecoder) (err error) { + if m.Version, err = pd.getInt16(); err != nil { + return + } + + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + + if m.UserData, err = pd.getBytes(); err != nil { + return + } + + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_request.go b/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_request.go new file mode 100644 index 000000000..483be3354 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_request.go @@ -0,0 +1,26 @@ +package sarama + +type ConsumerMetadataRequest struct { + ConsumerGroup string +} + +func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { + return pe.putString(r.ConsumerGroup) +} + +func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { + r.ConsumerGroup, err = pd.getString() + return err +} + +func (r *ConsumerMetadataRequest) key() int16 { + return 10 +} + +func (r *ConsumerMetadataRequest) version() int16 { + return 0 +} + +func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { + return V0_8_2_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_response.go b/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_response.go new file mode 100644 index 000000000..6b9632bba --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_response.go @@ -0,0 +1,85 @@ +package sarama + +import ( + "net" + "strconv" +) + +type ConsumerMetadataResponse struct { + Err KError + Coordinator *Broker + CoordinatorID int32 // deprecated: use Coordinator.ID() + CoordinatorHost string // deprecated: use Coordinator.Addr() + CoordinatorPort int32 // deprecated: use Coordinator.Addr() +} + +func (r 
*ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(tmp) + + coordinator := new(Broker) + if err := coordinator.decode(pd); err != nil { + return err + } + if coordinator.addr == ":0" { + return nil + } + r.Coordinator = coordinator + + // this can all go away in 2.0, but we have to fill in deprecated fields to maintain + // backwards compatibility + host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) + if err != nil { + return err + } + port, err := strconv.ParseInt(portstr, 10, 32) + if err != nil { + return err + } + r.CoordinatorID = r.Coordinator.ID() + r.CoordinatorHost = host + r.CoordinatorPort = int32(port) + + return nil +} + +func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if r.Coordinator != nil { + host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) + if err != nil { + return err + } + port, err := strconv.ParseInt(portstr, 10, 32) + if err != nil { + return err + } + pe.putInt32(r.Coordinator.ID()) + if err := pe.putString(host); err != nil { + return err + } + pe.putInt32(int32(port)) + return nil + } + pe.putInt32(r.CoordinatorID) + if err := pe.putString(r.CoordinatorHost); err != nil { + return err + } + pe.putInt32(r.CoordinatorPort) + return nil +} + +func (r *ConsumerMetadataResponse) key() int16 { + return 10 +} + +func (r *ConsumerMetadataResponse) version() int16 { + return 0 +} + +func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { + return V0_8_2_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/crc32_field.go b/vendor/gopkg.in/Shopify/sarama.v1/crc32_field.go new file mode 100644 index 000000000..5c2860790 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/crc32_field.go @@ -0,0 +1,36 @@ +package sarama + +import ( + "encoding/binary" + + "github.com/klauspost/crc32" +) + +// crc32Field implements the pushEncoder and pushDecoder interfaces 
for calculating CRC32s. +type crc32Field struct { + startOffset int +} + +func (c *crc32Field) saveOffset(in int) { + c.startOffset = in +} + +func (c *crc32Field) reserveLength() int { + return 4 +} + +func (c *crc32Field) run(curOffset int, buf []byte) error { + crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + binary.BigEndian.PutUint32(buf[c.startOffset:], crc) + return nil +} + +func (c *crc32Field) check(curOffset int, buf []byte) error { + crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + + if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) { + return PacketDecodingError{"CRC didn't match"} + } + + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_request.go b/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_request.go new file mode 100644 index 000000000..1fb356777 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_request.go @@ -0,0 +1,30 @@ +package sarama + +type DescribeGroupsRequest struct { + Groups []string +} + +func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { + return pe.putStringArray(r.Groups) +} + +func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Groups, err = pd.getStringArray() + return +} + +func (r *DescribeGroupsRequest) key() int16 { + return 15 +} + +func (r *DescribeGroupsRequest) version() int16 { + return 0 +} + +func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *DescribeGroupsRequest) AddGroup(group string) { + r.Groups = append(r.Groups, group) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_response.go b/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_response.go new file mode 100644 index 000000000..e78b8ce02 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_response.go @@ -0,0 +1,174 @@ +package sarama + +type DescribeGroupsResponse struct { + Groups []*GroupDescription +} + +func (r *DescribeGroupsResponse) 
encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + + for _, groupDescription := range r.Groups { + if err := groupDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Groups = make([]*GroupDescription, n) + for i := 0; i < n; i++ { + r.Groups[i] = new(GroupDescription) + if err := r.Groups[i].decode(pd); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeGroupsResponse) key() int16 { + return 15 +} + +func (r *DescribeGroupsResponse) version() int16 { + return 0 +} + +func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +type GroupDescription struct { + Err KError + GroupId string + State string + ProtocolType string + Protocol string + Members map[string]*GroupMemberDescription +} + +func (gd *GroupDescription) encode(pe packetEncoder) error { + pe.putInt16(int16(gd.Err)) + + if err := pe.putString(gd.GroupId); err != nil { + return err + } + if err := pe.putString(gd.State); err != nil { + return err + } + if err := pe.putString(gd.ProtocolType); err != nil { + return err + } + if err := pe.putString(gd.Protocol); err != nil { + return err + } + + if err := pe.putArrayLength(len(gd.Members)); err != nil { + return err + } + + for memberId, groupMemberDescription := range gd.Members { + if err := pe.putString(memberId); err != nil { + return err + } + if err := groupMemberDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (gd *GroupDescription) decode(pd packetDecoder) (err error) { + if kerr, err := pd.getInt16(); err != nil { + return err + } else { + gd.Err = KError(kerr) + } + + if gd.GroupId, err = pd.getString(); err != nil { + return + } + if gd.State, err = pd.getString(); err != nil { + return + } + if gd.ProtocolType, err = 
pd.getString(); err != nil { + return + } + if gd.Protocol, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + gd.Members = make(map[string]*GroupMemberDescription) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + gd.Members[memberId] = new(GroupMemberDescription) + if err := gd.Members[memberId].decode(pd); err != nil { + return err + } + } + + return nil +} + +type GroupMemberDescription struct { + ClientId string + ClientHost string + MemberMetadata []byte + MemberAssignment []byte +} + +func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { + if err := pe.putString(gmd.ClientId); err != nil { + return err + } + if err := pe.putString(gmd.ClientHost); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberMetadata); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberAssignment); err != nil { + return err + } + + return nil +} + +func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { + if gmd.ClientId, err = pd.getString(); err != nil { + return + } + if gmd.ClientHost, err = pd.getString(); err != nil { + return + } + if gmd.MemberMetadata, err = pd.getBytes(); err != nil { + return + } + if gmd.MemberAssignment, err = pd.getBytes(); err != nil { + return + } + + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/dev.yml b/vendor/gopkg.in/Shopify/sarama.v1/dev.yml new file mode 100644 index 000000000..61ab5e5f0 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/dev.yml @@ -0,0 +1,13 @@ +name: sarama + +up: + - go: 1.6.2 + +commands: + test: + run: make test + desc: 'run unit tests' + +packages: + - git@github.com:Shopify/dev-shopify.git + diff --git a/vendor/gopkg.in/Shopify/sarama.v1/encoder_decoder.go b/vendor/gopkg.in/Shopify/sarama.v1/encoder_decoder.go new file mode 100644 index 000000000..35a24c2d9 --- /dev/null +++ 
b/vendor/gopkg.in/Shopify/sarama.v1/encoder_decoder.go @@ -0,0 +1,84 @@ +package sarama + +import "fmt" + +// Encoder is the interface that wraps the basic Encode method. +// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. +type encoder interface { + encode(pe packetEncoder) error +} + +// Encode takes an Encoder and turns it into bytes. +func encode(e encoder) ([]byte, error) { + if e == nil { + return nil, nil + } + + var prepEnc prepEncoder + var realEnc realEncoder + + err := e.encode(&prepEnc) + if err != nil { + return nil, err + } + + if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { + return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} + } + + realEnc.raw = make([]byte, prepEnc.length) + err = e.encode(&realEnc) + if err != nil { + return nil, err + } + + return realEnc.raw, nil +} + +// Decoder is the interface that wraps the basic Decode method. +// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. +type decoder interface { + decode(pd packetDecoder) error +} + +type versionedDecoder interface { + decode(pd packetDecoder, version int16) error +} + +// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, +// interpreted using Kafka's encoding rules. 
+func decode(buf []byte, in decoder) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} + +func versionedDecode(buf []byte, in versionedDecoder, version int16) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper, version) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/errors.go b/vendor/gopkg.in/Shopify/sarama.v1/errors.go new file mode 100644 index 000000000..cfb7006f7 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/errors.go @@ -0,0 +1,194 @@ +package sarama + +import ( + "errors" + "fmt" +) + +// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored +// or otherwise failed to respond. +var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") + +// ErrClosedClient is the error returned when a method is called on a client that has been closed. +var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") + +// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does +// not contain the expected information. +var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") + +// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index +// (meaning one outside of the range [0...numPartitions-1]). 
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") + +// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. +var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") + +// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. +var ErrNotConnected = errors.New("kafka: broker not connected") + +// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected +// when requesting messages, since as an optimization the server is allowed to return a partial message at the end +// of the message set. +var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") + +// ErrShuttingDown is returned when a producer receives a message during shutdown. +var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down") + +// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max +var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") + +// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, +// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. +type PacketEncodingError struct { + Info string +} + +func (err PacketEncodingError) Error() string { + return fmt.Sprintf("kafka: error encoding packet: %s", err.Info) +} + +// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response. +// This can be a bad CRC or length field, or any other invalid value. 
+type PacketDecodingError struct { + Info string +} + +func (err PacketDecodingError) Error() string { + return fmt.Sprintf("kafka: error decoding packet: %s", err.Info) +} + +// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer) +// when the specified configuration is invalid. +type ConfigurationError string + +func (err ConfigurationError) Error() string { + return "kafka: invalid configuration (" + string(err) + ")" +} + +// KError is the type of error that can be returned directly by the Kafka broker. +// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes +type KError int16 + +// Numeric error codes returned by the Kafka server. +const ( + ErrNoError KError = 0 + ErrUnknown KError = -1 + ErrOffsetOutOfRange KError = 1 + ErrInvalidMessage KError = 2 + ErrUnknownTopicOrPartition KError = 3 + ErrInvalidMessageSize KError = 4 + ErrLeaderNotAvailable KError = 5 + ErrNotLeaderForPartition KError = 6 + ErrRequestTimedOut KError = 7 + ErrBrokerNotAvailable KError = 8 + ErrReplicaNotAvailable KError = 9 + ErrMessageSizeTooLarge KError = 10 + ErrStaleControllerEpochCode KError = 11 + ErrOffsetMetadataTooLarge KError = 12 + ErrNetworkException KError = 13 + ErrOffsetsLoadInProgress KError = 14 + ErrConsumerCoordinatorNotAvailable KError = 15 + ErrNotCoordinatorForConsumer KError = 16 + ErrInvalidTopic KError = 17 + ErrMessageSetSizeTooLarge KError = 18 + ErrNotEnoughReplicas KError = 19 + ErrNotEnoughReplicasAfterAppend KError = 20 + ErrInvalidRequiredAcks KError = 21 + ErrIllegalGeneration KError = 22 + ErrInconsistentGroupProtocol KError = 23 + ErrInvalidGroupId KError = 24 + ErrUnknownMemberId KError = 25 + ErrInvalidSessionTimeout KError = 26 + ErrRebalanceInProgress KError = 27 + ErrInvalidCommitOffsetSize KError = 28 + ErrTopicAuthorizationFailed KError = 29 + ErrGroupAuthorizationFailed KError = 30 + ErrClusterAuthorizationFailed KError = 31 + 
ErrInvalidTimestamp KError = 32 + ErrUnsupportedSASLMechanism KError = 33 + ErrIllegalSASLState KError = 34 + ErrUnsupportedVersion KError = 35 +) + +func (err KError) Error() string { + // Error messages stolen/adapted from + // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + switch err { + case ErrNoError: + return "kafka server: Not an error, why are you printing me?" + case ErrUnknown: + return "kafka server: Unexpected (unknown?) server error." + case ErrOffsetOutOfRange: + return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." + case ErrInvalidMessage: + return "kafka server: Message contents does not match its CRC." + case ErrUnknownTopicOrPartition: + return "kafka server: Request was for a topic or partition that does not exist on this broker." + case ErrInvalidMessageSize: + return "kafka server: The message has a negative size." + case ErrLeaderNotAvailable: + return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." + case ErrNotLeaderForPartition: + return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." + case ErrRequestTimedOut: + return "kafka server: Request exceeded the user-specified time limit in the request." + case ErrBrokerNotAvailable: + return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" + case ErrReplicaNotAvailable: + return "kafka server: Replica infomation not available, one or more brokers are down." + case ErrMessageSizeTooLarge: + return "kafka server: Message was too large, server rejected it to avoid allocation error." + case ErrStaleControllerEpochCode: + return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." 
+ case ErrOffsetMetadataTooLarge: + return "kafka server: Specified a string larger than the configured maximum for offset metadata." + case ErrNetworkException: + return "kafka server: The server disconnected before a response was received." + case ErrOffsetsLoadInProgress: + return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition." + case ErrConsumerCoordinatorNotAvailable: + return "kafka server: Offset's topic has not yet been created." + case ErrNotCoordinatorForConsumer: + return "kafka server: Request was for a consumer group that is not coordinated by this broker." + case ErrInvalidTopic: + return "kafka server: The request attempted to perform an operation on an invalid topic." + case ErrMessageSetSizeTooLarge: + return "kafka server: The request included message batch larger than the configured segment size on the server." + case ErrNotEnoughReplicas: + return "kafka server: Messages are rejected since there are fewer in-sync replicas than required." + case ErrNotEnoughReplicasAfterAppend: + return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required." + case ErrInvalidRequiredAcks: + return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)." + case ErrIllegalGeneration: + return "kafka server: The provided generation id is not the current generation." + case ErrInconsistentGroupProtocol: + return "kafka server: The provider group protocol type is incompatible with the other members." + case ErrInvalidGroupId: + return "kafka server: The provided group id was empty." + case ErrUnknownMemberId: + return "kafka server: The provided member is not known in the current generation." + case ErrInvalidSessionTimeout: + return "kafka server: The provided session timeout is outside the allowed range." + case ErrRebalanceInProgress: + return "kafka server: A rebalance for the group is in progress. Please re-join the group." 
+ case ErrInvalidCommitOffsetSize: + return "kafka server: The provided commit metadata was too large." + case ErrTopicAuthorizationFailed: + return "kafka server: The client is not authorized to access this topic." + case ErrGroupAuthorizationFailed: + return "kafka server: The client is not authorized to access this group." + case ErrClusterAuthorizationFailed: + return "kafka server: The client is not authorized to send this request type." + case ErrInvalidTimestamp: + return "kafka server: The timestamp of the message is out of acceptable range." + case ErrUnsupportedSASLMechanism: + return "kafka server: The broker does not support the requested SASL mechanism." + case ErrIllegalSASLState: + return "kafka server: Request is not valid given the current SASL state." + case ErrUnsupportedVersion: + return "kafka server: The version of API is not supported." + } + + return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/fetch_request.go b/vendor/gopkg.in/Shopify/sarama.v1/fetch_request.go new file mode 100644 index 000000000..ae701a3f2 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/fetch_request.go @@ -0,0 +1,136 @@ +package sarama + +type fetchRequestBlock struct { + fetchOffset int64 + maxBytes int32 +} + +func (b *fetchRequestBlock) encode(pe packetEncoder) error { + pe.putInt64(b.fetchOffset) + pe.putInt32(b.maxBytes) + return nil +} + +func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { + if b.fetchOffset, err = pd.getInt64(); err != nil { + return err + } + if b.maxBytes, err = pd.getInt32(); err != nil { + return err + } + return nil +} + +type FetchRequest struct { + MaxWaitTime int32 + MinBytes int32 + Version int16 + blocks map[string]map[int32]*fetchRequestBlock +} + +func (r *FetchRequest) encode(pe packetEncoder) (err error) { + pe.putInt32(-1) // replica ID is always -1 for clients + pe.putInt32(r.MaxWaitTime) + pe.putInt32(r.MinBytes) + err = 
pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, blocks := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(blocks)) + if err != nil { + return err + } + for partition, block := range blocks { + pe.putInt32(partition) + err = block.encode(pe) + if err != nil { + return err + } + } + } + return nil +} + +func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if _, err = pd.getInt32(); err != nil { + return err + } + if r.MaxWaitTime, err = pd.getInt32(); err != nil { + return err + } + if r.MinBytes, err = pd.getInt32(); err != nil { + return err + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + fetchBlock := &fetchRequestBlock{} + if err = fetchBlock.decode(pd); err != nil { + return nil + } + r.blocks[topic][partition] = fetchBlock + } + } + return nil +} + +func (r *FetchRequest) key() int16 { + return 1 +} + +func (r *FetchRequest) version() int16 { + return r.Version +} + +func (r *FetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + default: + return minVersion + } +} + +func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + } + + tmp := new(fetchRequestBlock) + 
tmp.maxBytes = maxBytes + tmp.fetchOffset = fetchOffset + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/fetch_response.go b/vendor/gopkg.in/Shopify/sarama.v1/fetch_response.go new file mode 100644 index 000000000..b56b166c2 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/fetch_response.go @@ -0,0 +1,210 @@ +package sarama + +import "time" + +type FetchResponseBlock struct { + Err KError + HighWaterMarkOffset int64 + MsgSet MessageSet +} + +func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.HighWaterMarkOffset, err = pd.getInt64() + if err != nil { + return err + } + + msgSetSize, err := pd.getInt32() + if err != nil { + return err + } + + msgSetDecoder, err := pd.getSubset(int(msgSetSize)) + if err != nil { + return err + } + err = (&b.MsgSet).decode(msgSetDecoder) + + return err +} + +func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(b.Err)) + + pe.putInt64(b.HighWaterMarkOffset) + + pe.push(&lengthField{}) + err = b.MsgSet.encode(pe) + if err != nil { + return err + } + return pe.pop() +} + +type FetchResponse struct { + Blocks map[string]map[int32]*FetchResponseBlock + ThrottleTime time.Duration + Version int16 // v1 requires 0.9+, v2 requires 0.10+ +} + +func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.Version >= 1 { + throttle, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttle) * time.Millisecond + } + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = 
make(map[int32]*FetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(FetchResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *FetchResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + + err = pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + + for id, block := range partitions { + pe.putInt32(id) + err = block.encode(pe) + if err != nil { + return err + } + } + + } + return nil +} + +func (r *FetchResponse) key() int16 { + return 1 +} + +func (r *FetchResponse) version() int16 { + return r.Version +} + +func (r *FetchResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + default: + return minVersion + } +} + +func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *FetchResponse) AddError(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + partitions, ok := r.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + r.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + partitions[partition] = frb + } + frb.Err = err +} + +func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + 
partitions, ok := r.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + r.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + partitions[partition] = frb + } + var kb []byte + var vb []byte + if key != nil { + kb, _ = key.Encode() + } + if value != nil { + vb, _ = value.Encode() + } + msg := &Message{Key: kb, Value: vb} + msgBlock := &MessageBlock{Msg: msg, Offset: offset} + frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_request.go b/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_request.go new file mode 100644 index 000000000..ce49c4739 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_request.go @@ -0,0 +1,47 @@ +package sarama + +type HeartbeatRequest struct { + GroupId string + GenerationId int32 + MemberId string +} + +func (r *HeartbeatRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *HeartbeatRequest) key() int16 { + return 12 +} + +func (r *HeartbeatRequest) version() int16 { + return 0 +} + +func (r *HeartbeatRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_response.go b/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_response.go new file mode 100644 index 000000000..3c51163ad --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_response.go @@ -0,0 +1,32 @@ +package sarama + +type HeartbeatResponse struct { + Err KError +} + +func (r 
*HeartbeatResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { + if kerr, err := pd.getInt16(); err != nil { + return err + } else { + r.Err = KError(kerr) + } + + return nil +} + +func (r *HeartbeatResponse) key() int16 { + return 12 +} + +func (r *HeartbeatResponse) version() int16 { + return 0 +} + +func (r *HeartbeatResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/join_group_request.go b/vendor/gopkg.in/Shopify/sarama.v1/join_group_request.go new file mode 100644 index 000000000..d95085b2d --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/join_group_request.go @@ -0,0 +1,108 @@ +package sarama + +type JoinGroupRequest struct { + GroupId string + SessionTimeout int32 + MemberId string + ProtocolType string + GroupProtocols map[string][]byte +} + +func (r *JoinGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + pe.putInt32(r.SessionTimeout) + if err := pe.putString(r.MemberId); err != nil { + return err + } + if err := pe.putString(r.ProtocolType); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { + return err + } + for name, metadata := range r.GroupProtocols { + if err := pe.putString(name); err != nil { + return err + } + if err := pe.putBytes(metadata); err != nil { + return err + } + } + + return nil +} + +func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + + if r.SessionTimeout, err = pd.getInt32(); err != nil { + return + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + if r.ProtocolType, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + 
r.GroupProtocols = make(map[string][]byte) + for i := 0; i < n; i++ { + name, err := pd.getString() + if err != nil { + return err + } + metadata, err := pd.getBytes() + if err != nil { + return err + } + + r.GroupProtocols[name] = metadata + } + + return nil +} + +func (r *JoinGroupRequest) key() int16 { + return 11 +} + +func (r *JoinGroupRequest) version() int16 { + return 0 +} + +func (r *JoinGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { + if r.GroupProtocols == nil { + r.GroupProtocols = make(map[string][]byte) + } + + r.GroupProtocols[name] = metadata +} + +func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { + bin, err := encode(metadata) + if err != nil { + return err + } + + r.AddGroupProtocol(name, bin) + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/join_group_response.go b/vendor/gopkg.in/Shopify/sarama.v1/join_group_response.go new file mode 100644 index 000000000..94c7a7fde --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/join_group_response.go @@ -0,0 +1,114 @@ +package sarama + +type JoinGroupResponse struct { + Err KError + GenerationId int32 + GroupProtocol string + LeaderId string + MemberId string + Members map[string][]byte +} + +func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { + members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) + for id, bin := range r.Members { + meta := new(ConsumerGroupMemberMetadata) + if err := decode(bin, meta); err != nil { + return nil, err + } + members[id] = *meta + } + return members, nil +} + +func (r *JoinGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.GroupProtocol); err != nil { + return err + } + if err := pe.putString(r.LeaderId); err != nil { + return err + } + if err := 
pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Members)); err != nil { + return err + } + + for memberId, memberMetadata := range r.Members { + if err := pe.putString(memberId); err != nil { + return err + } + + if err := pe.putBytes(memberMetadata); err != nil { + return err + } + } + + return nil +} + +func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { + if kerr, err := pd.getInt16(); err != nil { + return err + } else { + r.Err = KError(kerr) + } + + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + + if r.GroupProtocol, err = pd.getString(); err != nil { + return + } + + if r.LeaderId, err = pd.getString(); err != nil { + return + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Members = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + memberMetadata, err := pd.getBytes() + if err != nil { + return err + } + + r.Members[memberId] = memberMetadata + } + + return nil +} + +func (r *JoinGroupResponse) key() int16 { + return 11 +} + +func (r *JoinGroupResponse) version() int16 { + return 0 +} + +func (r *JoinGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/leave_group_request.go b/vendor/gopkg.in/Shopify/sarama.v1/leave_group_request.go new file mode 100644 index 000000000..e17742748 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/leave_group_request.go @@ -0,0 +1,40 @@ +package sarama + +type LeaveGroupRequest struct { + GroupId string + MemberId string +} + +func (r *LeaveGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *LeaveGroupRequest) 
decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *LeaveGroupRequest) key() int16 { + return 13 +} + +func (r *LeaveGroupRequest) version() int16 { + return 0 +} + +func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/leave_group_response.go b/vendor/gopkg.in/Shopify/sarama.v1/leave_group_response.go new file mode 100644 index 000000000..bd4a34f46 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/leave_group_response.go @@ -0,0 +1,32 @@ +package sarama + +type LeaveGroupResponse struct { + Err KError +} + +func (r *LeaveGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { + if kerr, err := pd.getInt16(); err != nil { + return err + } else { + r.Err = KError(kerr) + } + + return nil +} + +func (r *LeaveGroupResponse) key() int16 { + return 13 +} + +func (r *LeaveGroupResponse) version() int16 { + return 0 +} + +func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/length_field.go b/vendor/gopkg.in/Shopify/sarama.v1/length_field.go new file mode 100644 index 000000000..70078be5d --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/length_field.go @@ -0,0 +1,29 @@ +package sarama + +import "encoding/binary" + +// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. 
+type lengthField struct { + startOffset int +} + +func (l *lengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *lengthField) reserveLength() int { + return 4 +} + +func (l *lengthField) run(curOffset int, buf []byte) error { + binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) + return nil +} + +func (l *lengthField) check(curOffset int, buf []byte) error { + if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) { + return PacketDecodingError{"length field invalid"} + } + + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/list_groups_request.go b/vendor/gopkg.in/Shopify/sarama.v1/list_groups_request.go new file mode 100644 index 000000000..3b16abf7f --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/list_groups_request.go @@ -0,0 +1,24 @@ +package sarama + +type ListGroupsRequest struct { +} + +func (r *ListGroupsRequest) encode(pe packetEncoder) error { + return nil +} + +func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (r *ListGroupsRequest) key() int16 { + return 16 +} + +func (r *ListGroupsRequest) version() int16 { + return 0 +} + +func (r *ListGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/list_groups_response.go b/vendor/gopkg.in/Shopify/sarama.v1/list_groups_response.go new file mode 100644 index 000000000..3a84f9b6c --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/list_groups_response.go @@ -0,0 +1,68 @@ +package sarama + +type ListGroupsResponse struct { + Err KError + Groups map[string]string +} + +func (r *ListGroupsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + for groupId, protocolType := range r.Groups { + if err := pe.putString(groupId); err != nil { + return err + } + if err := pe.putString(protocolType); err != nil { + 
return err + } + } + + return nil +} + +func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { + if kerr, err := pd.getInt16(); err != nil { + return err + } else { + r.Err = KError(kerr) + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Groups = make(map[string]string) + for i := 0; i < n; i++ { + groupId, err := pd.getString() + if err != nil { + return err + } + protocolType, err := pd.getString() + if err != nil { + return err + } + + r.Groups[groupId] = protocolType + } + + return nil +} + +func (r *ListGroupsResponse) key() int16 { + return 16 +} + +func (r *ListGroupsResponse) version() int16 { + return 0 +} + +func (r *ListGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/message.go b/vendor/gopkg.in/Shopify/sarama.v1/message.go new file mode 100644 index 000000000..0f0ca5b6d --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/message.go @@ -0,0 +1,163 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "time" + + "github.com/eapache/go-xerial-snappy" +) + +// CompressionCodec represents the various compression codecs recognized by Kafka in messages. 
+type CompressionCodec int8 + +// only the last two bits are really used +const compressionCodecMask int8 = 0x03 + +const ( + CompressionNone CompressionCodec = 0 + CompressionGZIP CompressionCodec = 1 + CompressionSnappy CompressionCodec = 2 +) + +type Message struct { + Codec CompressionCodec // codec used to compress the message contents + Key []byte // the message key, may be nil + Value []byte // the message contents + Set *MessageSet // the message set a message might wrap + Version int8 // v1 requires Kafka 0.10 + Timestamp time.Time // the timestamp of the message (version 1+ only) + + compressedCache []byte +} + +func (m *Message) encode(pe packetEncoder) error { + pe.push(&crc32Field{}) + + pe.putInt8(m.Version) + + attributes := int8(m.Codec) & compressionCodecMask + pe.putInt8(attributes) + + if m.Version >= 1 { + pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond)) + } + + err := pe.putBytes(m.Key) + if err != nil { + return err + } + + var payload []byte + + if m.compressedCache != nil { + payload = m.compressedCache + m.compressedCache = nil + } else if m.Value != nil { + switch m.Codec { + case CompressionNone: + payload = m.Value + case CompressionGZIP: + var buf bytes.Buffer + writer := gzip.NewWriter(&buf) + if _, err = writer.Write(m.Value); err != nil { + return err + } + if err = writer.Close(); err != nil { + return err + } + m.compressedCache = buf.Bytes() + payload = m.compressedCache + case CompressionSnappy: + tmp := snappy.Encode(m.Value) + m.compressedCache = tmp + payload = m.compressedCache + default: + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} + } + } + + if err = pe.putBytes(payload); err != nil { + return err + } + + return pe.pop() +} + +func (m *Message) decode(pd packetDecoder) (err error) { + err = pd.push(&crc32Field{}) + if err != nil { + return err + } + + m.Version, err = pd.getInt8() + if err != nil { + return err + } + + attribute, err := pd.getInt8() + if err != nil { 
+ return err + } + m.Codec = CompressionCodec(attribute & compressionCodecMask) + + if m.Version >= 1 { + millis, err := pd.getInt64() + if err != nil { + return err + } + m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + + m.Key, err = pd.getBytes() + if err != nil { + return err + } + + m.Value, err = pd.getBytes() + if err != nil { + return err + } + + switch m.Codec { + case CompressionNone: + // nothing to do + case CompressionGZIP: + if m.Value == nil { + break + } + reader, err := gzip.NewReader(bytes.NewReader(m.Value)) + if err != nil { + return err + } + if m.Value, err = ioutil.ReadAll(reader); err != nil { + return err + } + if err := m.decodeSet(); err != nil { + return err + } + case CompressionSnappy: + if m.Value == nil { + break + } + if m.Value, err = snappy.Decode(m.Value); err != nil { + return err + } + if err := m.decodeSet(); err != nil { + return err + } + default: + return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)} + } + + return pd.pop() +} + +// decodes a message set from a previousy encoded bulk-message +func (m *Message) decodeSet() (err error) { + pd := realDecoder{raw: m.Value} + m.Set = &MessageSet{} + return m.Set.decode(&pd) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/message_set.go b/vendor/gopkg.in/Shopify/sarama.v1/message_set.go new file mode 100644 index 000000000..f028784e5 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/message_set.go @@ -0,0 +1,89 @@ +package sarama + +type MessageBlock struct { + Offset int64 + Msg *Message +} + +// Messages convenience helper which returns either all the +// messages that are wrapped in this block +func (msb *MessageBlock) Messages() []*MessageBlock { + if msb.Msg.Set != nil { + return msb.Msg.Set.Messages + } + return []*MessageBlock{msb} +} + +func (msb *MessageBlock) encode(pe packetEncoder) error { + pe.putInt64(msb.Offset) + pe.push(&lengthField{}) + err := msb.Msg.encode(pe) + if err != nil { + return 
err + } + return pe.pop() +} + +func (msb *MessageBlock) decode(pd packetDecoder) (err error) { + if msb.Offset, err = pd.getInt64(); err != nil { + return err + } + + if err = pd.push(&lengthField{}); err != nil { + return err + } + + msb.Msg = new(Message) + if err = msb.Msg.decode(pd); err != nil { + return err + } + + if err = pd.pop(); err != nil { + return err + } + + return nil +} + +type MessageSet struct { + PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock + Messages []*MessageBlock +} + +func (ms *MessageSet) encode(pe packetEncoder) error { + for i := range ms.Messages { + err := ms.Messages[i].encode(pe) + if err != nil { + return err + } + } + return nil +} + +func (ms *MessageSet) decode(pd packetDecoder) (err error) { + ms.Messages = nil + + for pd.remaining() > 0 { + msb := new(MessageBlock) + err = msb.decode(pd) + switch err { + case nil: + ms.Messages = append(ms.Messages, msb) + case ErrInsufficientData: + // As an optimization the server is allowed to return a partial message at the + // end of the message set. Clients should handle this case. So we just ignore such things. 
+ ms.PartialTrailingMessage = true + return nil + default: + return err + } + } + + return nil +} + +func (ms *MessageSet) addMessage(msg *Message) { + block := new(MessageBlock) + block.Msg = msg + ms.Messages = append(ms.Messages, block) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/metadata_request.go b/vendor/gopkg.in/Shopify/sarama.v1/metadata_request.go new file mode 100644 index 000000000..9a26b55fd --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/metadata_request.go @@ -0,0 +1,52 @@ +package sarama + +type MetadataRequest struct { + Topics []string +} + +func (r *MetadataRequest) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + + for i := range r.Topics { + err = pe.putString(r.Topics[i]) + if err != nil { + return err + } + } + return nil +} + +func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + + r.Topics = make([]string, topicCount) + for i := range r.Topics { + topic, err := pd.getString() + if err != nil { + return err + } + r.Topics[i] = topic + } + return nil +} + +func (r *MetadataRequest) key() int16 { + return 3 +} + +func (r *MetadataRequest) version() int16 { + return 0 +} + +func (r *MetadataRequest) requiredVersion() KafkaVersion { + return minVersion +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/metadata_response.go b/vendor/gopkg.in/Shopify/sarama.v1/metadata_response.go new file mode 100644 index 000000000..f9d6a4271 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/metadata_response.go @@ -0,0 +1,239 @@ +package sarama + +type PartitionMetadata struct { + Err KError + ID int32 + Leader int32 + Replicas []int32 + Isr []int32 +} + +func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + pm.Err = KError(tmp) + + pm.ID, err = pd.getInt32() + if err != 
nil { + return err + } + + pm.Leader, err = pd.getInt32() + if err != nil { + return err + } + + pm.Replicas, err = pd.getInt32Array() + if err != nil { + return err + } + + pm.Isr, err = pd.getInt32Array() + if err != nil { + return err + } + + return nil +} + +func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(pm.Err)) + pe.putInt32(pm.ID) + pe.putInt32(pm.Leader) + + err = pe.putInt32Array(pm.Replicas) + if err != nil { + return err + } + + err = pe.putInt32Array(pm.Isr) + if err != nil { + return err + } + + return nil +} + +type TopicMetadata struct { + Err KError + Name string + Partitions []*PartitionMetadata +} + +func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + tm.Err = KError(tmp) + + tm.Name, err = pd.getString() + if err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + tm.Partitions = make([]*PartitionMetadata, n) + for i := 0; i < n; i++ { + tm.Partitions[i] = new(PartitionMetadata) + err = tm.Partitions[i].decode(pd) + if err != nil { + return err + } + } + + return nil +} + +func (tm *TopicMetadata) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(tm.Err)) + + err = pe.putString(tm.Name) + if err != nil { + return err + } + + err = pe.putArrayLength(len(tm.Partitions)) + if err != nil { + return err + } + + for _, pm := range tm.Partitions { + err = pm.encode(pe) + if err != nil { + return err + } + } + + return nil +} + +type MetadataResponse struct { + Brokers []*Broker + Topics []*TopicMetadata +} + +func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Brokers = make([]*Broker, n) + for i := 0; i < n; i++ { + r.Brokers[i] = new(Broker) + err = r.Brokers[i].decode(pd) + if err != nil { + return err + } + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + 
r.Topics = make([]*TopicMetadata, n) + for i := 0; i < n; i++ { + r.Topics[i] = new(TopicMetadata) + err = r.Topics[i].decode(pd) + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Brokers)) + if err != nil { + return err + } + for _, broker := range r.Brokers { + err = broker.encode(pe) + if err != nil { + return err + } + } + + err = pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + for _, tm := range r.Topics { + err = tm.encode(pe) + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) key() int16 { + return 3 +} + +func (r *MetadataResponse) version() int16 { + return 0 +} + +func (r *MetadataResponse) requiredVersion() KafkaVersion { + return minVersion +} + +// testing API + +func (r *MetadataResponse) AddBroker(addr string, id int32) { + r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) +} + +func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { + var tmatch *TopicMetadata + + for _, tm := range r.Topics { + if tm.Name == topic { + tmatch = tm + goto foundTopic + } + } + + tmatch = new(TopicMetadata) + tmatch.Name = topic + r.Topics = append(r.Topics, tmatch) + +foundTopic: + + tmatch.Err = err + return tmatch +} + +func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { + tmatch := r.AddTopic(topic, ErrNoError) + var pmatch *PartitionMetadata + + for _, pm := range tmatch.Partitions { + if pm.ID == partition { + pmatch = pm + goto foundPartition + } + } + + pmatch = new(PartitionMetadata) + pmatch.ID = partition + tmatch.Partitions = append(tmatch.Partitions, pmatch) + +foundPartition: + + pmatch.Leader = brokerID + pmatch.Replicas = replicas + pmatch.Isr = isr + pmatch.Err = err + +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/mockbroker.go b/vendor/gopkg.in/Shopify/sarama.v1/mockbroker.go new file 
mode 100644 index 000000000..36996a50c --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/mockbroker.go @@ -0,0 +1,300 @@ +package sarama + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "reflect" + "strconv" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" +) + +const ( + expectationTimeout = 500 * time.Millisecond +) + +type requestHandlerFunc func(req *request) (res encoder) + +// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed +// to facilitate testing of higher level or specialized consumers and producers +// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol, +// but rather provides a facility to do that. It takes care of the TCP +// transport, request unmarshaling, response marshaling, and makes it the test +// writer responsibility to program correct according to the Kafka API protocol +// MockBroker behaviour. +// +// MockBroker is implemented as a TCP server listening on a kernel-selected +// localhost port that can accept many connections. It reads Kafka requests +// from that connection and returns responses programmed by the SetHandlerByMap +// function. If a MockBroker receives a request that it has no programmed +// response for, then it returns nothing and the request times out. +// +// A set of MockRequest builders to define mappings used by MockBroker is +// provided by Sarama. But users can develop MockRequests of their own and use +// them along with or instead of the standard ones. +// +// When running tests with MockBroker it is strongly recommended to specify +// a timeout to `go test` so that if the broker hangs waiting for a response, +// the test panics. +// +// It is not necessary to prefix message length or correlation ID to your +// response bytes, the server does that automatically as a convenience. 
+type MockBroker struct { + brokerID int32 + port int32 + closing chan none + stopper chan none + expectations chan encoder + listener net.Listener + t TestReporter + latency time.Duration + handler requestHandlerFunc + history []RequestResponse + lock sync.Mutex +} + +// RequestResponse represents a Request/Response pair processed by MockBroker. +type RequestResponse struct { + Request protocolBody + Response encoder +} + +// SetLatency makes broker pause for the specified period every time before +// replying. +func (b *MockBroker) SetLatency(latency time.Duration) { + b.latency = latency +} + +// SetHandlerByMap defines mapping of Request types to MockResponses. When a +// request is received by the broker, it looks up the request type in the map +// and uses the found MockResponse instance to generate an appropriate reply. +// If the request type is not found in the map then nothing is sent. +func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { + b.setHandler(func(req *request) (res encoder) { + reqTypeName := reflect.TypeOf(req.body).Elem().Name() + mockResponse := handlerMap[reqTypeName] + if mockResponse == nil { + return nil + } + return mockResponse.For(req.body) + }) +} + +// BrokerID returns broker ID assigned to the broker. +func (b *MockBroker) BrokerID() int32 { + return b.brokerID +} + +// History returns a slice of RequestResponse pairs in the order they were +// processed by the broker. Note that in case of multiple connections to the +// broker the order expected by a test can be different from the order recorded +// in the history, unless some synchronization is implemented in the test. +func (b *MockBroker) History() []RequestResponse { + b.lock.Lock() + history := make([]RequestResponse, len(b.history)) + copy(history, b.history) + b.lock.Unlock() + return history +} + +// Port returns the TCP port number the broker is listening for requests on. 
+func (b *MockBroker) Port() int32 { + return b.port +} + +// Addr returns the broker connection string in the form "
:". +func (b *MockBroker) Addr() string { + return b.listener.Addr().String() +} + +// Close terminates the broker blocking until it stops internal goroutines and +// releases all resources. +func (b *MockBroker) Close() { + close(b.expectations) + if len(b.expectations) > 0 { + buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID())) + for e := range b.expectations { + _, _ = buf.WriteString(spew.Sdump(e)) + } + b.t.Error(buf.String()) + } + close(b.closing) + <-b.stopper +} + +// setHandler sets the specified function as the request handler. Whenever +// a mock broker reads a request from the wire it passes the request to the +// function and sends back whatever the handler function returns. +func (b *MockBroker) setHandler(handler requestHandlerFunc) { + b.lock.Lock() + b.handler = handler + b.lock.Unlock() +} + +func (b *MockBroker) serverLoop() { + defer close(b.stopper) + var err error + var conn net.Conn + + go func() { + <-b.closing + err := b.listener.Close() + if err != nil { + b.t.Error(err) + } + }() + + wg := &sync.WaitGroup{} + i := 0 + for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() { + wg.Add(1) + go b.handleRequests(conn, i, wg) + i++ + } + wg.Wait() + Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) +} + +func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { + defer wg.Done() + defer func() { + _ = conn.Close() + }() + Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx) + var err error + + abort := make(chan none) + defer close(abort) + go func() { + select { + case <-b.closing: + _ = conn.Close() + case <-abort: + } + }() + + resHeader := make([]byte, 8) + for { + req, err := decodeRequest(conn) + if err != nil { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + b.serverError(err) + break + } + + if 
b.latency > 0 { + time.Sleep(b.latency) + } + + b.lock.Lock() + res := b.handler(req) + b.history = append(b.history, RequestResponse{req.body, res}) + b.lock.Unlock() + + if res == nil { + Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) + continue + } + Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) + + encodedRes, err := encode(res) + if err != nil { + b.serverError(err) + break + } + if len(encodedRes) == 0 { + continue + } + + binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) + binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) + if _, err = conn.Write(resHeader); err != nil { + b.serverError(err) + break + } + if _, err = conn.Write(encodedRes); err != nil { + b.serverError(err) + break + } + } + Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) +} + +func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) { + select { + case res, ok := <-b.expectations: + if !ok { + return nil + } + return res + case <-time.After(expectationTimeout): + return nil + } +} + +func (b *MockBroker) serverError(err error) { + isConnectionClosedError := false + if _, ok := err.(*net.OpError); ok { + isConnectionClosedError = true + } else if err == io.EOF { + isConnectionClosedError = true + } else if err.Error() == "use of closed network connection" { + isConnectionClosedError = true + } + + if isConnectionClosedError { + return + } + + b.t.Errorf(err.Error()) +} + +// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the +// test framework and a channel of responses to use. If an error occurs it is +// simply logged to the TestReporter and the broker exits. 
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker { + return NewMockBrokerAddr(t, brokerID, "localhost:0") +} + +// NewMockBrokerAddr behaves like newMockBroker but listens on the address you give +// it rather than just some ephemeral port. +func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker { + var err error + + broker := &MockBroker{ + closing: make(chan none), + stopper: make(chan none), + t: t, + brokerID: brokerID, + expectations: make(chan encoder, 512), + } + broker.handler = broker.defaultRequestHandler + + broker.listener, err = net.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) + _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + tmp, err := strconv.ParseInt(portStr, 10, 32) + if err != nil { + t.Fatal(err) + } + broker.port = int32(tmp) + + go broker.serverLoop() + + return broker +} + +func (b *MockBroker) Returns(e encoder) { + b.expectations <- e +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/mockresponses.go b/vendor/gopkg.in/Shopify/sarama.v1/mockresponses.go new file mode 100644 index 000000000..a20314209 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/mockresponses.go @@ -0,0 +1,455 @@ +package sarama + +import ( + "fmt" +) + +// TestReporter has methods matching go's testing.T to avoid importing +// `testing` in the main part of the library. +type TestReporter interface { + Error(...interface{}) + Errorf(string, ...interface{}) + Fatal(...interface{}) + Fatalf(string, ...interface{}) +} + +// MockResponse is a response builder interface it defines one method that +// allows generating a response based on a request body. MockResponses are used +// to program behavior of MockBroker in tests. 
+type MockResponse interface { + For(reqBody versionedDecoder) (res encoder) +} + +// MockWrapper is a mock response builder that returns a particular concrete +// response regardless of the actual request passed to the `For` method. +type MockWrapper struct { + res encoder +} + +func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) { + return mw.res +} + +func NewMockWrapper(res encoder) *MockWrapper { + return &MockWrapper{res: res} +} + +// MockSequence is a mock response builder that is created from a sequence of +// concrete responses. Every time when a `MockBroker` calls its `For` method +// the next response from the sequence is returned. When the end of the +// sequence is reached the last element from the sequence is returned. +type MockSequence struct { + responses []MockResponse +} + +func NewMockSequence(responses ...interface{}) *MockSequence { + ms := &MockSequence{} + ms.responses = make([]MockResponse, len(responses)) + for i, res := range responses { + switch res := res.(type) { + case MockResponse: + ms.responses[i] = res + case encoder: + ms.responses[i] = NewMockWrapper(res) + default: + panic(fmt.Sprintf("Unexpected response type: %T", res)) + } + } + return ms +} + +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { + res = mc.responses[0].For(reqBody) + if len(mc.responses) > 1 { + mc.responses = mc.responses[1:] + } + return res +} + +// MockMetadataResponse is a `MetadataResponse` builder. 
+type MockMetadataResponse struct { + leaders map[string]map[int32]int32 + brokers map[string]int32 + t TestReporter +} + +func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { + return &MockMetadataResponse{ + leaders: make(map[string]map[int32]int32), + brokers: make(map[string]int32), + t: t, + } +} + +func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { + partitions := mmr.leaders[topic] + if partitions == nil { + partitions = make(map[int32]int32) + mmr.leaders[topic] = partitions + } + partitions[partition] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse { + mmr.brokers[addr] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { + metadataRequest := reqBody.(*MetadataRequest) + metadataResponse := &MetadataResponse{} + for addr, brokerID := range mmr.brokers { + metadataResponse.AddBroker(addr, brokerID) + } + if len(metadataRequest.Topics) == 0 { + for topic, partitions := range mmr.leaders { + for partition, brokerID := range partitions { + metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + } + } + return metadataResponse + } + for _, topic := range metadataRequest.Topics { + for partition, brokerID := range mmr.leaders[topic] { + metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + } + } + return metadataResponse +} + +// MockOffsetResponse is an `OffsetResponse` builder. 
+type MockOffsetResponse struct { + offsets map[string]map[int32]map[int64]int64 + t TestReporter +} + +func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { + return &MockOffsetResponse{ + offsets: make(map[string]map[int32]map[int64]int64), + t: t, + } +} + +func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { + partitions := mor.offsets[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]int64) + mor.offsets[topic] = partitions + } + times := partitions[partition] + if times == nil { + times = make(map[int64]int64) + partitions[partition] = times + } + times[time] = offset + return mor +} + +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { + offsetRequest := reqBody.(*OffsetRequest) + offsetResponse := &OffsetResponse{} + for topic, partitions := range offsetRequest.blocks { + for partition, block := range partitions { + offset := mor.getOffset(topic, partition, block.time) + offsetResponse.AddTopicPartition(topic, partition, offset) + } + } + return offsetResponse +} + +func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { + partitions := mor.offsets[topic] + if partitions == nil { + mor.t.Errorf("missing topic: %s", topic) + } + times := partitions[partition] + if times == nil { + mor.t.Errorf("missing partition: %d", partition) + } + offset, ok := times[time] + if !ok { + mor.t.Errorf("missing time: %d", time) + } + return offset +} + +// MockFetchResponse is a `FetchResponse` builder. 
+type MockFetchResponse struct { + messages map[string]map[int32]map[int64]Encoder + highWaterMarks map[string]map[int32]int64 + t TestReporter + batchSize int +} + +func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { + return &MockFetchResponse{ + messages: make(map[string]map[int32]map[int64]Encoder), + highWaterMarks: make(map[string]map[int32]int64), + t: t, + batchSize: batchSize, + } +} + +func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { + partitions := mfr.messages[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]Encoder) + mfr.messages[topic] = partitions + } + messages := partitions[partition] + if messages == nil { + messages = make(map[int64]Encoder) + partitions[partition] = messages + } + messages[offset] = msg + return mfr +} + +func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + partitions = make(map[int32]int64) + mfr.highWaterMarks[topic] = partitions + } + partitions[partition] = offset + return mfr +} + +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { + fetchRequest := reqBody.(*FetchRequest) + res := &FetchResponse{} + for topic, partitions := range fetchRequest.blocks { + for partition, block := range partitions { + initialOffset := block.fetchOffset + offset := initialOffset + maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) + for i := 0; i < mfr.batchSize && offset < maxOffset; { + msg := mfr.getMessage(topic, partition, offset) + if msg != nil { + res.AddMessage(topic, partition, nil, msg, offset) + i++ + } + offset++ + } + fb := res.GetBlock(topic, partition) + if fb == nil { + res.AddError(topic, partition, ErrNoError) + fb = res.GetBlock(topic, partition) + } + fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) + } + } + return res +} 
+ +func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { + partitions := mfr.messages[topic] + if partitions == nil { + return nil + } + messages := partitions[partition] + if messages == nil { + return nil + } + return messages[offset] +} + +func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { + partitions := mfr.messages[topic] + if partitions == nil { + return 0 + } + messages := partitions[partition] + if messages == nil { + return 0 + } + return len(messages) +} + +func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + return 0 + } + return partitions[partition] +} + +// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. +type MockConsumerMetadataResponse struct { + coordinators map[string]interface{} + t TestReporter +} + +func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse { + return &MockConsumerMetadataResponse{ + coordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse { + mr.coordinators[group] = broker + return mr +} + +func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse { + mr.coordinators[group] = kerror + return mr +} + +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*ConsumerMetadataRequest) + group := req.ConsumerGroup + res := &ConsumerMetadataResponse{} + v := mr.coordinators[group] + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + +// MockOffsetCommitResponse is a `OffsetCommitResponse` builder. 
+type MockOffsetCommitResponse struct { + errors map[string]map[string]map[int32]KError + t TestReporter +} + +func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse { + return &MockOffsetCommitResponse{t: t} +} + +func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[string]map[int32]KError) + } + topics := mr.errors[group] + if topics == nil { + topics = make(map[string]map[int32]KError) + mr.errors[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]KError) + topics[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*OffsetCommitRequest) + group := req.ConsumerGroup + res := &OffsetCommitResponse{} + for topic, partitions := range req.blocks { + for partition := range partitions { + res.AddError(topic, partition, mr.getError(group, topic, partition)) + } + } + return res +} + +func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError { + topics := mr.errors[group] + if topics == nil { + return ErrNoError + } + partitions := topics[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockProduceResponse is a `ProduceResponse` builder. 
+type MockProduceResponse struct { + errors map[string]map[int32]KError + t TestReporter +} + +func NewMockProduceResponse(t TestReporter) *MockProduceResponse { + return &MockProduceResponse{t: t} +} + +func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[int32]KError) + } + partitions := mr.errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + mr.errors[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*ProduceRequest) + res := &ProduceResponse{} + for topic, partitions := range req.msgSets { + for partition := range partitions { + res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) + } + } + return res +} + +func (mr *MockProduceResponse) getError(topic string, partition int32) KError { + partitions := mr.errors[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockOffsetFetchResponse is a `OffsetFetchResponse` builder. 
+type MockOffsetFetchResponse struct { + offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock + t TestReporter +} + +func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse { + return &MockOffsetFetchResponse{t: t} +} + +func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse { + if mr.offsets == nil { + mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) + } + topics := mr.offsets[group] + if topics == nil { + topics = make(map[string]map[int32]*OffsetFetchResponseBlock) + mr.offsets[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + topics[topic] = partitions + } + partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror} + return mr +} + +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*OffsetFetchRequest) + group := req.ConsumerGroup + res := &OffsetFetchResponse{} + for topic, partitions := range mr.offsets[group] { + for partition, block := range partitions { + res.AddBlock(topic, partition, block) + } + } + return res +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/offset_commit_request.go b/vendor/gopkg.in/Shopify/sarama.v1/offset_commit_request.go new file mode 100644 index 000000000..b21ea634b --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/offset_commit_request.go @@ -0,0 +1,190 @@ +package sarama + +// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which +// tells the broker to set the timestamp to the time at which the request was received. +// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2. 
+const ReceiveTime int64 = -1 + +// GroupGenerationUndefined is a special value for the group generation field of +// Offset Commit Requests that should be used when a consumer group does not rely +// on Kafka for partition management. +const GroupGenerationUndefined = -1 + +type offsetCommitRequestBlock struct { + offset int64 + timestamp int64 + metadata string +} + +func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(b.offset) + if version == 1 { + pe.putInt64(b.timestamp) + } else if b.timestamp != 0 { + Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") + } + + return pe.putString(b.metadata) +} + +func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.offset, err = pd.getInt64(); err != nil { + return err + } + if version == 1 { + if b.timestamp, err = pd.getInt64(); err != nil { + return err + } + } + b.metadata, err = pd.getString() + return err +} + +type OffsetCommitRequest struct { + ConsumerGroup string + ConsumerGroupGeneration int32 // v1 or later + ConsumerID string // v1 or later + RetentionTime int64 // v2 or later + + // Version can be: + // - 0 (kafka 0.8.1 and later) + // - 1 (kafka 0.8.2 and later) + // - 2 (kafka 0.9.0 and later) + Version int16 + blocks map[string]map[int32]*offsetCommitRequestBlock +} + +func (r *OffsetCommitRequest) encode(pe packetEncoder) error { + if r.Version < 0 || r.Version > 2 { + return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} + } + + if err := pe.putString(r.ConsumerGroup); err != nil { + return err + } + + if r.Version >= 1 { + pe.putInt32(r.ConsumerGroupGeneration) + if err := pe.putString(r.ConsumerID); err != nil { + return err + } + } else { + if r.ConsumerGroupGeneration != 0 { + Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored") + } + if r.ConsumerID != "" { + 
Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored") + } + } + + if r.Version >= 2 { + pe.putInt64(r.RetentionTime) + } else if r.RetentionTime != 0 { + Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored") + } + + if err := pe.putArrayLength(len(r.blocks)); err != nil { + return err + } + for topic, partitions := range r.blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + + if r.Version >= 1 { + if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil { + return err + } + if r.ConsumerID, err = pd.getString(); err != nil { + return err + } + } + + if r.Version >= 2 { + if r.RetentionTime, err = pd.getInt64(); err != nil { + return err + } + } + + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetCommitRequestBlock{} + if err := block.decode(pd, r.Version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetCommitRequest) key() int16 { + return 8 +} + +func (r 
*OffsetCommitRequest) version() int16 { + return r.Version +} + +func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + default: + return minVersion + } +} + +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + } + + r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/offset_commit_response.go b/vendor/gopkg.in/Shopify/sarama.v1/offset_commit_response.go new file mode 100644 index 000000000..7f277e775 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/offset_commit_response.go @@ -0,0 +1,85 @@ +package sarama + +type OffsetCommitResponse struct { + Errors map[string]map[int32]KError +} + +func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]KError) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + r.Errors[topic] = partitions + } + partitions[partition] = kerror +} + +func (r *OffsetCommitResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Errors)); err != nil { + return err + } + for topic, partitions := range r.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, kerror := range partitions { + pe.putInt32(partition) + pe.putInt16(int16(kerror)) + } + } + return nil +} + +func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + 
r.Errors = make(map[string]map[int32]KError, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numErrors, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Errors[name] = make(map[int32]KError, numErrors) + + for j := 0; j < numErrors; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Errors[name][id] = KError(tmp) + } + } + + return nil +} + +func (r *OffsetCommitResponse) key() int16 { + return 8 +} + +func (r *OffsetCommitResponse) version() int16 { + return 0 +} + +func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { + return minVersion +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/offset_fetch_request.go b/vendor/gopkg.in/Shopify/sarama.v1/offset_fetch_request.go new file mode 100644 index 000000000..b19fe79ba --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/offset_fetch_request.go @@ -0,0 +1,81 @@ +package sarama + +type OffsetFetchRequest struct { + ConsumerGroup string + Version int16 + partitions map[string][]int32 +} + +func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { + if r.Version < 0 || r.Version > 1 { + return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} + } + + if err = pe.putString(r.ConsumerGroup); err != nil { + return err + } + if err = pe.putArrayLength(len(r.partitions)); err != nil { + return err + } + for topic, partitions := range r.partitions { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putInt32Array(partitions); err != nil { + return err + } + } + return nil +} + +func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + if partitionCount == 0 { + return nil + } + 
r.partitions = make(map[string][]int32) + for i := 0; i < partitionCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitions, err := pd.getInt32Array() + if err != nil { + return err + } + r.partitions[topic] = partitions + } + return nil +} + +func (r *OffsetFetchRequest) key() int16 { + return 9 +} + +func (r *OffsetFetchRequest) version() int16 { + return r.Version +} + +func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + default: + return minVersion + } +} + +func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { + if r.partitions == nil { + r.partitions = make(map[string][]int32) + } + + r.partitions[topic] = append(r.partitions[topic], partitionID) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/offset_fetch_response.go b/vendor/gopkg.in/Shopify/sarama.v1/offset_fetch_response.go new file mode 100644 index 000000000..323220eac --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/offset_fetch_response.go @@ -0,0 +1,143 @@ +package sarama + +type OffsetFetchResponseBlock struct { + Offset int64 + Metadata string + Err KError +} + +func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + b.Metadata, err = pd.getString() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + return nil +} + +func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt64(b.Offset) + + err = pe.putString(b.Metadata) + if err != nil { + return err + } + + pe.putInt16(int16(b.Err)) + + return nil +} + +type OffsetFetchResponse struct { + Blocks map[string]map[int32]*OffsetFetchResponseBlock +} + +func (r *OffsetFetchResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + for topic, partitions := range r.Blocks { + if err 
:= pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + if numBlocks == 0 { + r.Blocks[name] = nil + continue + } + r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetFetchResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetFetchResponse) key() int16 { + return 9 +} + +func (r *OffsetFetchResponse) version() int16 { + return 0 +} + +func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { + return minVersion +} + +func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) + } + partitions := r.Blocks[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + r.Blocks[topic] = partitions + } + partitions[partition] = block +} diff --git 
a/vendor/gopkg.in/Shopify/sarama.v1/offset_manager.go b/vendor/gopkg.in/Shopify/sarama.v1/offset_manager.go new file mode 100644 index 000000000..5e15cdafe --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/offset_manager.go @@ -0,0 +1,542 @@ +package sarama + +import ( + "sync" + "time" +) + +// Offset Manager + +// OffsetManager uses Kafka to store and fetch consumed partition offsets. +type OffsetManager interface { + // ManagePartition creates a PartitionOffsetManager on the given topic/partition. + // It will return an error if this OffsetManager is already managing the given + // topic/partition. + ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) + + // Close stops the OffsetManager from managing offsets. It is required to call + // this function before an OffsetManager object passes out of scope, as it + // will otherwise leak memory. You must call this after all the + // PartitionOffsetManagers are closed. + Close() error +} + +type offsetManager struct { + client Client + conf *Config + group string + + lock sync.Mutex + poms map[string]map[int32]*partitionOffsetManager + boms map[*Broker]*brokerOffsetManager +} + +// NewOffsetManagerFromClient creates a new OffsetManager from the given client. +// It is still necessary to call Close() on the underlying client when finished with the partition manager. 
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + om := &offsetManager{ + client: client, + conf: client.Config(), + group: group, + poms: make(map[string]map[int32]*partitionOffsetManager), + boms: make(map[*Broker]*brokerOffsetManager), + } + + return om, nil +} + +func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) { + pom, err := om.newPartitionOffsetManager(topic, partition) + if err != nil { + return nil, err + } + + om.lock.Lock() + defer om.lock.Unlock() + + topicManagers := om.poms[topic] + if topicManagers == nil { + topicManagers = make(map[int32]*partitionOffsetManager) + om.poms[topic] = topicManagers + } + + if topicManagers[partition] != nil { + return nil, ConfigurationError("That topic/partition is already being managed") + } + + topicManagers[partition] = pom + return pom, nil +} + +func (om *offsetManager) Close() error { + return nil +} + +func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager { + om.lock.Lock() + defer om.lock.Unlock() + + bom := om.boms[broker] + if bom == nil { + bom = om.newBrokerOffsetManager(broker) + om.boms[broker] = bom + } + + bom.refs++ + + return bom +} + +func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) { + om.lock.Lock() + defer om.lock.Unlock() + + bom.refs-- + + if bom.refs == 0 { + close(bom.updateSubscriptions) + if om.boms[bom.broker] == bom { + delete(om.boms, bom.broker) + } + } +} + +func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) { + om.lock.Lock() + defer om.lock.Unlock() + + delete(om.boms, bom.broker) +} + +func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) { + om.lock.Lock() + defer om.lock.Unlock() + + delete(om.poms[pom.topic], pom.partition) + if 
len(om.poms[pom.topic]) == 0 { + delete(om.poms, pom.topic) + } +} + +// Partition Offset Manager + +// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close() +// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes +// out of scope. +type PartitionOffsetManager interface { + // NextOffset returns the next offset that should be consumed for the managed + // partition, accompanied by metadata which can be used to reconstruct the state + // of the partition consumer when it resumes. NextOffset() will return + // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset + // was committed for this partition yet. + NextOffset() (int64, string) + + // MarkOffset marks the provided offset, alongside a metadata string + // that represents the state of the partition consumer at that point in time. The + // metadata string can be used by another consumer to restore that state, so it + // can resume consumption. + // + // To follow upstream conventions, you are expected to mark the offset of the + // next message to read, not the last message read. Thus, when calling `MarkOffset` + // you should typically add one to the offset of the last consumed message. + // + // Note: calling MarkOffset does not necessarily commit the offset to the backend + // store immediately for efficiency reasons, and it may never be committed if + // your application crashes. This means that you may end up processing the same + // message twice, and your processing should ideally be idempotent. + MarkOffset(offset int64, metadata string) + + // Errors returns a read channel of errors that occur during offset management, if + // enabled. By default, errors are logged and not returned over this channel. If + // you want to implement any custom error handling, set your config's + // Consumer.Return.Errors setting to true, and read from this channel. 
+ Errors() <-chan *ConsumerError + + // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will + // return immediately, after which you should wait until the 'errors' channel has + // been drained and closed. It is required to call this function, or Close before + // a consumer object passes out of scope, as it will otherwise leak memory. You + // must call this before calling Close on the underlying client. + AsyncClose() + + // Close stops the PartitionOffsetManager from managing offsets. It is required to + // call this function (or AsyncClose) before a PartitionOffsetManager object + // passes out of scope, as it will otherwise leak memory. You must call this + // before calling Close on the underlying client. + Close() error +} + +type partitionOffsetManager struct { + parent *offsetManager + topic string + partition int32 + + lock sync.Mutex + offset int64 + metadata string + dirty bool + clean sync.Cond + broker *brokerOffsetManager + + errors chan *ConsumerError + rebalance chan none + dying chan none +} + +func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { + pom := &partitionOffsetManager{ + parent: om, + topic: topic, + partition: partition, + errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), + rebalance: make(chan none, 1), + dying: make(chan none), + } + pom.clean.L = &pom.lock + + if err := pom.selectBroker(); err != nil { + return nil, err + } + + if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil { + return nil, err + } + + pom.broker.updateSubscriptions <- pom + + go withRecover(pom.mainLoop) + + return pom, nil +} + +func (pom *partitionOffsetManager) mainLoop() { + for { + select { + case <-pom.rebalance: + if err := pom.selectBroker(); err != nil { + pom.handleError(err) + pom.rebalance <- none{} + } else { + pom.broker.updateSubscriptions <- pom + } + case <-pom.dying: + if pom.broker != nil { + select { + case 
<-pom.rebalance: + case pom.broker.updateSubscriptions <- pom: + } + pom.parent.unrefBrokerOffsetManager(pom.broker) + } + pom.parent.abandonPartitionOffsetManager(pom) + close(pom.errors) + return + } + } +} + +func (pom *partitionOffsetManager) selectBroker() error { + if pom.broker != nil { + pom.parent.unrefBrokerOffsetManager(pom.broker) + pom.broker = nil + } + + var broker *Broker + var err error + + if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil { + return err + } + + if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil { + return err + } + + pom.broker = pom.parent.refBrokerOffsetManager(broker) + return nil +} + +func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error { + request := new(OffsetFetchRequest) + request.Version = 1 + request.ConsumerGroup = pom.parent.group + request.AddPartition(pom.topic, pom.partition) + + response, err := pom.broker.broker.FetchOffset(request) + if err != nil { + return err + } + + block := response.GetBlock(pom.topic, pom.partition) + if block == nil { + return ErrIncompleteResponse + } + + switch block.Err { + case ErrNoError: + pom.offset = block.Offset + pom.metadata = block.Metadata + return nil + case ErrNotCoordinatorForConsumer: + if retries <= 0 { + return block.Err + } + if err := pom.selectBroker(); err != nil { + return err + } + return pom.fetchInitialOffset(retries - 1) + case ErrOffsetsLoadInProgress: + if retries <= 0 { + return block.Err + } + time.Sleep(pom.parent.conf.Metadata.Retry.Backoff) + return pom.fetchInitialOffset(retries - 1) + default: + return block.Err + } +} + +func (pom *partitionOffsetManager) handleError(err error) { + cErr := &ConsumerError{ + Topic: pom.topic, + Partition: pom.partition, + Err: err, + } + + if pom.parent.conf.Consumer.Return.Errors { + pom.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { + return pom.errors +} + +func (pom 
*partitionOffsetManager) MarkOffset(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if offset > pom.offset { + pom.offset = offset + pom.metadata = metadata + pom.dirty = true + } +} + +func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if pom.offset == offset && pom.metadata == metadata { + pom.dirty = false + pom.clean.Signal() + } +} + +func (pom *partitionOffsetManager) NextOffset() (int64, string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if pom.offset >= 0 { + return pom.offset, pom.metadata + } + + return pom.parent.conf.Consumer.Offsets.Initial, "" +} + +func (pom *partitionOffsetManager) AsyncClose() { + go func() { + pom.lock.Lock() + defer pom.lock.Unlock() + + for pom.dirty { + pom.clean.Wait() + } + + close(pom.dying) + }() +} + +func (pom *partitionOffsetManager) Close() error { + pom.AsyncClose() + + var errors ConsumerErrors + for err := range pom.errors { + errors = append(errors, err) + } + + if len(errors) > 0 { + return errors + } + return nil +} + +// Broker Offset Manager + +type brokerOffsetManager struct { + parent *offsetManager + broker *Broker + timer *time.Ticker + updateSubscriptions chan *partitionOffsetManager + subscriptions map[*partitionOffsetManager]none + refs int +} + +func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager { + bom := &brokerOffsetManager{ + parent: om, + broker: broker, + timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval), + updateSubscriptions: make(chan *partitionOffsetManager), + subscriptions: make(map[*partitionOffsetManager]none), + } + + go withRecover(bom.mainLoop) + + return bom +} + +func (bom *brokerOffsetManager) mainLoop() { + for { + select { + case <-bom.timer.C: + if len(bom.subscriptions) > 0 { + bom.flushToBroker() + } + case s, ok := <-bom.updateSubscriptions: + if !ok { + bom.timer.Stop() + return + } + if _, ok := 
bom.subscriptions[s]; ok { + delete(bom.subscriptions, s) + } else { + bom.subscriptions[s] = none{} + } + } + } +} + +func (bom *brokerOffsetManager) flushToBroker() { + request := bom.constructRequest() + if request == nil { + return + } + + response, err := bom.broker.CommitOffset(request) + + if err != nil { + bom.abort(err) + return + } + + for s := range bom.subscriptions { + if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil { + continue + } + + var err KError + var ok bool + + if response.Errors[s.topic] == nil { + s.handleError(ErrIncompleteResponse) + delete(bom.subscriptions, s) + s.rebalance <- none{} + continue + } + if err, ok = response.Errors[s.topic][s.partition]; !ok { + s.handleError(ErrIncompleteResponse) + delete(bom.subscriptions, s) + s.rebalance <- none{} + continue + } + + switch err { + case ErrNoError: + block := request.blocks[s.topic][s.partition] + s.updateCommitted(block.offset, block.metadata) + case ErrNotLeaderForPartition, ErrLeaderNotAvailable, + ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: + // not a critical error, we just need to redispatch + delete(bom.subscriptions, s) + s.rebalance <- none{} + case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: + // nothing we can do about this, just tell the user and carry on + s.handleError(err) + case ErrOffsetsLoadInProgress: + // nothing wrong but we didn't commit, we'll get it next time round + break + case ErrUnknownTopicOrPartition: + // let the user know *and* try redispatching - if topic-auto-create is + // enabled, redispatching should trigger a metadata request and create the + // topic; if not then re-dispatching won't help, but we've let the user + // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + fallthrough + default: + // dunno, tell the user and try redispatching + s.handleError(err) + delete(bom.subscriptions, s) + s.rebalance <- none{} + } + } +} + +func (bom 
*brokerOffsetManager) constructRequest() *OffsetCommitRequest { + var r *OffsetCommitRequest + var perPartitionTimestamp int64 + if bom.parent.conf.Consumer.Offsets.Retention == 0 { + perPartitionTimestamp = ReceiveTime + r = &OffsetCommitRequest{ + Version: 1, + ConsumerGroup: bom.parent.group, + ConsumerGroupGeneration: GroupGenerationUndefined, + } + } else { + r = &OffsetCommitRequest{ + Version: 2, + RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond), + ConsumerGroup: bom.parent.group, + ConsumerGroupGeneration: GroupGenerationUndefined, + } + + } + + for s := range bom.subscriptions { + s.lock.Lock() + if s.dirty { + r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata) + } + s.lock.Unlock() + } + + if len(r.blocks) > 0 { + return r + } + + return nil +} + +func (bom *brokerOffsetManager) abort(err error) { + _ = bom.broker.Close() // we don't care about the error this might return, we already have one + bom.parent.abandonBroker(bom) + + for pom := range bom.subscriptions { + pom.handleError(err) + pom.rebalance <- none{} + } + + for s := range bom.updateSubscriptions { + if _, ok := bom.subscriptions[s]; !ok { + s.handleError(err) + s.rebalance <- none{} + } + } + + bom.subscriptions = make(map[*partitionOffsetManager]none) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/offset_request.go b/vendor/gopkg.in/Shopify/sarama.v1/offset_request.go new file mode 100644 index 000000000..c66d8f709 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/offset_request.go @@ -0,0 +1,117 @@ +package sarama + +type offsetRequestBlock struct { + time int64 + maxOffsets int32 +} + +func (b *offsetRequestBlock) encode(pe packetEncoder) error { + pe.putInt64(int64(b.time)) + pe.putInt32(b.maxOffsets) + return nil +} + +func (b *offsetRequestBlock) decode(pd packetDecoder) (err error) { + if b.time, err = pd.getInt64(); err != nil { + return err + } + if b.maxOffsets, err = pd.getInt32(); err != nil { + return err + } + 
return nil +} + +type OffsetRequest struct { + blocks map[string]map[int32]*offsetRequestBlock +} + +func (r *OffsetRequest) encode(pe packetEncoder) error { + pe.putInt32(-1) // replica ID is always -1 for clients + err := pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, partitions := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { + // Ignore replica ID + if _, err := pd.getInt32(); err != nil { + return err + } + blockCount, err := pd.getArrayLength() + if err != nil { + return err + } + if blockCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + for i := 0; i < blockCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetRequestBlock{} + if err := block.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetRequest) key() int16 { + return 2 +} + +func (r *OffsetRequest) version() int16 { + return 0 +} + +func (r *OffsetRequest) requiredVersion() KafkaVersion { + return minVersion +} + +func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + } + + tmp := new(offsetRequestBlock) + tmp.time = time + 
tmp.maxOffsets = maxOffsets + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/offset_response.go b/vendor/gopkg.in/Shopify/sarama.v1/offset_response.go new file mode 100644 index 000000000..ad1a66974 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/offset_response.go @@ -0,0 +1,142 @@ +package sarama + +type OffsetResponseBlock struct { + Err KError + Offsets []int64 +} + +func (b *OffsetResponseBlock) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.Offsets, err = pd.getInt64Array() + + return err +} + +func (b *OffsetResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(b.Err)) + + return pe.putInt64Array(b.Offsets) +} + +type OffsetResponse struct { + Blocks map[string]map[int32]*OffsetResponseBlock +} + +func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +/* +// [0 0 0 1 ntopics +0 8 109 121 95 116 111 112 105 99 topic +0 0 0 1 npartitions +0 0 0 0 id +0 0 + +0 0 0 1 0 0 0 0 +0 1 1 1 0 0 0 1 +0 8 109 121 95 116 111 112 +105 99 0 0 0 1 0 0 
+0 0 0 0 0 0 0 1 +0 0 0 0 0 1 1 1] + +*/ +func (r *OffsetResponse) encode(pe packetEncoder) (err error) { + if err = pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + + for topic, partitions := range r.Blocks { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (r *OffsetResponse) key() int16 { + return 2 +} + +func (r *OffsetResponse) version() int16 { + return 0 +} + +func (r *OffsetResponse) requiredVersion() KafkaVersion { + return minVersion +} + +// testing API + +func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*OffsetResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}} +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/packet_decoder.go b/vendor/gopkg.in/Shopify/sarama.v1/packet_decoder.go new file mode 100644 index 000000000..28670c0e6 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/packet_decoder.go @@ -0,0 +1,45 @@ +package sarama + +// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. +// Types implementing Decoder only need to worry about calling methods like GetString, +// not about how a string is represented in Kafka. 
+type packetDecoder interface { + // Primitives + getInt8() (int8, error) + getInt16() (int16, error) + getInt32() (int32, error) + getInt64() (int64, error) + getArrayLength() (int, error) + + // Collections + getBytes() ([]byte, error) + getString() (string, error) + getInt32Array() ([]int32, error) + getInt64Array() ([]int64, error) + getStringArray() ([]string, error) + + // Subsets + remaining() int + getSubset(length int) (packetDecoder, error) + + // Stacks, see PushDecoder + push(in pushDecoder) error + pop() error +} + +// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity +// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where +// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they +// depend upon have been decoded. +type pushDecoder interface { + // Saves the offset into the input buffer as the location to actually read the calculated value when able. + saveOffset(in int) + + // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). + reserveLength() int + + // Indicates that all required data is now available to calculate and check the field. + // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes + // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. + check(curOffset int, buf []byte) error +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/packet_encoder.go b/vendor/gopkg.in/Shopify/sarama.v1/packet_encoder.go new file mode 100644 index 000000000..0df6e24aa --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/packet_encoder.go @@ -0,0 +1,42 @@ +package sarama + +// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. 
+// Types implementing Encoder only need to worry about calling methods like PutString, +// not about how a string is represented in Kafka. +type packetEncoder interface { + // Primitives + putInt8(in int8) + putInt16(in int16) + putInt32(in int32) + putInt64(in int64) + putArrayLength(in int) error + + // Collections + putBytes(in []byte) error + putRawBytes(in []byte) error + putString(in string) error + putStringArray(in []string) error + putInt32Array(in []int32) error + putInt64Array(in []int64) error + + // Stacks, see PushEncoder + push(in pushEncoder) + pop() error +} + +// PushEncoder is the interface for encoding fields like CRCs and lengths where the value +// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where +// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they +// depend upon have been written. +type pushEncoder interface { + // Saves the offset into the input buffer as the location to actually write the calculated value when able. + saveOffset(in int) + + // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32). + reserveLength() int + + // Indicates that all required data is now available to calculate and write the field. + // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes + // of data to the saved offset, based on the data between the saved offset and curOffset. 
+ run(curOffset int, buf []byte) error +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/partitioner.go b/vendor/gopkg.in/Shopify/sarama.v1/partitioner.go new file mode 100644 index 000000000..d24199da9 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/partitioner.go @@ -0,0 +1,123 @@ +package sarama + +import ( + "hash" + "hash/fnv" + "math/rand" + "time" +) + +// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1], +// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided +// as simple default implementations. +type Partitioner interface { + // Partition takes a message and partition count and chooses a partition + Partition(message *ProducerMessage, numPartitions int32) (int32, error) + + // RequiresConsistency indicates to the user of the partitioner whether the + // mapping of key->partition is consistent or not. Specifically, if a + // partitioner requires consistency then it must be allowed to choose from all + // partitions (even ones known to be unavailable), and its choice must be + // respected by the caller. The obvious example is the HashPartitioner. + RequiresConsistency() bool +} + +// PartitionerConstructor is the type for a function capable of constructing new Partitioners. +type PartitionerConstructor func(topic string) Partitioner + +type manualPartitioner struct{} + +// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided +// ProducerMessage's Partition field as the partition to produce to. 
+func NewManualPartitioner(topic string) Partitioner { + return new(manualPartitioner) +} + +func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + return message.Partition, nil +} + +func (p *manualPartitioner) RequiresConsistency() bool { + return true +} + +type randomPartitioner struct { + generator *rand.Rand +} + +// NewRandomPartitioner returns a Partitioner which chooses a random partition each time. +func NewRandomPartitioner(topic string) Partitioner { + p := new(randomPartitioner) + p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + return p +} + +func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + return int32(p.generator.Intn(int(numPartitions))), nil +} + +func (p *randomPartitioner) RequiresConsistency() bool { + return false +} + +type roundRobinPartitioner struct { + partition int32 +} + +// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time. +func NewRoundRobinPartitioner(topic string) Partitioner { + return &roundRobinPartitioner{} +} + +func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if p.partition >= numPartitions { + p.partition = 0 + } + ret := p.partition + p.partition++ + return ret, nil +} + +func (p *roundRobinPartitioner) RequiresConsistency() bool { + return false +} + +type hashPartitioner struct { + random Partitioner + hasher hash.Hash32 +} + +// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a +// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used, +// modulus the number of partitions. This ensures that messages with the same key always end up on the +// same partition. 
+func NewHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + return p +} + +func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if message.Key == nil { + return p.random.Partition(message, numPartitions) + } + bytes, err := message.Key.Encode() + if err != nil { + return -1, err + } + p.hasher.Reset() + _, err = p.hasher.Write(bytes) + if err != nil { + return -1, err + } + partition := int32(p.hasher.Sum32()) % numPartitions + if partition < 0 { + partition = -partition + } + return partition, nil +} + +func (p *hashPartitioner) RequiresConsistency() bool { + return true +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/prep_encoder.go b/vendor/gopkg.in/Shopify/sarama.v1/prep_encoder.go new file mode 100644 index 000000000..8c6ba8502 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/prep_encoder.go @@ -0,0 +1,110 @@ +package sarama + +import ( + "fmt" + "math" +) + +type prepEncoder struct { + length int +} + +// primitives + +func (pe *prepEncoder) putInt8(in int8) { + pe.length++ +} + +func (pe *prepEncoder) putInt16(in int16) { + pe.length += 2 +} + +func (pe *prepEncoder) putInt32(in int32) { + pe.length += 4 +} + +func (pe *prepEncoder) putInt64(in int64) { + pe.length += 8 +} + +func (pe *prepEncoder) putArrayLength(in int) error { + if in > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} + } + pe.length += 4 + return nil +} + +// arrays + +func (pe *prepEncoder) putBytes(in []byte) error { + pe.length += 4 + if in == nil { + return nil + } + if len(in) > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putRawBytes(in []byte) error { + if len(in) > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + } + pe.length += len(in) + 
return nil +} + +func (pe *prepEncoder) putString(in string) error { + pe.length += 2 + if len(in) > math.MaxInt16 { + return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putStringArray(in []string) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, str := range in { + if err := pe.putString(str); err != nil { + return err + } + } + + return nil +} + +func (pe *prepEncoder) putInt32Array(in []int32) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putInt64Array(in []int64) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 8 * len(in) + return nil +} + +// stackable + +func (pe *prepEncoder) push(in pushEncoder) { + pe.length += in.reserveLength() +} + +func (pe *prepEncoder) pop() error { + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/produce_request.go b/vendor/gopkg.in/Shopify/sarama.v1/produce_request.go new file mode 100644 index 000000000..f8a250946 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/produce_request.go @@ -0,0 +1,157 @@ +package sarama + +// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements +// it must see before responding. Any of the constants defined here are valid. On broker versions +// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many +// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced +// by setting the `min.isr` value in the brokers configuration). +type RequiredAcks int16 + +const ( + // NoResponse doesn't send any response, the TCP ACK is all you get. + NoResponse RequiredAcks = 0 + // WaitForLocal waits for only the local commit to succeed before responding. 
+ WaitForLocal RequiredAcks = 1 + // WaitForAll waits for all replicas to commit before responding. + WaitForAll RequiredAcks = -1 +) + +type ProduceRequest struct { + RequiredAcks RequiredAcks + Timeout int32 + Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10 + msgSets map[string]map[int32]*MessageSet +} + +func (r *ProduceRequest) encode(pe packetEncoder) error { + pe.putInt16(int16(r.RequiredAcks)) + pe.putInt32(r.Timeout) + err := pe.putArrayLength(len(r.msgSets)) + if err != nil { + return err + } + for topic, partitions := range r.msgSets { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for id, msgSet := range partitions { + pe.putInt32(id) + pe.push(&lengthField{}) + err = msgSet.encode(pe) + if err != nil { + return err + } + err = pe.pop() + if err != nil { + return err + } + } + } + return nil +} + +func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { + requiredAcks, err := pd.getInt16() + if err != nil { + return err + } + r.RequiredAcks = RequiredAcks(requiredAcks) + if r.Timeout, err = pd.getInt32(); err != nil { + return err + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.msgSets = make(map[string]map[int32]*MessageSet) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.msgSets[topic] = make(map[int32]*MessageSet) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + messageSetSize, err := pd.getInt32() + if err != nil { + return err + } + msgSetDecoder, err := pd.getSubset(int(messageSetSize)) + if err != nil { + return err + } + msgSet := &MessageSet{} + err = msgSet.decode(msgSetDecoder) + if err != nil { + return err + } + r.msgSets[topic][partition] = 
msgSet + } + } + return nil +} + +func (r *ProduceRequest) key() int16 { + return 0 +} + +func (r *ProduceRequest) version() int16 { + return r.Version +} + +func (r *ProduceRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + default: + return minVersion + } +} + +func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { + if r.msgSets == nil { + r.msgSets = make(map[string]map[int32]*MessageSet) + } + + if r.msgSets[topic] == nil { + r.msgSets[topic] = make(map[int32]*MessageSet) + } + + set := r.msgSets[topic][partition] + + if set == nil { + set = new(MessageSet) + r.msgSets[topic][partition] = set + } + + set.addMessage(msg) +} + +func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { + if r.msgSets == nil { + r.msgSets = make(map[string]map[int32]*MessageSet) + } + + if r.msgSets[topic] == nil { + r.msgSets[topic] = make(map[int32]*MessageSet) + } + + r.msgSets[topic][partition] = set +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/produce_response.go b/vendor/gopkg.in/Shopify/sarama.v1/produce_response.go new file mode 100644 index 000000000..195abcb81 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/produce_response.go @@ -0,0 +1,158 @@ +package sarama + +import "time" + +type ProduceResponseBlock struct { + Err KError + Offset int64 + // only provided if Version >= 2 and the broker is configured with `LogAppendTime` + Timestamp time.Time +} + +func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 2 { + if millis, err := pd.getInt64(); err != nil { + return err + } else if millis != -1 { + b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + } + + return nil +} + +type ProduceResponse struct { + Blocks 
map[string]map[int32]*ProduceResponseBlock + Version int16 + ThrottleTime time.Duration // only provided if Version >= 1 +} + +func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(ProduceResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + if r.Version >= 1 { + if millis, err := pd.getInt32(); err != nil { + return err + } else { + r.ThrottleTime = time.Duration(millis) * time.Millisecond + } + } + + return nil +} + +func (r *ProduceResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for id, prb := range partitions { + pe.putInt32(id) + pe.putInt16(int16(prb.Err)) + pe.putInt64(prb.Offset) + } + } + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + return nil +} + +func (r *ProduceResponse) key() int16 { + return 0 +} + +func (r *ProduceResponse) version() int16 { + return r.Version +} + +func (r *ProduceResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + default: + return minVersion + } +} + +func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { + if 
r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +// Testing API + +func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*ProduceResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &ProduceResponseBlock{Err: err} +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/produce_set.go b/vendor/gopkg.in/Shopify/sarama.v1/produce_set.go new file mode 100644 index 000000000..992f1f141 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/produce_set.go @@ -0,0 +1,166 @@ +package sarama + +import "time" + +type partitionSet struct { + msgs []*ProducerMessage + setToSend *MessageSet + bufferBytes int +} + +type produceSet struct { + parent *asyncProducer + msgs map[string]map[int32]*partitionSet + + bufferBytes int + bufferCount int +} + +func newProduceSet(parent *asyncProducer) *produceSet { + return &produceSet{ + msgs: make(map[string]map[int32]*partitionSet), + parent: parent, + } +} + +func (ps *produceSet) add(msg *ProducerMessage) error { + var err error + var key, val []byte + + if msg.Key != nil { + if key, err = msg.Key.Encode(); err != nil { + return err + } + } + + if msg.Value != nil { + if val, err = msg.Value.Encode(); err != nil { + return err + } + } + + partitions := ps.msgs[msg.Topic] + if partitions == nil { + partitions = make(map[int32]*partitionSet) + ps.msgs[msg.Topic] = partitions + } + + set := partitions[msg.Partition] + if set == nil { + set = &partitionSet{setToSend: new(MessageSet)} + partitions[msg.Partition] = set + } + + set.msgs = append(set.msgs, msg) + msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) && !msg.Timestamp.IsZero() { + msgToSend.Timestamp = msg.Timestamp + msgToSend.Version = 
1 + } + set.setToSend.addMessage(msgToSend) + + size := producerMessageOverhead + len(key) + len(val) + set.bufferBytes += size + ps.bufferBytes += size + ps.bufferCount++ + + return nil +} + +func (ps *produceSet) buildRequest() *ProduceRequest { + req := &ProduceRequest{ + RequiredAcks: ps.parent.conf.Producer.RequiredAcks, + Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + req.Version = 2 + } + + for topic, partitionSet := range ps.msgs { + for partition, set := range partitionSet { + if ps.parent.conf.Producer.Compression == CompressionNone { + req.AddSet(topic, partition, set.setToSend) + } else { + // When compression is enabled, the entire set for each partition is compressed + // and sent as the payload of a single fake "message" with the appropriate codec + // set and no key. When the server sees a message with a compression codec, it + // decompresses the payload and treats the result as its message set. + payload, err := encode(set.setToSend) + if err != nil { + Logger.Println(err) // if this happens, it's basically our fault. 
+ panic(err) + } + req.AddMessage(topic, partition, &Message{ + Codec: ps.parent.conf.Producer.Compression, + Key: nil, + Value: payload, + }) + } + } + } + + return req +} + +func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) { + for topic, partitionSet := range ps.msgs { + for partition, set := range partitionSet { + cb(topic, partition, set.msgs) + } + } +} + +func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage { + if ps.msgs[topic] == nil { + return nil + } + set := ps.msgs[topic][partition] + if set == nil { + return nil + } + ps.bufferBytes -= set.bufferBytes + ps.bufferCount -= len(set.msgs) + delete(ps.msgs[topic], partition) + return set.msgs +} + +func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { + switch { + // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. + case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)): + return true + // Would we overflow the size-limit of a compressed message-batch for this partition? + case ps.parent.conf.Producer.Compression != CompressionNone && + ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes: + return true + // Would we overflow simply in number of messages? 
+ case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: + return true + default: + return false + } +} + +func (ps *produceSet) readyToFlush() bool { + switch { + // If we don't have any messages, nothing else matters + case ps.empty(): + return false + // If all three config values are 0, we always flush as-fast-as-possible + case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0: + return true + // If we've passed the message trigger-point + case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages: + return true + // If we've passed the byte trigger-point + case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes: + return true + default: + return false + } +} + +func (ps *produceSet) empty() bool { + return ps.bufferCount == 0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/real_decoder.go b/vendor/gopkg.in/Shopify/sarama.v1/real_decoder.go new file mode 100644 index 000000000..a0141af07 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/real_decoder.go @@ -0,0 +1,259 @@ +package sarama + +import ( + "encoding/binary" + "math" +) + +var errInvalidArrayLength = PacketDecodingError{"invalid array length"} +var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} +var errInvalidStringLength = PacketDecodingError{"invalid string length"} +var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} + +type realDecoder struct { + raw []byte + off int + stack []pushDecoder +} + +// primitives + +func (rd *realDecoder) getInt8() (int8, error) { + if rd.remaining() < 1 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int8(rd.raw[rd.off]) + rd.off++ + return tmp, nil +} + +func (rd *realDecoder) getInt16() (int16, error) { + if rd.remaining() < 2 { + rd.off = len(rd.raw) + 
return -1, ErrInsufficientData + } + tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) + rd.off += 2 + return tmp, nil +} + +func (rd *realDecoder) getInt32() (int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + return tmp, nil +} + +func (rd *realDecoder) getInt64() (int64, error) { + if rd.remaining() < 8 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + return tmp, nil +} + +func (rd *realDecoder) getArrayLength() (int, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + if tmp > rd.remaining() { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } else if tmp > 2*math.MaxUint16 { + return -1, errInvalidArrayLength + } + return tmp, nil +} + +// collections + +func (rd *realDecoder) getBytes() ([]byte, error) { + tmp, err := rd.getInt32() + + if err != nil { + return nil, err + } + + n := int(tmp) + + switch { + case n < -1: + return nil, errInvalidByteSliceLength + case n == -1: + return nil, nil + case n == 0: + return make([]byte, 0), nil + case n > rd.remaining(): + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + tmpStr := rd.raw[rd.off : rd.off+n] + rd.off += n + return tmpStr, nil +} + +func (rd *realDecoder) getString() (string, error) { + tmp, err := rd.getInt16() + + if err != nil { + return "", err + } + + n := int(tmp) + + switch { + case n < -1: + return "", errInvalidStringLength + case n == -1: + return "", nil + case n == 0: + return "", nil + case n > rd.remaining(): + rd.off = len(rd.raw) + return "", ErrInsufficientData + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return tmpStr, nil +} + +func (rd *realDecoder) getInt32Array() ([]int32, error) { + if rd.remaining() < 4 { + rd.off 
= len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 4*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int32, n) + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + +func (rd *realDecoder) getInt64Array() ([]int64, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 8*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int64, n) + for i := range ret { + ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + } + return ret, nil +} + +func (rd *realDecoder) getStringArray() ([]string, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]string, n) + for i := range ret { + if str, err := rd.getString(); err != nil { + return nil, err + } else { + ret[i] = str + } + } + return ret, nil +} + +// subsets + +func (rd *realDecoder) remaining() int { + return len(rd.raw) - rd.off +} + +func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { + if length < 0 { + return nil, errInvalidSubsetSize + } else if length > rd.remaining() { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + start := rd.off + rd.off += length + return &realDecoder{raw: rd.raw[start:rd.off]}, nil +} + +// stacks + +func (rd *realDecoder) push(in pushDecoder) error { + in.saveOffset(rd.off) + + reserve := 
in.reserveLength() + if rd.remaining() < reserve { + rd.off = len(rd.raw) + return ErrInsufficientData + } + + rd.stack = append(rd.stack, in) + + rd.off += reserve + + return nil +} + +func (rd *realDecoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := rd.stack[len(rd.stack)-1] + rd.stack = rd.stack[:len(rd.stack)-1] + + return in.check(rd.off, rd.raw) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/real_encoder.go b/vendor/gopkg.in/Shopify/sarama.v1/real_encoder.go new file mode 100644 index 000000000..076fdd0ca --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/real_encoder.go @@ -0,0 +1,115 @@ +package sarama + +import "encoding/binary" + +type realEncoder struct { + raw []byte + off int + stack []pushEncoder +} + +// primitives + +func (re *realEncoder) putInt8(in int8) { + re.raw[re.off] = byte(in) + re.off++ +} + +func (re *realEncoder) putInt16(in int16) { + binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) + re.off += 2 +} + +func (re *realEncoder) putInt32(in int32) { + binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) + re.off += 4 +} + +func (re *realEncoder) putInt64(in int64) { + binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) + re.off += 8 +} + +func (re *realEncoder) putArrayLength(in int) error { + re.putInt32(int32(in)) + return nil +} + +// collection + +func (re *realEncoder) putRawBytes(in []byte) error { + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putBytes(in []byte) error { + if in == nil { + re.putInt32(-1) + return nil + } + re.putInt32(int32(len(in))) + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putString(in string) error { + re.putInt16(int16(len(in))) + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putStringArray(in []string) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, val := range in { + if err := 
re.putString(val); err != nil { + return err + } + } + + return nil +} + +func (re *realEncoder) putInt32Array(in []int32) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putInt64Array(in []int64) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt64(val) + } + return nil +} + +// stacks + +func (re *realEncoder) push(in pushEncoder) { + in.saveOffset(re.off) + re.off += in.reserveLength() + re.stack = append(re.stack, in) +} + +func (re *realEncoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := re.stack[len(re.stack)-1] + re.stack = re.stack[:len(re.stack)-1] + + return in.run(re.off, re.raw) +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/request.go b/vendor/gopkg.in/Shopify/sarama.v1/request.go new file mode 100644 index 000000000..5dd337b0d --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/request.go @@ -0,0 +1,117 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "io" +) + +type protocolBody interface { + encoder + versionedDecoder + key() int16 + version() int16 + requiredVersion() KafkaVersion +} + +type request struct { + correlationID int32 + clientID string + body protocolBody +} + +func (r *request) encode(pe packetEncoder) (err error) { + pe.push(&lengthField{}) + pe.putInt16(r.body.key()) + pe.putInt16(r.body.version()) + pe.putInt32(r.correlationID) + err = pe.putString(r.clientID) + if err != nil { + return err + } + err = r.body.encode(pe) + if err != nil { + return err + } + return pe.pop() +} + +func (r *request) decode(pd packetDecoder) (err error) { + var key int16 + if key, err = pd.getInt16(); err != nil { + return err + } + var version int16 + if version, err = pd.getInt16(); err != nil { + return err + } + if r.correlationID, err = pd.getInt32(); err != nil { + return err + } + r.clientID, err = 
pd.getString() + + r.body = allocateBody(key, version) + if r.body == nil { + return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} + } + return r.body.decode(pd, version) +} + +func decodeRequest(r io.Reader) (req *request, err error) { + lengthBytes := make([]byte, 4) + if _, err := io.ReadFull(r, lengthBytes); err != nil { + return nil, err + } + + length := int32(binary.BigEndian.Uint32(lengthBytes)) + if length <= 4 || length > MaxRequestSize { + return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} + } + + encodedReq := make([]byte, length) + if _, err := io.ReadFull(r, encodedReq); err != nil { + return nil, err + } + + req = &request{} + if err := decode(encodedReq, req); err != nil { + return nil, err + } + return req, nil +} + +func allocateBody(key, version int16) protocolBody { + switch key { + case 0: + return &ProduceRequest{} + case 1: + return &FetchRequest{} + case 2: + return &OffsetRequest{} + case 3: + return &MetadataRequest{} + case 8: + return &OffsetCommitRequest{Version: version} + case 9: + return &OffsetFetchRequest{} + case 10: + return &ConsumerMetadataRequest{} + case 11: + return &JoinGroupRequest{} + case 12: + return &HeartbeatRequest{} + case 13: + return &LeaveGroupRequest{} + case 14: + return &SyncGroupRequest{} + case 15: + return &DescribeGroupsRequest{} + case 16: + return &ListGroupsRequest{} + case 17: + return &SaslHandshakeRequest{} + case 18: + return &ApiVersionsRequest{} + } + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/response_header.go b/vendor/gopkg.in/Shopify/sarama.v1/response_header.go new file mode 100644 index 000000000..f3f4d27d6 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/response_header.go @@ -0,0 +1,21 @@ +package sarama + +import "fmt" + +type responseHeader struct { + length int32 + correlationID int32 +} + +func (r *responseHeader) decode(pd packetDecoder) (err error) { + r.length, err = pd.getInt32() + if err != 
nil { + return err + } + if r.length <= 4 || r.length > MaxResponseSize { + return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)} + } + + r.correlationID, err = pd.getInt32() + return err +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/sarama.go b/vendor/gopkg.in/Shopify/sarama.v1/sarama.go new file mode 100644 index 000000000..8faa74a91 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/sarama.go @@ -0,0 +1,58 @@ +/* +Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level +API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level +API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation. + +To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel +and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases. +The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be +useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees +depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the +SyncProducer can still sometimes be lost. + +To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic +consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the +https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 +and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. 
+ +For lower-level needs, the Broker and Request/Response objects permit precise control over each connection +and message sent on the wire; the Client provides higher-level metadata management that is shared between +the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up +exactly with the protocol fields documented by Kafka at +https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol +*/ +package sarama + +import ( + "io/ioutil" + "log" +) + +// Logger is the instance of a StdLogger interface that Sarama writes connection +// management events to. By default it is set to discard all log messages via ioutil.Discard, +// but you can set it to redirect wherever you want. +var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) + +// StdLogger is used to log error messages. +type StdLogger interface { + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} + +// PanicHandler is called for recovering from panics spawned internally to the library (and thus +// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. +var PanicHandler func(interface{}) + +// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying +// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned +// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt +// to process. +var MaxRequestSize int32 = 100 * 1024 * 1024 + +// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If +// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to +// protect the client from running out of memory. 
Please note that brokers do not have any natural limit on +// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers +// (see https://issues.apache.org/jira/browse/KAFKA-2063). +var MaxResponseSize int32 = 100 * 1024 * 1024 diff --git a/vendor/gopkg.in/Shopify/sarama.v1/sasl_handshake_request.go b/vendor/gopkg.in/Shopify/sarama.v1/sasl_handshake_request.go new file mode 100644 index 000000000..fbbc8947b --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/sasl_handshake_request.go @@ -0,0 +1,33 @@ +package sarama + +type SaslHandshakeRequest struct { + Mechanism string +} + +func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.Mechanism); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) { + if r.Mechanism, err = pd.getString(); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeRequest) key() int16 { + return 17 +} + +func (r *SaslHandshakeRequest) version() int16 { + return 0 +} + +func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/sasl_handshake_response.go b/vendor/gopkg.in/Shopify/sarama.v1/sasl_handshake_response.go new file mode 100644 index 000000000..8379bbb26 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/sasl_handshake_response.go @@ -0,0 +1,38 @@ +package sarama + +type SaslHandshakeResponse struct { + Err KError + EnabledMechanisms []string +} + +func (r *SaslHandshakeResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return pe.putStringArray(r.EnabledMechanisms) +} + +func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { + if kerr, err := pd.getInt16(); err != nil { + return err + } else { + r.Err = KError(kerr) + } + + var err error + if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { + return err + } + + 
return nil +} + +func (r *SaslHandshakeResponse) key() int16 { + return 17 +} + +func (r *SaslHandshakeResponse) version() int16 { + return 0 +} + +func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/sync_group_request.go b/vendor/gopkg.in/Shopify/sarama.v1/sync_group_request.go new file mode 100644 index 000000000..7fbe47b20 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/sync_group_request.go @@ -0,0 +1,100 @@ +package sarama + +type SyncGroupRequest struct { + GroupId string + GenerationId int32 + MemberId string + GroupAssignments map[string][]byte +} + +func (r *SyncGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { + return err + } + for memberId, memberAssignment := range r.GroupAssignments { + if err := pe.putString(memberId); err != nil { + return err + } + if err := pe.putBytes(memberAssignment); err != nil { + return err + } + } + + return nil +} + +func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupAssignments = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + memberAssignment, err := pd.getBytes() + if err != nil { + return err + } + + r.GroupAssignments[memberId] = memberAssignment + } + + return nil +} + +func (r *SyncGroupRequest) key() int16 { + return 14 +} + +func (r *SyncGroupRequest) version() int16 { + return 0 +} + +func 
(r *SyncGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { + if r.GroupAssignments == nil { + r.GroupAssignments = make(map[string][]byte) + } + + r.GroupAssignments[memberId] = memberAssignment +} + +func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { + bin, err := encode(memberAssignment) + if err != nil { + return err + } + + r.AddGroupAssignment(memberId, bin) + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/sync_group_response.go b/vendor/gopkg.in/Shopify/sarama.v1/sync_group_response.go new file mode 100644 index 000000000..12aef6730 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/sync_group_response.go @@ -0,0 +1,40 @@ +package sarama + +type SyncGroupResponse struct { + Err KError + MemberAssignment []byte +} + +func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(r.MemberAssignment, assignment) + return assignment, err +} + +func (r *SyncGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return pe.putBytes(r.MemberAssignment) +} + +func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { + if kerr, err := pd.getInt16(); err != nil { + return err + } else { + r.Err = KError(kerr) + } + + r.MemberAssignment, err = pd.getBytes() + return +} + +func (r *SyncGroupResponse) key() int16 { + return 14 +} + +func (r *SyncGroupResponse) version() int16 { + return 0 +} + +func (r *SyncGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/sync_producer.go b/vendor/gopkg.in/Shopify/sarama.v1/sync_producer.go new file mode 100644 index 000000000..b181527f0 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/sync_producer.go @@ -0,0 +1,140 @@ +package 
sarama + +import "sync" + +// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct +// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer +// to avoid leaks, it may not be garbage-collected automatically when it passes out of scope. +// +// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual +// durability guarantee provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`. +// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. +type SyncProducer interface { + + // SendMessage produces a given message, and returns only when it either has + // succeeded or failed to produce. It will return the partition and the offset + // of the produced message, or an error if the message failed to produce. + SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) + + // SendMessages produces a given set of messages, and returns only when all + // messages in the set have either succeeded or failed. Note that messages + // can succeed and fail individually; if some succeed and some fail, + // SendMessages will return an error. + SendMessages(msgs []*ProducerMessage) error + + // Close shuts down the producer and flushes any messages it may have buffered. + // You must call this function before a producer object passes out of scope, as + // it may otherwise leak memory. You must call this before calling Close on the + // underlying client. + Close() error +} + +type syncProducer struct { + producer *asyncProducer + wg sync.WaitGroup +} + +// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration. 
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { + p, err := NewAsyncProducer(addrs, config) + if err != nil { + return nil, err + } + return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil +} + +// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this producer. +func NewSyncProducerFromClient(client Client) (SyncProducer, error) { + p, err := NewAsyncProducerFromClient(client) + if err != nil { + return nil, err + } + return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil +} + +func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { + p.conf.Producer.Return.Successes = true + p.conf.Producer.Return.Errors = true + sp := &syncProducer{producer: p} + + sp.wg.Add(2) + go withRecover(sp.handleSuccesses) + go withRecover(sp.handleErrors) + + return sp +} + +func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { + oldMetadata := msg.Metadata + defer func() { + msg.Metadata = oldMetadata + }() + + expectation := make(chan *ProducerError, 1) + msg.Metadata = expectation + sp.producer.Input() <- msg + + if err := <-expectation; err != nil { + return -1, -1, err.Err + } + + return msg.Partition, msg.Offset, nil +} + +func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { + savedMetadata := make([]interface{}, len(msgs)) + for i := range msgs { + savedMetadata[i] = msgs[i].Metadata + } + defer func() { + for i := range msgs { + msgs[i].Metadata = savedMetadata[i] + } + }() + + expectations := make(chan chan *ProducerError, len(msgs)) + go func() { + for _, msg := range msgs { + expectation := make(chan *ProducerError, 1) + msg.Metadata = expectation + sp.producer.Input() <- msg + expectations <- expectation + } + close(expectations) + }() + + var errors ProducerErrors + for expectation := range expectations { + if err := <-expectation; 
err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (sp *syncProducer) handleSuccesses() { + defer sp.wg.Done() + for msg := range sp.producer.Successes() { + expectation := msg.Metadata.(chan *ProducerError) + expectation <- nil + } +} + +func (sp *syncProducer) handleErrors() { + defer sp.wg.Done() + for err := range sp.producer.Errors() { + expectation := err.Msg.Metadata.(chan *ProducerError) + expectation <- err + } +} + +func (sp *syncProducer) Close() error { + sp.producer.AsyncClose() + sp.wg.Wait() + return nil +} diff --git a/vendor/gopkg.in/Shopify/sarama.v1/utils.go b/vendor/gopkg.in/Shopify/sarama.v1/utils.go new file mode 100644 index 000000000..b60e53a07 --- /dev/null +++ b/vendor/gopkg.in/Shopify/sarama.v1/utils.go @@ -0,0 +1,150 @@ +package sarama + +import ( + "bufio" + "net" + "sort" +) + +type none struct{} + +// make []int32 sortable so we can sort partition numbers +type int32Slice []int32 + +func (slice int32Slice) Len() int { + return len(slice) +} + +func (slice int32Slice) Less(i, j int) bool { + return slice[i] < slice[j] +} + +func (slice int32Slice) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func dupeAndSort(input []int32) []int32 { + ret := make([]int32, 0, len(input)) + for _, val := range input { + ret = append(ret, val) + } + + sort.Sort(int32Slice(ret)) + return ret +} + +func withRecover(fn func()) { + defer func() { + handler := PanicHandler + if handler != nil { + if err := recover(); err != nil { + handler(err) + } + } + }() + + fn() +} + +func safeAsyncClose(b *Broker) { + tmp := b // local var prevents clobbering in goroutine + go withRecover(func() { + if connected, _ := tmp.Connected(); connected { + if err := tmp.Close(); err != nil { + Logger.Println("Error closing broker", tmp.ID(), ":", err) + } + } + }) +} + +// Encoder is a simple interface for any type that can be encoded as an array of bytes +// in order to be sent as the key 
or value of a Kafka message. Length() is provided as an +// optimization, and must return the same as len() on the result of Encode(). +type Encoder interface { + Encode() ([]byte, error) + Length() int +} + +// make strings and byte slices encodable for convenience so they can be used as keys +// and/or values in kafka messages + +// StringEncoder implements the Encoder interface for Go strings so that they can be used +// as the Key or Value in a ProducerMessage. +type StringEncoder string + +func (s StringEncoder) Encode() ([]byte, error) { + return []byte(s), nil +} + +func (s StringEncoder) Length() int { + return len(s) +} + +// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used +// as the Key or Value in a ProducerMessage. +type ByteEncoder []byte + +func (b ByteEncoder) Encode() ([]byte, error) { + return b, nil +} + +func (b ByteEncoder) Length() int { + return len(b) +} + +// bufConn wraps a net.Conn with a buffer for reads to reduce the number of +// reads that trigger syscalls. +type bufConn struct { + net.Conn + buf *bufio.Reader +} + +func newBufConn(conn net.Conn) *bufConn { + return &bufConn{ + Conn: conn, + buf: bufio.NewReader(conn), + } +} + +func (bc *bufConn) Read(b []byte) (n int, err error) { + return bc.buf.Read(b) +} + +// KafkaVersion instances represent versions of the upstream Kafka broker. 
+type KafkaVersion struct { + // it's a struct rather than just typing the array directly to make it opaque and stop people + // generating their own arbitrary versions + version [4]uint +} + +func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion { + return KafkaVersion{ + version: [4]uint{major, minor, veryMinor, patch}, + } +} + +// IsAtLeast return true if and only if the version it is called on is +// greater than or equal to the version passed in: +// V1.IsAtLeast(V2) // false +// V2.IsAtLeast(V1) // true +func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool { + for i := range v.version { + if v.version[i] > other.version[i] { + return true + } else if v.version[i] < other.version[i] { + return false + } + } + return true +} + +// Effective constants defining the supported kafka versions. +var ( + V0_8_2_0 = newKafkaVersion(0, 8, 2, 0) + V0_8_2_1 = newKafkaVersion(0, 8, 2, 1) + V0_8_2_2 = newKafkaVersion(0, 8, 2, 2) + V0_9_0_0 = newKafkaVersion(0, 9, 0, 0) + V0_9_0_1 = newKafkaVersion(0, 9, 0, 1) + V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) + minVersion = V0_8_2_0 +) diff --git a/vendor/vendor.json b/vendor/vendor.json index bf8ceb014..6698c14ed 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -12,6 +12,12 @@ "revision": "73ae1d68fe0bd482ab11913a9828634f795b987f", "revisionTime": "2016-07-13T13:44:25+03:00" }, + { + "checksumSHA1": "/5cvgU+J4l7EhMXTK76KaCAfOuU=", + "path": "github.com/davecgh/go-spew/spew", + "revision": "346938d642f2ec3594ed81d874461961cd0faa76", + "revisionTime": "2016-10-29T20:57:26Z" + }, { "path": "github.com/dgrijalva/jwt-go", "revision": "01aeca54ebda6e0fbfafd0a524d234159c05ec20", @@ -27,6 +33,24 @@ "revision": "fef948f2d241bd1fd0631108ecc2c9553bae60bf", "revisionTime": "2016-06-23T09:40:21+08:00" }, + { + "checksumSHA1": "y2Kh4iPlgCPXSGTCcFpzePYdzzg=", + "path": "github.com/eapache/go-resiliency/breaker", + "revision": "b86b1ec0dd4209a588dc1285cdd471e73525c0b3", + "revisionTime": "2016-01-04T19:15:39Z" + 
}, + { + "checksumSHA1": "WHl96RVZlOOdF4Lb1OOadMpw8ls=", + "path": "github.com/eapache/go-xerial-snappy", + "revision": "bb955e01b9346ac19dc29eb16586c90ded99a98c", + "revisionTime": "2016-06-09T14:24:08Z" + }, + { + "checksumSHA1": "oCCs6kDanizatplM5e/hX76busE=", + "path": "github.com/eapache/queue", + "revision": "44cc805cf13205b55f69e14bcb69867d1ae92f98", + "revisionTime": "2016-08-05T00:47:13Z" + }, { "path": "github.com/elazarl/go-bindata-assetfs", "revision": "57eb5e1fc594ad4b0b1dbea7b286d299e0cb43c2", @@ -37,6 +61,12 @@ "revision": "87d4004f2ab62d0d255e0a38f1680aa534549fe3", "revisionTime": "2016-06-10T14:06:02+03:00" }, + { + "checksumSHA1": "2a/SsTUBMKtcM6VtpbdPGO+c6c8=", + "path": "github.com/golang/snappy", + "revision": "d9eb7a3d35ec988b8585d4a0068e462c27d28380", + "revisionTime": "2016-05-29T05:00:41Z" + }, { "path": "github.com/gorilla/context", "revision": "aed02d124ae4a0e94fea4541c8effd05bf0c8296", @@ -72,6 +102,12 @@ "revision": "349c675778172472f5e8f3a3e0fe187e302e5a10", "revisionTime": "2016-01-06T11:44:51+01:00" }, + { + "checksumSHA1": "BM6ZlNJmtKy3GBoWwg2X55gnZ4A=", + "path": "github.com/klauspost/crc32", + "revision": "cb6bfca970f6908083f26f39a79009d608efd5cd", + "revisionTime": "2016-10-16T15:41:25Z" + }, { "checksumSHA1": "XRii0aDqXZvztXflEB2EE9TRoks=", "path": "github.com/klauspost/reedsolomon", @@ -228,6 +264,12 @@ "revision": "a728288923b47049b2ce791836767ffbe964a5bd", "revisionTime": "2016-07-07T15:24:52-07:00" }, + { + "checksumSHA1": "p36nkSZUaOs/7Wtcy/FduBLgm8g=", + "path": "gopkg.in/Shopify/sarama.v1", + "revision": "bd61cae2be85fa6ff40eb23dcdd24567967ac2ae", + "revisionTime": "2016-08-30T13:25:53Z" + }, { "path": "gopkg.in/check.v1", "revision": "11d3bc7aa68e238947792f30573146a3231fc0f1", From 5c481fbf6e013d3f4545a3fb3ae2e58fa033fad8 Mon Sep 17 00:00:00 2001 From: dcharbonnier Date: Thu, 15 Dec 2016 20:59:16 +0100 Subject: [PATCH 008/100] Fix Minio Quickstart Guide link (#3457) --- docs/erasure/README.md | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/docs/erasure/README.md b/docs/erasure/README.md index 7a72daf3a..80de9ca82 100644 --- a/docs/erasure/README.md +++ b/docs/erasure/README.md @@ -4,7 +4,7 @@ Minio protects data against hardware failures and silent data corruption using e ## 1. Prerequisites: -Install Minio - [Minio Quickstart Guide](https://docs.minio.io/docs/minio) +Install Minio - [Minio Quickstart Guide](https://docs.minio.io/docs/minio-quickstart-guide) ## What is Erasure Code? From 0db484c8f6f4b90d9bc396434413553ba116cd83 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 15 Dec 2016 14:56:18 -0800 Subject: [PATCH 009/100] signv2: Do not use path encoding for query values. (#3458) Use query unescape before comparing signature. --- cmd/signature-v2.go | 17 +++++++++++++---- cmd/signature-v2_test.go | 10 ++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/cmd/signature-v2.go b/cmd/signature-v2.go index 7dd80ee54..4913fe71d 100644 --- a/cmd/signature-v2.go +++ b/cmd/signature-v2.go @@ -22,6 +22,7 @@ import ( "encoding/base64" "fmt" "net/http" + "net/url" "sort" "strconv" "strings" @@ -99,20 +100,27 @@ func doesPresignV2SignatureMatch(r *http.Request) APIErrorCode { var gotSignature string var expires string var accessKey string + var err error for _, query := range queries { keyval := strings.Split(query, "=") switch keyval[0] { case "AWSAccessKeyId": - accessKey = keyval[1] + accessKey, err = url.QueryUnescape(keyval[1]) case "Signature": - gotSignature = keyval[1] + gotSignature, err = url.QueryUnescape(keyval[1]) case "Expires": - expires = keyval[1] + expires, err = url.QueryUnescape(keyval[1]) default: filteredQueries = append(filteredQueries, query) } } + // Check if the query unescaped properly. + if err != nil { + errorIf(err, "Unable to unescape query values", queries) + return ErrInvalidQueryParams + } + // Invalid access key. 
if accessKey == "" { return ErrInvalidQueryParams } @@ -128,12 +136,13 @@ func doesPresignV2SignatureMatch(r *http.Request) APIErrorCode { return ErrMalformedExpires } + // Check if the presigned URL has expired. if expiresInt < time.Now().UTC().Unix() { return ErrExpiredPresignRequest } expectedSignature := preSignatureV2(r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires) - if gotSignature != getURLEncodedName(expectedSignature) { + if gotSignature != expectedSignature { return ErrSignatureDoesNotMatch } diff --git a/cmd/signature-v2_test.go b/cmd/signature-v2_test.go index 39f0557bc..6b8533539 100644 --- a/cmd/signature-v2_test.go +++ b/cmd/signature-v2_test.go @@ -22,6 +22,7 @@ func TestResourceListSorting(t *testing.T) { } } +// Tests presigned v2 signature. func TestDoesPresignedV2SignatureMatch(t *testing.T) { root, err := newTestConfig("us-east-1") if err != nil { @@ -76,6 +77,15 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) { }, expected: ErrSignatureDoesNotMatch, }, + // (5) Should error when the signature does not match. + { + queryParams: map[string]string{ + "Expires": fmt.Sprintf("%d", now.Unix()), + "Signature": "zOM2YrY/yAQe15VWmT78OlBrK6g=", + "AWSAccessKeyId": serverConfig.GetCredential().AccessKeyID, + }, + expected: ErrSignatureDoesNotMatch, + }, } // Run each test case individually. From 8ceb9694456b042a38f5fa8c33dcf5ed59ecfccc Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Fri, 16 Dec 2016 07:25:05 +0100 Subject: [PATCH 010/100] tests: Use testTmpDir var to specify tmp directory (#3459) To be able to specify the directory where tests will be done. This way, it will be easier to run Minio tests on a mounted directory like NFS, .. 
--- cmd/fs-v1-metadata_test.go | 5 ++--- cmd/fs-v1-multipart-common_test.go | 5 ++--- cmd/fs-v1-multipart_test.go | 9 ++++----- cmd/fs-v1_test.go | 21 ++++++++++----------- cmd/object-api-listobjects_test.go | 2 +- cmd/posix-list-dir_test.go | 2 +- cmd/posix-utils_nix_test.go | 4 ++-- cmd/posix_test.go | 8 ++++---- cmd/signature-jwt_test.go | 8 ++++---- cmd/test-utils_test.go | 7 +++++-- cmd/tree-walk_test.go | 14 +++++++------- cmd/xl-v1_test.go | 3 +-- 12 files changed, 43 insertions(+), 45 deletions(-) diff --git a/cmd/fs-v1-metadata_test.go b/cmd/fs-v1-metadata_test.go index 7c110c105..151948273 100644 --- a/cmd/fs-v1-metadata_test.go +++ b/cmd/fs-v1-metadata_test.go @@ -18,7 +18,6 @@ package cmd import ( "bytes" - "os" "path/filepath" "testing" ) @@ -37,7 +36,7 @@ func initFSObjects(disk string, t *testing.T) (obj ObjectLayer) { // TestReadFsMetadata - readFSMetadata testing with a healthy and faulty disk func TestReadFSMetadata(t *testing.T) { - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) @@ -84,7 +83,7 @@ func TestReadFSMetadata(t *testing.T) { // TestWriteFsMetadata - tests of writeFSMetadata with healthy and faulty disks func TestWriteFSMetadata(t *testing.T) { - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) fs := obj.(fsObjects) diff --git a/cmd/fs-v1-multipart-common_test.go b/cmd/fs-v1-multipart-common_test.go index 97d0888fc..2f4b4555a 100644 --- a/cmd/fs-v1-multipart-common_test.go +++ b/cmd/fs-v1-multipart-common_test.go @@ -17,7 +17,6 @@ package cmd import ( - "os" "path/filepath" "testing" "time" @@ -26,7 +25,7 @@ import ( // TestFSIsUploadExists - complete test with valid and invalid cases func TestFSIsUploadExists(t *testing.T) { // Prepare for testing - disk := filepath.Join(os.TempDir(), 
"minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) @@ -70,7 +69,7 @@ func TestFSIsUploadExists(t *testing.T) { // TestFSWriteUploadJSON - tests for writeUploadJSON for FS func TestFSWriteUploadJSON(t *testing.T) { // Prepare for tests - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) diff --git a/cmd/fs-v1-multipart_test.go b/cmd/fs-v1-multipart_test.go index 52c30d7c6..65dda780f 100644 --- a/cmd/fs-v1-multipart_test.go +++ b/cmd/fs-v1-multipart_test.go @@ -18,7 +18,6 @@ package cmd import ( "bytes" - "os" "path/filepath" "reflect" "testing" @@ -27,7 +26,7 @@ import ( // TestNewMultipartUploadFaultyDisk - test NewMultipartUpload with faulty disks func TestNewMultipartUploadFaultyDisk(t *testing.T) { // Prepare for tests - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) @@ -66,7 +65,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) { defer removeAll(root) // Prepare for tests - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) fs := obj.(fsObjects) @@ -117,7 +116,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) { // TestCompleteMultipartUploadFaultyDisk - test CompleteMultipartUpload with faulty disks func TestCompleteMultipartUploadFaultyDisk(t *testing.T) { // Prepare for tests - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) @@ -168,7 +167,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) { // TestListMultipartUploadsFaultyDisk - test ListMultipartUploads 
with faulty disks func TestListMultipartUploadsFaultyDisk(t *testing.T) { // Prepare for tests - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) fs := obj.(fsObjects) diff --git a/cmd/fs-v1_test.go b/cmd/fs-v1_test.go index aa0479bbe..9b82d862e 100644 --- a/cmd/fs-v1_test.go +++ b/cmd/fs-v1_test.go @@ -18,7 +18,6 @@ package cmd import ( "bytes" - "os" "path/filepath" "testing" ) @@ -29,13 +28,13 @@ func TestNewFS(t *testing.T) { // Do not attempt to create this path, the test validates // so that newFSObjects initializes non existing paths // and successfully returns initialized object layer. - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) // Setup to test errFSDiskFormat. disks := []string{} for i := 0; i < 6; i++ { - xlDisk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + xlDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(xlDisk) disks = append(disks, xlDisk) } @@ -109,7 +108,7 @@ func TestFSShutdown(t *testing.T) { objectName := "object" // Create and return an fsObject with its path in the disk prepareTest := func() (fsObjects, string) { - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) obj := initFSObjects(disk, t) fs := obj.(fsObjects) objectContent := "12345" @@ -142,7 +141,7 @@ func TestFSShutdown(t *testing.T) { // TestFSLoadFormatFS - test loadFormatFS with healty and faulty disks func TestFSLoadFormatFS(t *testing.T) { // Prepare for testing - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) @@ -171,7 +170,7 @@ func TestFSLoadFormatFS(t *testing.T) { // TestFSGetBucketInfo - test GetBucketInfo 
with healty and faulty disks func TestFSGetBucketInfo(t *testing.T) { // Prepare for testing - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) @@ -208,7 +207,7 @@ func TestFSGetBucketInfo(t *testing.T) { // TestFSDeleteObject - test fs.DeleteObject() with healthy and corrupted disks func TestFSDeleteObject(t *testing.T) { // Prepare for tests - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) @@ -249,7 +248,7 @@ func TestFSDeleteObject(t *testing.T) { // TestFSDeleteBucket - tests for fs DeleteBucket func TestFSDeleteBucket(t *testing.T) { // Prepare for testing - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) @@ -290,7 +289,7 @@ func TestFSDeleteBucket(t *testing.T) { // TestFSListBuckets - tests for fs ListBuckets func TestFSListBuckets(t *testing.T) { // Prepare for tests - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) @@ -328,7 +327,7 @@ func TestFSListBuckets(t *testing.T) { // TestFSHealObject - tests for fs HealObject func TestFSHealObject(t *testing.T) { - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) @@ -340,7 +339,7 @@ func TestFSHealObject(t *testing.T) { // TestFSListObjectHeal - tests for fs ListObjectHeals func TestFSListObjectsHeal(t *testing.T) { - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, 
t) diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go index 35c8f8d92..77651a20a 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -587,7 +587,7 @@ func initFSObjectsB(disk string, t *testing.B) (obj ObjectLayer) { // BenchmarkListObjects - Run ListObject Repeatedly and benchmark. func BenchmarkListObjects(b *testing.B) { // Make a temporary directory to use as the obj. - directory, err := ioutil.TempDir("", "minio-list-benchmark") + directory, err := ioutil.TempDir(globalTestTmpDir, "minio-list-benchmark") if err != nil { b.Fatal(err) } diff --git a/cmd/posix-list-dir_test.go b/cmd/posix-list-dir_test.go index 5ee7c1ed8..c709f7ec5 100644 --- a/cmd/posix-list-dir_test.go +++ b/cmd/posix-list-dir_test.go @@ -55,7 +55,7 @@ type result struct { func mustSetupDir(t *testing.T) string { // Create unique test directory. - dir, err := ioutil.TempDir("", "minio-posix-list-dir") + dir, err := ioutil.TempDir(globalTestTmpDir, "minio-posix-list-dir") if err != nil { t.Fatalf("Unable to setup directory, %s", err) } diff --git a/cmd/posix-utils_nix_test.go b/cmd/posix-utils_nix_test.go index c76ccea97..dc8cd3f93 100644 --- a/cmd/posix-utils_nix_test.go +++ b/cmd/posix-utils_nix_test.go @@ -36,7 +36,7 @@ func getUmask() int { // Tests if the directory and file creations happen with proper umask. func TestIsValidUmaskVol(t *testing.T) { - tmpPath, err := ioutil.TempDir(os.TempDir(), "minio-") + tmpPath, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Initializing temporary directory failed with %s.", err) } @@ -78,7 +78,7 @@ func TestIsValidUmaskVol(t *testing.T) { // Tests if the file creations happen with proper umask. 
func TestIsValidUmaskFile(t *testing.T) { - tmpPath, err := ioutil.TempDir(os.TempDir(), "minio-") + tmpPath, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Initializing temporary directory failed with %s.", err) } diff --git a/cmd/posix_test.go b/cmd/posix_test.go index e8a4b0f72..6e0390232 100644 --- a/cmd/posix_test.go +++ b/cmd/posix_test.go @@ -31,7 +31,7 @@ import ( // creates a temp dir and sets up posix layer. // returns posix layer, temp dir path to be used for the purpose of tests. func newPosixTestSetup() (StorageAPI, string, error) { - diskPath, err := ioutil.TempDir(os.TempDir(), "minio-") + diskPath, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { return nil, "", err } @@ -45,7 +45,7 @@ func newPosixTestSetup() (StorageAPI, string, error) { // Tests posix.getDiskInfo() func TestGetDiskInfo(t *testing.T) { - path, err := ioutil.TempDir(os.TempDir(), "minio-") + path, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create a temporary directory, %s", err) } @@ -171,9 +171,9 @@ func TestReadAll(t *testing.T) { // TestNewPosix all the cases handled in posix storage layer initialization. func TestNewPosix(t *testing.T) { // Temporary dir name. - tmpDirName := os.TempDir() + "/" + "minio-" + nextSuffix() + tmpDirName := globalTestTmpDir + "/" + "minio-" + nextSuffix() // Temporary file name. - tmpFileName := os.TempDir() + "/" + "minio-" + nextSuffix() + tmpFileName := globalTestTmpDir + "/" + "minio-" + nextSuffix() f, _ := os.Create(tmpFileName) f.Close() defer os.Remove(tmpFileName) diff --git a/cmd/signature-jwt_test.go b/cmd/signature-jwt_test.go index 5b0588e27..2fe96dcb8 100644 --- a/cmd/signature-jwt_test.go +++ b/cmd/signature-jwt_test.go @@ -32,21 +32,21 @@ func TestNewJWT(t *testing.T) { serverConfig = nil // Test non-existent config directory. 
- path1, err := ioutil.TempDir("", "minio-") + path1, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create a temporary directory, %s", err) } defer removeAll(path1) // Test empty config directory. - path2, err := ioutil.TempDir("", "minio-") + path2, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create a temporary directory, %s", err) } defer removeAll(path2) // Test empty config file. - path3, err := ioutil.TempDir("", "minio-") + path3, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create a temporary directory, %s", err) } @@ -57,7 +57,7 @@ func TestNewJWT(t *testing.T) { } // Test initialized config file. - path4, err := ioutil.TempDir("", "minio-") + path4, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create a temporary directory, %s", err) } diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index e620148e3..fc7440283 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -133,6 +133,9 @@ const ( var randN uint32 var randmu sync.Mutex +// Temp files created in default Tmp dir +var globalTestTmpDir = os.TempDir() + // reseed - returns a new seed every time the function is called. func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) @@ -1572,14 +1575,14 @@ func getListenBucketNotificationURL(endPoint, bucketName string, prefixes, suffi // returns temp root directory. ` func getTestRoot() (string, error) { - return ioutil.TempDir(os.TempDir(), "api-") + return ioutil.TempDir(globalTestTmpDir, "api-") } // getRandomDisks - Creates a slice of N random disks, each of the form - minio-XXX func getRandomDisks(N int) ([]string, error) { var erasureDisks []string for i := 0; i < N; i++ { - path, err := ioutil.TempDir(os.TempDir(), "minio-") + path, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { // Remove directories created so far. 
removeRoots(erasureDisks) diff --git a/cmd/tree-walk_test.go b/cmd/tree-walk_test.go index fc9c44b4b..939439f2f 100644 --- a/cmd/tree-walk_test.go +++ b/cmd/tree-walk_test.go @@ -161,7 +161,7 @@ func testTreeWalkMarker(t *testing.T, listDir listDirFunc, isLeaf isLeafFunc) { // Test tree-walk. func TestTreeWalk(t *testing.T) { - fsDir, err := ioutil.TempDir("", "minio-") + fsDir, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create tmp directory: %s", err) } @@ -202,7 +202,7 @@ func TestTreeWalk(t *testing.T) { // Test if tree walk go-routine exits cleanly if tree walk is aborted because of timeout. func TestTreeWalkTimeout(t *testing.T) { - fsDir, err := ioutil.TempDir("", "minio-") + fsDir, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create tmp directory: %s", err) } @@ -277,11 +277,11 @@ func TestListDir(t *testing.T) { file1 := "file1" file2 := "file2" // Create two backend directories fsDir1 and fsDir2. - fsDir1, err := ioutil.TempDir("", "minio-") + fsDir1, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Errorf("Unable to create tmp directory: %s", err) } - fsDir2, err := ioutil.TempDir("", "minio-") + fsDir2, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Errorf("Unable to create tmp directory: %s", err) } @@ -360,7 +360,7 @@ func TestListDir(t *testing.T) { // without recursively traversing prefixes. func TestRecursiveTreeWalk(t *testing.T) { // Create a backend directories fsDir1. - fsDir1, err := ioutil.TempDir("", "minio-") + fsDir1, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create tmp directory: %s", err) } @@ -470,7 +470,7 @@ func TestRecursiveTreeWalk(t *testing.T) { func TestSortedness(t *testing.T) { // Create a backend directories fsDir1. 
- fsDir1, err := ioutil.TempDir("", "minio-") + fsDir1, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Errorf("Unable to create tmp directory: %s", err) } @@ -548,7 +548,7 @@ func TestSortedness(t *testing.T) { func TestTreeWalkIsEnd(t *testing.T) { // Create a backend directories fsDir1. - fsDir1, err := ioutil.TempDir("", "minio-") + fsDir1, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Errorf("Unable to create tmp directory: %s", err) } diff --git a/cmd/xl-v1_test.go b/cmd/xl-v1_test.go index 4626154b9..a098dae7e 100644 --- a/cmd/xl-v1_test.go +++ b/cmd/xl-v1_test.go @@ -17,7 +17,6 @@ package cmd import ( - "os" "path/filepath" "reflect" "testing" @@ -135,7 +134,7 @@ func TestNewXL(t *testing.T) { // Do not attempt to create this path, the test validates // so that newFSObjects initializes non existing paths // and successfully returns initialized object layer. - disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix()) + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) erasureDisks = append(erasureDisks, disk) defer removeAll(disk) } From b2f920a8687706d495282af0b12e665a96412c62 Mon Sep 17 00:00:00 2001 From: Krishnan Parthasarathi Date: Fri, 16 Dec 2016 11:56:15 +0530 Subject: [PATCH 011/100] Add service API handler stubs for status, stop and restart (#3417) --- cmd/admin-handlers.go | 64 ++++++++++++++ cmd/admin-handlers_test.go | 164 +++++++++++++++++++++++++++++++++++ cmd/admin-router.go | 40 +++++++++ cmd/admin-rpc-client.go | 163 ++++++++++++++++++++++++++++++++++ cmd/admin-rpc-server.go | 63 ++++++++++++++ cmd/admin-rpc-server_test.go | 86 ++++++++++++++++++ cmd/globals.go | 1 + cmd/routers.go | 9 ++ cmd/server-main.go | 3 + cmd/service.go | 57 ++++++------ docs/admin-api/service.md | 33 +++++++ 11 files changed, 655 insertions(+), 28 deletions(-) create mode 100644 cmd/admin-handlers.go create mode 100644 cmd/admin-handlers_test.go create mode 100644 cmd/admin-router.go create mode 100644 
cmd/admin-rpc-client.go create mode 100644 cmd/admin-rpc-server.go create mode 100644 cmd/admin-rpc-server_test.go create mode 100644 docs/admin-api/service.md diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go new file mode 100644 index 000000000..a4605fac9 --- /dev/null +++ b/cmd/admin-handlers.go @@ -0,0 +1,64 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "encoding/json" + "net/http" +) + +const ( + minioAdminOpHeader = "X-Minio-Operation" +) + +func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r *http.Request) { + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + return + } + storageInfo := newObjectLayerFn().StorageInfo() + jsonBytes, err := json.Marshal(storageInfo) + if err != nil { + writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) + errorIf(err, "Failed to marshal storage info into json.") + } + w.WriteHeader(http.StatusOK) + writeSuccessResponse(w, jsonBytes) +} + +func (adminAPI adminAPIHandlers) ServiceStopHandler(w http.ResponseWriter, r *http.Request) { + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + return + } + // Reply to the client before stopping minio server. 
+ w.WriteHeader(http.StatusOK) + sendServiceCmd(globalAdminPeers, serviceStop) +} + +func (adminAPI adminAPIHandlers) ServiceRestartHandler(w http.ResponseWriter, r *http.Request) { + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + return + } + // Reply to the client before restarting minio server. + w.WriteHeader(http.StatusOK) + sendServiceCmd(globalAdminPeers, serviceRestart) +} diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go new file mode 100644 index 000000000..0e012626f --- /dev/null +++ b/cmd/admin-handlers_test.go @@ -0,0 +1,164 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cmd + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + router "github.com/gorilla/mux" +) + +type cmdType int + +const ( + statusCmd cmdType = iota + stopCmd + restartCmd +) + +func (c cmdType) String() string { + switch c { + case statusCmd: + return "status" + case stopCmd: + return "stop" + case restartCmd: + return "restart" + } + return "" +} + +func (c cmdType) apiMethod() string { + switch c { + case statusCmd: + return "GET" + case stopCmd: + return "POST" + case restartCmd: + return "POST" + } + return "GET" +} + +func (c cmdType) toServiceSignal() serviceSignal { + switch c { + case statusCmd: + return serviceStatus + case stopCmd: + return serviceStop + case restartCmd: + return serviceRestart + } + return serviceStatus +} + +func testServiceSignalReceiver(cmd cmdType, t *testing.T) { + expectedCmd := cmd.toServiceSignal() + serviceCmd := <-globalServiceSignalCh + if serviceCmd != expectedCmd { + t.Errorf("Expected service command %v but received %v", expectedCmd, serviceCmd) + } +} + +func getAdminCmdRequest(cmd cmdType, cred credential) (*http.Request, error) { + req, err := newTestRequest(cmd.apiMethod(), "/?service", 0, nil) + if err != nil { + return nil, err + } + req.Header.Set(minioAdminOpHeader, cmd.String()) + err = signRequestV4(req, cred.AccessKeyID, cred.SecretAccessKey) + if err != nil { + return nil, err + } + return req, nil +} + +func testServicesCmdHandler(cmd cmdType, t *testing.T) { + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Unable to initialize server config. %s", err) + } + defer removeAll(rootPath) + + // Initialize admin peers to make admin RPC calls. + eps, err := parseStorageEndpoints([]string{"http://localhost"}) + if err != nil { + t.Fatalf("Failed to parse storage end point - %v", err) + } + initGlobalAdminPeers(eps) + + if cmd == statusCmd { + // Initializing objectLayer and corresponding + // []StorageAPI since DiskInfo() method requires it. 
+ objLayer, fsDir, fsErr := prepareFS() + if fsErr != nil { + t.Fatalf("failed to initialize XL based object layer - %v.", fsErr) + } + defer removeRoots([]string{fsDir}) + globalObjLayerMutex.Lock() + globalObjectAPI = objLayer + globalObjLayerMutex.Unlock() + } + + // Setting up a go routine to simulate ServerMux's + // handleServiceSignals for stop and restart commands. + switch cmd { + case stopCmd, restartCmd: + go testServiceSignalReceiver(cmd, t) + } + credentials := serverConfig.GetCredential() + adminRouter := router.NewRouter() + registerAdminRouter(adminRouter) + + rec := httptest.NewRecorder() + req, err := getAdminCmdRequest(cmd, credentials) + if err != nil { + t.Fatalf("Failed to build service status request %v", err) + } + adminRouter.ServeHTTP(rec, req) + + if cmd == statusCmd { + expectedInfo := newObjectLayerFn().StorageInfo() + receivedInfo := StorageInfo{} + if jsonErr := json.Unmarshal(rec.Body.Bytes(), &receivedInfo); jsonErr != nil { + t.Errorf("Failed to unmarshal StorageInfo - %v", jsonErr) + } + if expectedInfo != receivedInfo { + t.Errorf("Expected storage info and received storage info differ, %v %v", expectedInfo, receivedInfo) + } + } + + if rec.Code != http.StatusOK { + t.Errorf("Expected to receive %d status code but received %d", + http.StatusOK, rec.Code) + } +} + +func TestServiceStatusHandler(t *testing.T) { + testServicesCmdHandler(statusCmd, t) +} + +func TestServiceStopHandler(t *testing.T) { + testServicesCmdHandler(stopCmd, t) +} + +func TestServiceRestartHandler(t *testing.T) { + testServicesCmdHandler(restartCmd, t) +} diff --git a/cmd/admin-router.go b/cmd/admin-router.go new file mode 100644 index 000000000..60b5c3aeb --- /dev/null +++ b/cmd/admin-router.go @@ -0,0 +1,40 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import router "github.com/gorilla/mux" + +// adminAPIHandlers provides HTTP handlers for Minio admin API. +type adminAPIHandlers struct { +} + +// registerAdminRouter - Add handler functions for each service REST API routes. +func registerAdminRouter(mux *router.Router) { + + adminAPI := adminAPIHandlers{} + // Admin router + adminRouter := mux.NewRoute().PathPrefix("/").Subrouter() + + /// Admin operations + + // Service status + adminRouter.Methods("GET").Queries("service", "").Headers(minioAdminOpHeader, "status").HandlerFunc(adminAPI.ServiceStatusHandler) + // Service stop + adminRouter.Methods("POST").Queries("service", "").Headers(minioAdminOpHeader, "stop").HandlerFunc(adminAPI.ServiceStopHandler) + // Service restart + adminRouter.Methods("POST").Queries("service", "").Headers(minioAdminOpHeader, "restart").HandlerFunc(adminAPI.ServiceRestartHandler) +} diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go new file mode 100644 index 000000000..324ecda7b --- /dev/null +++ b/cmd/admin-rpc-client.go @@ -0,0 +1,163 @@ +/* + * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "net/rpc" + "net/url" + "path" + "sync" +) + +// localAdminClient - represents admin operation to be executed locally. +type localAdminClient struct { +} + +// remoteAdminClient - represents admin operation to be executed +// remotely, via RPC. +type remoteAdminClient struct { + *AuthRPCClient +} + +// stopRestarter - abstracts stop and restart operations for both +// local and remote execution. +type stopRestarter interface { + Stop() error + Restart() error +} + +// Stop - Sends a message over channel to the go-routine responsible +// for stopping the process. +func (lc localAdminClient) Stop() error { + globalServiceSignalCh <- serviceStop + return nil +} + +// Restart - Sends a message over channel to the go-routine +// responsible for restarting the process. +func (lc localAdminClient) Restart() error { + globalServiceSignalCh <- serviceRestart + return nil +} + +// Stop - Sends stop command to remote server via RPC. +func (rc remoteAdminClient) Stop() error { + args := GenericArgs{} + reply := GenericReply{} + err := rc.Call("Service.Shutdown", &args, &reply) + if err != nil && err == rpc.ErrShutdown { + rc.Close() + } + return err +} + +// Restart - Sends restart command to remote server via RPC. +func (rc remoteAdminClient) Restart() error { + args := GenericArgs{} + reply := GenericReply{} + err := rc.Call("Service.Restart", &args, &reply) + if err != nil && err == rpc.ErrShutdown { + rc.Close() + } + return err +} + +// adminPeer - represents an entity that implements Stop and Restart methods. 
+type adminPeer struct { + addr string + svcClnt stopRestarter +} + +// type alias for a collection of adminPeer. +type adminPeers []adminPeer + +// makeAdminPeers - helper function to construct a collection of adminPeer. +func makeAdminPeers(eps []*url.URL) adminPeers { + var servicePeers []adminPeer + + // map to store peers that are already added to ret + seenAddr := make(map[string]bool) + + // add local (self) as peer in the array + servicePeers = append(servicePeers, adminPeer{ + globalMinioAddr, + localAdminClient{}, + }) + seenAddr[globalMinioAddr] = true + + // iterate over endpoints to find new remote peers and add + // them to ret. + for _, ep := range eps { + if ep.Host == "" { + continue + } + + // Check if the remote host has been added already + if !seenAddr[ep.Host] { + cfg := authConfig{ + accessKey: serverConfig.GetCredential().AccessKeyID, + secretKey: serverConfig.GetCredential().SecretAccessKey, + address: ep.Host, + secureConn: isSSL(), + path: path.Join(reservedBucket, servicePath), + loginMethod: "Service.LoginHandler", + } + + servicePeers = append(servicePeers, adminPeer{ + addr: ep.Host, + svcClnt: &remoteAdminClient{newAuthClient(&cfg)}, + }) + seenAddr[ep.Host] = true + } + } + + return servicePeers +} + +// Initialize global adminPeer collection. +func initGlobalAdminPeers(eps []*url.URL) { + globalAdminPeers = makeAdminPeers(eps) +} + +// invokeServiceCmd - Invoke Stop/Restart command. +func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) { + switch cmd { + case serviceStop: + err = cp.svcClnt.Stop() + case serviceRestart: + err = cp.svcClnt.Restart() + } + return err +} + +// sendServiceCmd - Invoke Stop/Restart command on remote peers +// adminPeer followed by on the local peer. +func sendServiceCmd(cps adminPeers, cmd serviceSignal) { + // Send service command like stop or restart to all remote nodes and finally run on local node. 
+ errs := make([]error, len(cps)) + var wg sync.WaitGroup + remotePeers := cps[1:] + for i := range remotePeers { + wg.Add(1) + go func(idx int) { + defer wg.Done() + errs[idx] = invokeServiceCmd(remotePeers[idx], cmd) + }(i) + } + wg.Wait() + errs[0] = invokeServiceCmd(cps[0], cmd) +} diff --git a/cmd/admin-rpc-server.go b/cmd/admin-rpc-server.go new file mode 100644 index 000000000..3948901d1 --- /dev/null +++ b/cmd/admin-rpc-server.go @@ -0,0 +1,63 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "net/rpc" + + router "github.com/gorilla/mux" +) + +const servicePath = "/admin/service" + +// serviceCmd - exports RPC methods for service status, stop and +// restart commands. +type serviceCmd struct { + loginServer +} + +// Shutdown - Shutdown this instance of minio server. +func (s *serviceCmd) Shutdown(args *GenericArgs, reply *GenericReply) error { + if !isRPCTokenValid(args.Token) { + return errInvalidToken + } + globalServiceSignalCh <- serviceStop + return nil +} + +// Restart - Restart this instance of minio server. +func (s *serviceCmd) Restart(args *GenericArgs, reply *GenericReply) error { + if !isRPCTokenValid(args.Token) { + return errInvalidToken + } + globalServiceSignalCh <- serviceRestart + return nil +} + +// registerAdminRPCRouter - registers RPC methods for service status, +// stop and restart commands. 
+func registerAdminRPCRouter(mux *router.Router) error { + adminRPCHandler := &serviceCmd{} + adminRPCServer := rpc.NewServer() + err := adminRPCServer.RegisterName("Service", adminRPCHandler) + if err != nil { + return traceError(err) + } + adminRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() + adminRouter.Path(servicePath).Handler(adminRPCServer) + return nil +} diff --git a/cmd/admin-rpc-server_test.go b/cmd/admin-rpc-server_test.go new file mode 100644 index 000000000..99832642a --- /dev/null +++ b/cmd/admin-rpc-server_test.go @@ -0,0 +1,86 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cmd + +import ( + "testing" + "time" +) + +func testAdminCmd(cmd cmdType, t *testing.T) { + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Failed to create test config - %v", err) + } + defer removeAll(rootPath) + + adminServer := serviceCmd{} + creds := serverConfig.GetCredential() + reply := RPCLoginReply{} + args := RPCLoginArgs{Username: creds.AccessKeyID, Password: creds.SecretAccessKey} + err = adminServer.LoginHandler(&args, &reply) + if err != nil { + t.Fatalf("Failed to login to admin server - %v", err) + } + + go func() { + // mocking signal receiver + <-globalServiceSignalCh + }() + + validToken := reply.Token + timeNow := time.Now().UTC() + testCases := []struct { + ga GenericArgs + expectedErr error + }{ + // Valid case + { + ga: GenericArgs{Token: validToken, Timestamp: timeNow}, + expectedErr: nil, + }, + // Invalid token + { + ga: GenericArgs{Token: "invalidToken", Timestamp: timeNow}, + expectedErr: errInvalidToken, + }, + } + + genReply := GenericReply{} + for i, test := range testCases { + switch cmd { + case stopCmd: + err = adminServer.Shutdown(&test.ga, &genReply) + if err != test.expectedErr { + t.Errorf("Test %d: Expected error %v but received %v", i+1, test.expectedErr, err) + } + case restartCmd: + err = adminServer.Restart(&test.ga, &genReply) + if err != test.expectedErr { + t.Errorf("Test %d: Expected error %v but received %v", i+1, test.expectedErr, err) + } + } + } +} + +func TestAdminShutdown(t *testing.T) { + testAdminCmd(stopCmd, t) +} + +func TestAdminRestart(t *testing.T) { + testAdminCmd(restartCmd, t) +} diff --git a/cmd/globals.go b/cmd/globals.go index 96ae4a9e7..8e039597f 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -85,6 +85,7 @@ var ( // CA root certificates, a nil value means system certs pool will be used globalRootCAs *x509.CertPool + globalAdminPeers = adminPeers{} // Add new variable global values here. 
) diff --git a/cmd/routers.go b/cmd/routers.go index 6f03c480f..837180bee 100644 --- a/cmd/routers.go +++ b/cmd/routers.go @@ -110,6 +110,12 @@ func configureServerHandler(srvCmdConfig serverCmdConfig) (http.Handler, error) registerDistXLRouters(mux, srvCmdConfig) } + // Add Admin RPC router + err := registerAdminRPCRouter(mux) + if err != nil { + return nil, err + } + // Register web router when its enabled. if globalIsBrowserEnabled { if err := registerWebRouter(mux); err != nil { @@ -117,6 +123,9 @@ func configureServerHandler(srvCmdConfig serverCmdConfig) (http.Handler, error) } } + // Add Admin router. + registerAdminRouter(mux) + // Add API router. registerAPIRouter(mux) diff --git a/cmd/server-main.go b/cmd/server-main.go index fe41c5562..a752a15c1 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -464,6 +464,9 @@ func serverMain(c *cli.Context) { // Initialize S3 Peers inter-node communication initGlobalS3Peers(endpoints) + // Initialize Admin Peers inter-node communication + initGlobalAdminPeers(endpoints) + // Start server, automatically configures TLS if certs are available. go func(tls bool) { var lerr error diff --git a/cmd/service.go b/cmd/service.go index ea7aafc9c..d0af1c0d4 100644 --- a/cmd/service.go +++ b/cmd/service.go @@ -41,7 +41,7 @@ var globalServiceDoneCh chan struct{} // Initialize service mutex once. func init() { globalServiceDoneCh = make(chan struct{}, 1) - globalServiceSignalCh = make(chan serviceSignal, 1) + globalServiceSignalCh = make(chan serviceSignal) } // restartProcess starts a new process passing it the active fd's. It @@ -80,36 +80,37 @@ func (m *ServerMux) handleServiceSignals() error { globalServiceDoneCh <- struct{}{} } - // Start listening on service signal. Monitor signals. + // Wait for SIGTERM in a go-routine. trapCh := signalTrap(os.Interrupt, syscall.SIGTERM) + go func(<-chan bool) { + <-trapCh + globalServiceSignalCh <- serviceStop + }(trapCh) + + // Start listening on service signal. Monitor signals. 
for { - select { - case <-trapCh: - // Initiate graceful stop. - globalServiceSignalCh <- serviceStop - case signal := <-globalServiceSignalCh: - switch signal { - case serviceStatus: - /// We don't do anything for this. - case serviceRestart: - if err := m.Close(); err != nil { - errorIf(err, "Unable to close server gracefully") - } - if err := restartProcess(); err != nil { - errorIf(err, "Unable to restart the server.") - } + signal := <-globalServiceSignalCh + switch signal { + case serviceStatus: + /// We don't do anything for this. + case serviceRestart: + if err := m.Close(); err != nil { + errorIf(err, "Unable to close server gracefully") + } + if err := restartProcess(); err != nil { + errorIf(err, "Unable to restart the server.") + } + runExitFn(nil) + case serviceStop: + if err := m.Close(); err != nil { + errorIf(err, "Unable to close server gracefully") + } + objAPI := newObjectLayerFn() + if objAPI == nil { + // Server not initialized yet, exit happily. runExitFn(nil) - case serviceStop: - if err := m.Close(); err != nil { - errorIf(err, "Unable to close server gracefully") - } - objAPI := newObjectLayerFn() - if objAPI == nil { - // Server not initialized yet, exit happily. - runExitFn(nil) - } else { - runExitFn(objAPI.Shutdown()) - } + } else { + runExitFn(objAPI.Shutdown()) } } } diff --git a/docs/admin-api/service.md b/docs/admin-api/service.md new file mode 100644 index 000000000..bc1e580ab --- /dev/null +++ b/docs/admin-api/service.md @@ -0,0 +1,33 @@ +# Service REST API + +## Authentication +- AWS signatureV4 +- We use "minio" as region. Here region is set only for signature calculation. 
+ +## List of management APIs +- Service + - Stop + - Restart + - Status + +- Locks + - List + - Clear + +- Healing + +### Service Management APIs +* Stop + - POST /?service + - x-minio-operation: stop + - Response: On success 200 + +* Restart + - POST /?service + - x-minio-operation: restart + - Response: On success 200 + +* Status + - GET /?service + - x-minio-operation: status + - Response: On success 200, return json formatted StorageInfo object. From f7766b49aa341057887793de03a06948735f847f Mon Sep 17 00:00:00 2001 From: Aditya Manthramurthy Date: Fri, 16 Dec 2016 23:29:02 +0530 Subject: [PATCH 012/100] Amend ZFS line in README - to make it about Minio on FreeNAS (#3461) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9abe0bec2..cc24414c2 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,8 @@ minio.exe server D:\Photos chmod 755 minio ./minio server ~/Photos ``` -Please visit official zfs FreeBSD guide for more details [here](https://www.freebsd.org/doc/handbook/zfs-quickstart.html) + +You can run Minio on FreeBSD with FreeNAS storage-backend - see [here](https://docs.minio.io/docs/how-to-run-minio-in-freenas) for more details. ## Install from Source From 7a17b2a585ab71aa78cafc694935a93b49acdbe5 Mon Sep 17 00:00:00 2001 From: koolhead17 Date: Fri, 16 Dec 2016 23:29:37 +0530 Subject: [PATCH 013/100] Browser: Added character limit for Bucket Name. (#3454) --- cmd/web-handlers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index 4f2343123..eb6de45ee 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -805,7 +805,7 @@ func toJSONError(err error, params ...string) (jerr *json2.Error) { case "InvalidBucketName": if len(params) > 0 { jerr = &json2.Error{ - Message: fmt.Sprintf("Bucket Name %s is invalid. 
Lowercase letters, period and numerals are the only allowed characters.", params[0]), + Message: fmt.Sprintf("Bucket Name %s is invalid. Lowercase letters, period, numerals are the only allowed characters and should be minimum 3 characters in length.", params[0]), } } // Bucket not found custom error message. From 1b2b16998fa73a252df8d26141bad6b79d03eb62 Mon Sep 17 00:00:00 2001 From: Bala FA Date: Sun, 18 Dec 2016 00:30:16 +0530 Subject: [PATCH 014/100] Remove regexp usage (#3456) This patch removes regexp usage in cacheControlHandler.ServeHTTP() and server-main.checkEndpointsSyntax() --- cmd/generic-handlers.go | 24 +++++++-------- cmd/server-main.go | 65 +++++++++++++++++++++++----------------- cmd/server-main_test.go | 66 ++++++++++++++++++++++------------------- 3 files changed, 83 insertions(+), 72 deletions(-) diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index 23a10dfa2..6e17fbea9 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -19,7 +19,6 @@ package cmd import ( "net/http" "path" - "regexp" "strings" "time" @@ -142,21 +141,18 @@ func setBrowserCacheControlHandler(h http.Handler) http.Handler { func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" && guessIsBrowserReq(r) && globalIsBrowserEnabled { // For all browser requests set appropriate Cache-Control policies - match, err := regexp.Match(reservedBucket+`/([^/]+\.js|favicon.ico)`, []byte(r.URL.Path)) - if err != nil { - errorIf(err, "Unable to match incoming URL %s", r.URL) - writeErrorResponse(w, r, ErrInternalError, r.URL.Path) - return - } - if match { - // For assets set cache expiry of one year. For each release, the name - // of the asset name will change and hence it can not be served from cache. - w.Header().Set("Cache-Control", "max-age=31536000") - } else if strings.HasPrefix(r.URL.Path, reservedBucket+"/") { - // For non asset requests we serve index.html which will never be cached. 
- w.Header().Set("Cache-Control", "no-store") + if strings.HasPrefix(r.URL.Path, reservedBucket+"/") { + if strings.HasSuffix(r.URL.Path, ".js") || r.URL.Path == reservedBucket+"/favicon.ico" { + // For assets set cache expiry of one year. For each release, the name + // of the asset name will change and hence it can not be served from cache. + w.Header().Set("Cache-Control", "max-age=31536000") + } else { + // For non asset requests we serve index.html which will never be cached. + w.Header().Set("Cache-Control", "no-store") + } } } + h.handler.ServeHTTP(w, r) } diff --git a/cmd/server-main.go b/cmd/server-main.go index a752a15c1..2e8ca8c1e 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -23,9 +23,9 @@ import ( "net/http" "net/url" "os" + "path" "strings" - "regexp" "runtime" "github.com/minio/cli" @@ -260,35 +260,46 @@ func isDistributedSetup(eps []*url.URL) bool { return false } -// Check if the endpoints are following expected syntax, i.e -// valid scheme, valid path across all platforms. -func checkEndpointsSyntax(eps []*url.URL, disks []string) error { - for i, u := range eps { - switch u.Scheme { - case "": - // "/" is not allowed. - if u.Path == "" || u.Path == "/" { - return fmt.Errorf("Root path is not allowed : %s (%s)", u.Path, disks[i]) +// Check if endpoint is in expected syntax by valid scheme/path across all platforms. +func checkEndpointURL(endpointURL *url.URL) (err error) { + // applicable to all OS. + if endpointURL.Scheme == "" || endpointURL.Scheme == "http" || endpointURL.Scheme == "https" { + urlPath := path.Clean(endpointURL.Path) + if urlPath == "" || urlPath == "." || urlPath == "/" || urlPath == `\` { + err = fmt.Errorf("Empty or root path is not allowed") + } + + return err + } + + // Applicable to Windows only. + if runtime.GOOS == "windows" { + // On Windows, endpoint can be a path with drive eg. C:\Export and its URL.Scheme is 'C'. + // Check if URL.Scheme is a single letter alphabet to represent a drive. 
+ // Note: URL.Parse() converts scheme into lower case always. + if len(endpointURL.Scheme) == 1 && endpointURL.Scheme[0] >= 'a' && endpointURL.Scheme[0] <= 'z' { + // If endpoint is C:\ or C:\export, URL.Path does not have path information like \ or \export + // hence we directly work with endpoint. + urlPath := strings.SplitN(path.Clean(endpointURL.String()), ":", 2)[1] + if urlPath == "" || urlPath == "." || urlPath == "/" || urlPath == `\` { + err = fmt.Errorf("Empty or root path is not allowed") } - case "http", "https": - // "http://server1/" is not allowed - if u.Path == "" || u.Path == "/" || u.Path == "\\" { - return fmt.Errorf("Root path is not allowed : %s (%s)", u.Path, disks[i]) - } - default: - if runtime.GOOS == "windows" { - // On windows for "C:\export" scheme will be "C" - matched, err := regexp.MatchString("^[a-zA-Z]$", u.Scheme) - if err != nil { - return fmt.Errorf("Invalid scheme : %s (%s), ERROR %s", u.Scheme, disks[i], err) - } - if matched { - break - } - } - return fmt.Errorf("Invalid scheme : %s (%s)", u.Scheme, disks[i]) + + return err } } + + return fmt.Errorf("Invalid scheme") +} + +// Check if endpoints are in expected syntax by valid scheme/path across all platforms. +func checkEndpointsSyntax(eps []*url.URL, disks []string) error { + for i, u := range eps { + if err := checkEndpointURL(u); err != nil { + return fmt.Errorf("%s: %s (%s)", err.Error(), u.Path, disks[i]) + } + } + return nil } diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index 5019f55ce..d990764b2 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -200,48 +200,52 @@ func TestParseStorageEndpoints(t *testing.T) { // Test check endpoints syntax function for syntax verification // across various scenarios of inputs. 
func TestCheckEndpointsSyntax(t *testing.T) { - var testCases []string - if runtime.GOOS == "windows" { - testCases = []string{ - "\\export", - "D:\\export", - "D:\\", - "D:", - "\\", - } - } else { - testCases = []string{ - "/export", - } - } - testCasesCommon := []string{ + successCases := []string{ "export", + "/export", "http://localhost/export", "https://localhost/export", } - testCases = append(testCases, testCasesCommon...) - for _, disk := range testCases { + + failureCases := []string{ + "/", + "http://localhost", + "http://localhost/", + "ftp://localhost/export", + "server:/export", + } + + if runtime.GOOS == "windows" { + successCases = append(successCases, + `\export`, + `D:\export`, + ) + + failureCases = append(failureCases, + "D:", + `D:\`, + `\`, + ) + } + + for _, disk := range successCases { eps, err := parseStorageEndpoints([]string{disk}) if err != nil { t.Fatalf("Unable to parse %s, error %s", disk, err) } if err = checkEndpointsSyntax(eps, []string{disk}); err != nil { - t.Errorf("Invalid endpoints %s", err) + t.Errorf("expected: , got: %s", err) } } - eps, err := parseStorageEndpoints([]string{"/"}) - if err != nil { - t.Fatalf("Unable to parse /, error %s", err) - } - if err = checkEndpointsSyntax(eps, []string{"/"}); err == nil { - t.Error("Should fail, passed instead") - } - eps, err = parseStorageEndpoints([]string{"http://localhost/"}) - if err != nil { - t.Fatalf("Unable to parse http://localhost/, error %s", err) - } - if err = checkEndpointsSyntax(eps, []string{"http://localhost/"}); err == nil { - t.Error("Should fail, passed instead") + + for _, disk := range failureCases { + eps, err := parseStorageEndpoints([]string{disk}) + if err != nil { + t.Fatalf("Unable to parse %s, error %s", disk, err) + } + if err = checkEndpointsSyntax(eps, []string{disk}); err == nil { + t.Errorf("expected: , got: ") + } } } From 9c9f390350cee8a526a908611e54511f8b048037 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Sat, 17 Dec 2016 13:43:26 -0800 
Subject: [PATCH 015/100] fs: validate filesystem path argument properly. (#3470) FS should fail for invalid paths like - file:/// - ftp:// - http:// --- cmd/server-main.go | 13 ++++++++----- cmd/server-main_test.go | 19 +++++++++++++++---- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/cmd/server-main.go b/cmd/server-main.go index 2e8ca8c1e..c020e240b 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -111,8 +111,7 @@ func parseStorageEndpoints(eps []string) (endpoints []*url.URL, err error) { // we return error as port is configurable only // using "--address :port" if port != "" { - errorIf(fmt.Errorf("Invalid argument %s, port configurable using --address :", u.Host), "") - return nil, errInvalidArgument + return nil, fmt.Errorf("Invalid Argument %s, port configurable using --address :", u.Host) } u.Host = net.JoinHostPort(u.Host, globalMinioPort) } else { @@ -120,8 +119,7 @@ func parseStorageEndpoints(eps []string) (endpoints []*url.URL, err error) { // i.e if "--address host:port" is specified // port info in u.Host is mandatory else return error. if port == "" { - errorIf(fmt.Errorf("Invalid argument %s, port mandatory when --address : is used", u.Host), "") - return nil, errInvalidArgument + return nil, fmt.Errorf("Invalid Argument %s, port mandatory when --address : is used", u.Host) } } } @@ -326,7 +324,12 @@ func checkServerSyntax(c *cli.Context) { if len(endpoints) > 1 { // Validate if we have sufficient disks for XL setup. err = checkSufficientDisks(endpoints) - fatalIf(err, "Storage endpoint error.") + fatalIf(err, "Invalid number of disks supplied.") + } else { + // Validate if we have invalid disk for FS setup. 
+ if endpoints[0].Host != "" && endpoints[0].Scheme != "" { + fatalIf(errInvalidArgument, "%s, FS setup expects a filesystem path", endpoints[0]) + } } if !isDistributedSetup(endpoints) { diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index d990764b2..d666d31c9 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -17,6 +17,7 @@ package cmd import ( + "errors" "flag" "net/http" "os" @@ -182,15 +183,25 @@ func TestParseStorageEndpoints(t *testing.T) { expectedErr error }{ {"", "http://localhost/export", nil}, - {"testhost", "http://localhost/export", errInvalidArgument}, - {"", "http://localhost:9000/export", errInvalidArgument}, + { + "testhost", + "http://localhost/export", + errors.New("Invalid Argument localhost, port mandatory when --address : is used"), + }, + { + "", + "http://localhost:9000/export", + errors.New("Invalid Argument localhost:9000, port configurable using --address :"), + }, {"testhost", "http://localhost:9000/export", nil}, } for i, test := range testCases { globalMinioHost = test.globalMinioHost _, err := parseStorageEndpoints([]string{test.host}) - if err != test.expectedErr { - t.Errorf("Test %d : got %v, expected %v", i+1, err, test.expectedErr) + if err != nil { + if err.Error() != test.expectedErr.Error() { + t.Errorf("Test %d : got %v, expected %v", i+1, err, test.expectedErr) + } } } // Should be reset back to "" so that we don't affect other tests. From 1875a47495b1308f61774d35e89eb1ccbe51acff Mon Sep 17 00:00:00 2001 From: Bala FA Date: Sun, 18 Dec 2016 07:47:40 +0530 Subject: [PATCH 016/100] rpcclient: fix leaky connection (#3471) Previously, more than one goroutine calls RPCClient.dial(), each goroutine gets a new rpc.Client but only one such client is stored into RPCClient object. This leads to leaky connection at the server side. This is fixed by taking lock at top of dial() and release on return. 
--- cmd/auth-rpc-client.go | 2 +- cmd/browser-peer-rpc_test.go | 4 +- cmd/lock-rpc-server.go | 2 +- cmd/net-rpc-client.go | 110 +++++++++++++------------------ cmd/s3-peer-rpc-handlers_test.go | 2 +- 5 files changed, 51 insertions(+), 69 deletions(-) diff --git a/cmd/auth-rpc-client.go b/cmd/auth-rpc-client.go index c397de20b..905eb2083 100644 --- a/cmd/auth-rpc-client.go +++ b/cmd/auth-rpc-client.go @@ -111,7 +111,7 @@ func newAuthClient(cfg *authConfig) *AuthRPCClient { // Save the config. config: cfg, // Initialize a new reconnectable rpc client. - rpc: newClient(cfg.address, cfg.path, cfg.secureConn), + rpc: newRPCClient(cfg.address, cfg.path, cfg.secureConn), // Allocated auth client not logged in yet. isLoggedIn: false, } diff --git a/cmd/browser-peer-rpc_test.go b/cmd/browser-peer-rpc_test.go index 0f6cc74f8..2f34da764 100644 --- a/cmd/browser-peer-rpc_test.go +++ b/cmd/browser-peer-rpc_test.go @@ -70,7 +70,7 @@ func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) { // Validate for invalid token. args := SetAuthPeerArgs{Creds: creds} args.Token = "garbage" - rclient := newClient(s.testAuthConf.address, s.testAuthConf.path, false) + rclient := newRPCClient(s.testAuthConf.address, s.testAuthConf.path, false) defer rclient.Close() err := rclient.Call("BrowserPeer.SetAuthPeer", &args, &GenericReply{}) if err != nil { @@ -89,7 +89,7 @@ func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) { } // Validate for failure in login handler with previous credentials. 
- rclient = newClient(s.testAuthConf.address, s.testAuthConf.path, false) + rclient = newRPCClient(s.testAuthConf.address, s.testAuthConf.path, false) defer rclient.Close() rargs := &RPCLoginArgs{ Username: s.testAuthConf.accessKey, diff --git a/cmd/lock-rpc-server.go b/cmd/lock-rpc-server.go index 42cba5303..6e362557c 100644 --- a/cmd/lock-rpc-server.go +++ b/cmd/lock-rpc-server.go @@ -279,7 +279,7 @@ func (l *lockServer) lockMaintenance(interval time.Duration) { // Validate if long lived locks are indeed clean. for _, nlrip := range nlripLongLived { // Initialize client based on the long live locks. - c := newClient(nlrip.lri.node, nlrip.lri.rpcPath, isSSL()) + c := newRPCClient(nlrip.lri.node, nlrip.lri.rpcPath, isSSL()) var expired bool diff --git a/cmd/net-rpc-client.go b/cmd/net-rpc-client.go index b74b531c2..1ba1bb8e5 100644 --- a/cmd/net-rpc-client.go +++ b/cmd/net-rpc-client.go @@ -30,19 +30,21 @@ import ( "time" ) +// defaultDialTimeout is used for non-secure connection. +const defaultDialTimeout = 3 * time.Second + // RPCClient is a wrapper type for rpc.Client which provides reconnect on first failure. type RPCClient struct { - mu sync.Mutex - rpcPrivate *rpc.Client - node string - rpcPath string - secureConn bool + mu sync.Mutex + netRPCClient *rpc.Client + node string + rpcPath string + secureConn bool } // newClient constructs a RPCClient object with node and rpcPath initialized. -// It _doesn't_ connect to the remote endpoint. See Call method to see when the -// connect happens. -func newClient(node, rpcPath string, secureConn bool) *RPCClient { +// It does lazy connect to the remote endpoint on Call(). 
+func newRPCClient(node, rpcPath string, secureConn bool) *RPCClient { return &RPCClient{ node: node, rpcPath: rpcPath, @@ -50,34 +52,19 @@ func newClient(node, rpcPath string, secureConn bool) *RPCClient { } } -// clearRPCClient clears the pointer to the rpc.Client object in a safe manner -func (rpcClient *RPCClient) clearRPCClient() { +// dial tries to establish a connection to the server in a safe manner. +// If there is a valid rpc.Cliemt, it returns that else creates a new one. +func (rpcClient *RPCClient) dial() (*rpc.Client, error) { rpcClient.mu.Lock() - rpcClient.rpcPrivate = nil - rpcClient.mu.Unlock() -} + defer rpcClient.mu.Unlock() -// getRPCClient gets the pointer to the rpc.Client object in a safe manner -func (rpcClient *RPCClient) getRPCClient() *rpc.Client { - rpcClient.mu.Lock() - rpcLocalStack := rpcClient.rpcPrivate - rpcClient.mu.Unlock() - return rpcLocalStack -} - -// dialRPCClient tries to establish a connection to the server in a safe manner -func (rpcClient *RPCClient) dialRPCClient() (*rpc.Client, error) { - rpcClient.mu.Lock() - // After acquiring lock, check whether another thread may not have already dialed and established connection - if rpcClient.rpcPrivate != nil { - rpcClient.mu.Unlock() - return rpcClient.rpcPrivate, nil + // Nothing to do as we already have valid connection. + if rpcClient.netRPCClient != nil { + return rpcClient.netRPCClient, nil } - rpcClient.mu.Unlock() var err error var conn net.Conn - if rpcClient.secureConn { hostname, _, splitErr := net.SplitHostPort(rpcClient.node) if splitErr != nil { @@ -92,14 +79,14 @@ func (rpcClient *RPCClient) dialRPCClient() (*rpc.Client, error) { // ServerName in tls.Config needs to be specified to support SNI certificates conn, err = tls.Dial("tcp", rpcClient.node, &tls.Config{ServerName: hostname, RootCAs: globalRootCAs}) } else { - // Have a dial timeout with 3 secs. - conn, err = net.DialTimeout("tcp", rpcClient.node, 3*time.Second) + // Dial with 3 seconds timeout. 
+ conn, err = net.DialTimeout("tcp", rpcClient.node, defaultDialTimeout) } if err != nil { // Print RPC connection errors that are worthy to display in log switch err.(type) { case x509.HostnameError: - errorIf(err, "Unable to establish RPC to %s", rpcClient.node) + errorIf(err, "Unable to establish secure connection to %s", rpcClient.node) } return nil, &net.OpError{ Op: "dial-http", @@ -108,25 +95,27 @@ func (rpcClient *RPCClient) dialRPCClient() (*rpc.Client, error) { Err: err, } } + io.WriteString(conn, "CONNECT "+rpcClient.rpcPath+" HTTP/1.0\n\n") // Require successful HTTP response before switching to RPC protocol. resp, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: "CONNECT"}) if err == nil && resp.Status == "200 Connected to Go RPC" { - rpc := rpc.NewClient(conn) - if rpc == nil { + netRPCClient := rpc.NewClient(conn) + if netRPCClient == nil { return nil, &net.OpError{ Op: "dial-http", Net: rpcClient.node + " " + rpcClient.rpcPath, Addr: nil, - Err: fmt.Errorf("Unable to initialize new rpcClient, %s", errUnexpected), + Err: fmt.Errorf("Unable to initialize new rpc.Client, %s", errUnexpected), } } - rpcClient.mu.Lock() - rpcClient.rpcPrivate = rpc - rpcClient.mu.Unlock() - return rpc, nil + + rpcClient.netRPCClient = netRPCClient + + return netRPCClient, nil } + if err == nil { err = errors.New("unexpected HTTP response: " + resp.Status) } @@ -141,38 +130,31 @@ func (rpcClient *RPCClient) dialRPCClient() (*rpc.Client, error) { // Call makes a RPC call to the remote endpoint using the default codec, namely encoding/gob. func (rpcClient *RPCClient) Call(serviceMethod string, args interface{}, reply interface{}) error { - // Make a copy below so that we can safely (continue to) work with the rpc.Client. - // Even in the case the two threads would simultaneously find that the connection is not initialised, - // they would both attempt to dial and only one of them would succeed in doing so. 
- rpcLocalStack := rpcClient.getRPCClient() - - // If the rpc.Client is nil, we attempt to (re)connect with the remote endpoint. - if rpcLocalStack == nil { - var err error - rpcLocalStack, err = rpcClient.dialRPCClient() - if err != nil { - return err - } + // Get a new or existing rpc.Client. + netRPCClient, err := rpcClient.dial() + if err != nil { + return err } - // If the RPC fails due to a network-related error - return rpcLocalStack.Call(serviceMethod, args, reply) + return netRPCClient.Call(serviceMethod, args, reply) } -// Close closes the underlying socket file descriptor. +// Close closes underlying rpc.Client. func (rpcClient *RPCClient) Close() error { - // See comment above for making a copy on local stack - rpcLocalStack := rpcClient.getRPCClient() + rpcClient.mu.Lock() - // If rpc client has not connected yet there is nothing to close. - if rpcLocalStack == nil { - return nil + if rpcClient.netRPCClient != nil { + // We make a copy of rpc.Client and unlock it immediately so that another + // goroutine could try to dial or close in parallel. + netRPCClient := rpcClient.netRPCClient + rpcClient.netRPCClient = nil + rpcClient.mu.Unlock() + + return netRPCClient.Close() } - // Reset rpcClient.rpc to allow for subsequent calls to use a new - // (socket) connection. - rpcClient.clearRPCClient() - return rpcLocalStack.Close() + rpcClient.mu.Unlock() + return nil } // Node returns the node (network address) of the connection diff --git a/cmd/s3-peer-rpc-handlers_test.go b/cmd/s3-peer-rpc-handlers_test.go index c310cfbfe..ff46414bc 100644 --- a/cmd/s3-peer-rpc-handlers_test.go +++ b/cmd/s3-peer-rpc-handlers_test.go @@ -63,7 +63,7 @@ func TestS3PeerRPC(t *testing.T) { func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) { // Validate for invalid token. 
args := GenericArgs{Token: "garbage", Timestamp: time.Now().UTC()} - rclient := newClient(s.testAuthConf.address, s.testAuthConf.path, false) + rclient := newRPCClient(s.testAuthConf.address, s.testAuthConf.path, false) defer rclient.Close() err := rclient.Call("S3.SetBucketNotificationPeer", &args, &GenericReply{}) if err != nil { From 4692fdbb8fe2506dc8c8cf6beefb9d5f22e777f6 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Sun, 18 Dec 2016 22:39:56 +0100 Subject: [PATCH 017/100] PostForm: Follow success_action_status requirement (#3467) S3 spec requires that Post Object response depends on the passed success_action_status. This commit implements that requirement. --- cmd/api-response.go | 21 ++++++++++++++++----- cmd/bucket-handlers.go | 19 +++++++++++++++---- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/cmd/api-response.go b/cmd/api-response.go index d89dafa5a..120132bb8 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -251,6 +251,14 @@ type DeleteObjectsResponse struct { Errors []DeleteError `xml:"Error,omitempty"` } +// PostResponse container for POST object request when success_action_status is set to 201 +type PostResponse struct { + Bucket string + Key string + ETag string + Location string +} + // getLocation get URL location. func getLocation(r *http.Request) string { return path.Clean(r.URL.Path) // Clean any trailing slashes. @@ -474,21 +482,24 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier, return deleteResp } -// writeSuccessResponse write success headers and response if any. -func writeSuccessResponse(w http.ResponseWriter, response []byte) { +func writeResponse(w http.ResponseWriter, statusCode int, response []byte) { setCommonHeaders(w) + w.WriteHeader(statusCode) if response == nil { - w.WriteHeader(http.StatusOK) return } w.Write(response) w.(http.Flusher).Flush() } +// writeSuccessResponse write success headers and response if any. 
+func writeSuccessResponse(w http.ResponseWriter, response []byte) { + writeResponse(w, http.StatusOK, response) +} + // writeSuccessNoContent write success headers with http status 204 func writeSuccessNoContent(w http.ResponseWriter) { - setCommonHeaders(w) - w.WriteHeader(http.StatusNoContent) + writeResponse(w, http.StatusNoContent, nil) } // writeErrorRespone write error headers diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index afbc3dded..f8f6baacc 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -448,11 +448,22 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"") w.Header().Set("Location", getObjectLocation(bucket, object)) - // Set common headers. - setCommonHeaders(w) + // Decide what http response to send depending on success_action_status parameter + switch formValues[http.CanonicalHeaderKey("success_action_status")] { + case "201": + resp := encodeResponse(PostResponse{ + Bucket: bucket, + Key: object, + ETag: "\"" + objInfo.MD5Sum + "\"", + Location: getObjectLocation(bucket, object), + }) + writeResponse(w, http.StatusCreated, resp) - // Write successful response. - writeSuccessNoContent(w) + case "200": + writeSuccessResponse(w, nil) + default: + writeSuccessNoContent(w) + } // Notify object created event. eventNotify(eventData{ From 5404dddcea7d731509a880923deff0571a70cf83 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Tue, 20 Dec 2016 01:14:04 +0100 Subject: [PATCH 018/100] PostForm: Save supported headers in obj metadata (#3474) Supported Headers like Content-Type, Cache-Control, Content-Encoding, X-Amz-* , etc.. 
are now saved in object metadata --- cmd/bucket-handlers.go | 5 ++--- cmd/handler-utils.go | 26 ++++++++++++++++++++++++++ cmd/post-policy_test.go | 16 ++++++++++++++++ 3 files changed, 44 insertions(+), 3 deletions(-) diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index f8f6baacc..ede9c9643 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -429,9 +429,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h } } - // Save metadata. - metadata := make(map[string]string) - // Nothing to store right now. + // Extract metadata to be saved from received Form. + metadata := extractMetadataFromForm(formValues) sha256sum := "" diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index a087e1b40..4fc0083f9 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -92,6 +92,32 @@ func extractMetadataFromHeader(header http.Header) map[string]string { return metadata } +// extractMetadataFromForm extracts metadata from Post Form. +func extractMetadataFromForm(formValues map[string]string) map[string]string { + metadata := make(map[string]string) + // Save standard supported headers. + for _, supportedHeader := range supportedHeaders { + canonicalHeader := http.CanonicalHeaderKey(supportedHeader) + // Form field names are case insensitive, look for both canonical + // and non canonical entries. + if _, ok := formValues[canonicalHeader]; ok { + metadata[supportedHeader] = formValues[canonicalHeader] + } else if _, ok := formValues[supportedHeader]; ok { + metadata[supportedHeader] = formValues[canonicalHeader] + } + } + // Go through all other form values for any additional headers that needs to be saved. 
+ for key := range formValues { + cKey := http.CanonicalHeaderKey(key) + if strings.HasPrefix(cKey, "X-Amz-Meta-") { + metadata[cKey] = formValues[key] + } else if strings.HasPrefix(cKey, "X-Minio-Meta-") { + metadata[cKey] = formValues[key] + } + } + return metadata +} + // Extract form fields and file data from a HTTP POST Policy func extractPostPolicyFormValues(reader *multipart.Reader) (filePart io.Reader, fileName string, formValues map[string]string, err error) { /// HTML Form values diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 456302b62..2453d224c 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -178,6 +178,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr testCasesV4 := []struct { objectName string data []byte + expectedHeaders map[string]string expectedRespStatus int accessKey string secretKey string @@ -188,6 +189,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr objectName: "test", data: []byte("Hello, World"), expectedRespStatus: http.StatusNoContent, + expectedHeaders: map[string]string{"X-Amz-Meta-Uuid": "1234"}, accessKey: credentials.AccessKeyID, secretKey: credentials.SecretAccessKey, malformedBody: false, @@ -229,6 +231,18 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr if rec.Code != testCase.expectedRespStatus { t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code) } + // When the operation is successful, check if sending metadata is successful too + if rec.Code == http.StatusNoContent { + objInfo, err := obj.GetObjectInfo(bucketName, testCase.objectName+"/upload.txt") + if err != nil { + t.Error("Unexpected error: ", err) + } + for k, v := range testCase.expectedHeaders { + if objInfo.UserDefined[k] != v { + t.Errorf("Expected to have header %s with value %s, but found value `%s` instead", k, v, 
objInfo.UserDefined[k]) + } + } + } } // Test cases for signature-V4. @@ -475,6 +489,8 @@ func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData [] "x-amz-signature": signature, "x-amz-date": t.Format(iso8601DateFormat), "x-amz-algorithm": "AWS4-HMAC-SHA256", + "x-amz-meta-uuid": "1234", + "Content-Encoding": "gzip", } // Create the multipart form. From 85c6bb98099345502d6eceb0f2c92b3d91402434 Mon Sep 17 00:00:00 2001 From: Krishnan Parthasarathi Date: Tue, 20 Dec 2016 06:34:31 +0530 Subject: [PATCH 019/100] server: Sort disk arguments for consistent ordering (#3469) This is important in a distributed setup, where the server hosting the first disk formats a fresh setup. Sorting ensures that all servers arrive at the same 'first' server. Note: This change doesn't protect against different disk arguments with some disks being same across servers. --- cmd/server-main.go | 7 ++++ cmd/url-sort.go | 34 +++++++++++++++ cmd/url-sort_test.go | 99 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 140 insertions(+) create mode 100644 cmd/url-sort.go create mode 100644 cmd/url-sort_test.go diff --git a/cmd/server-main.go b/cmd/server-main.go index c020e240b..23ba3cac1 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -24,6 +24,7 @@ import ( "net/url" "os" "path" + "sort" "strings" "runtime" @@ -429,6 +430,12 @@ func serverMain(c *cli.Context) { fatalIf(errInvalidArgument, "None of the disks passed as command line args are local to this server.") } + // Sort endpoints for consistent ordering across multiple + // nodes in a distributed setup. This is to avoid format.json + // corruption if the disks aren't supplied in the same order + // on all nodes. 
+ sort.Sort(byHostPath(endpoints)) + storageDisks, err := initStorageDisks(endpoints) fatalIf(err, "Unable to initialize storage disk(s).") diff --git a/cmd/url-sort.go b/cmd/url-sort.go new file mode 100644 index 000000000..a1f3f306c --- /dev/null +++ b/cmd/url-sort.go @@ -0,0 +1,34 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import "net/url" + +type byHostPath []*url.URL + +func (s byHostPath) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s byHostPath) Len() int { + return len(s) +} + +// Note: Host in url.URL includes the port too. +func (s byHostPath) Less(i, j int) bool { + return (s[i].Host + s[i].Path) < (s[j].Host + s[j].Path) +} diff --git a/cmd/url-sort_test.go b/cmd/url-sort_test.go new file mode 100644 index 000000000..fb1b6e5c1 --- /dev/null +++ b/cmd/url-sort_test.go @@ -0,0 +1,99 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "net/url" + "reflect" + "sort" + "testing" +) + +// TestSortByHostPath - tests if ordering of urls are based on +// host+path concatenated. +func TestSortByHostPath(t *testing.T) { + testCases := []struct { + given []string + expected []*url.URL + }{ + { + given: []string{ + "http://abcd.com/a/b/d", + "http://abcd.com/a/b/c", + "http://abcd.com/a/b/e", + }, + expected: []*url.URL{ + { + Scheme: "http", + Host: "abcd.com:9000", + Path: "/a/b/c", + }, + { + Scheme: "http", + Host: "abcd.com:9000", + Path: "/a/b/d", + }, + { + Scheme: "http", + Host: "abcd.com:9000", + Path: "/a/b/e", + }, + }, + }, + { + given: []string{ + "http://defg.com/a/b/c", + "http://abcd.com/a/b/c", + "http://hijk.com/a/b/c", + }, + expected: []*url.URL{ + { + Scheme: "http", + Host: "abcd.com:9000", + Path: "/a/b/c", + }, + { + Scheme: "http", + Host: "defg.com:9000", + Path: "/a/b/c", + }, + { + Scheme: "http", + Host: "hijk.com:9000", + Path: "/a/b/c", + }, + }, + }, + } + + saveGlobalPort := globalMinioPort + globalMinioPort = "9000" + for i, test := range testCases { + eps, err := parseStorageEndpoints(test.given) + if err != nil { + t.Fatalf("Test %d - Failed to parse storage endpoint %v", i+1, err) + } + sort.Sort(byHostPath(eps)) + if !sort.IsSorted(byHostPath(eps)) { + t.Errorf("Test %d - Expected order %v but got %v", i+1, test.expected, eps) + } + if !reflect.DeepEqual(eps, test.expected) { + t.Errorf("Test %d - Expected order %v but got %v", i+1, test.expected, eps) + } + } + globalMinioPort = saveGlobalPort +} From faa6b1e925f01da76ca7190d73e6bf38cb740ab4 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 19 Dec 2016 19:32:55 -0800 Subject: [PATCH 020/100] vendorize deps for snappy, blake2b and sha256 (#3476) Bring in new optimization and portability changes. 
Fixes https://github.com/minio/minio-go/issues/578 --- cmd/fs-v1-multipart.go | 3 +- cmd/fs-v1.go | 2 +- cmd/hasher.go | 3 +- cmd/xl-v1-multipart.go | 2 +- cmd/xl-v1-object.go | 2 +- .../github.com/golang/snappy/decode_other.go | 101 ++ .../github.com/golang/snappy/encode_other.go | 238 +++ .../klauspost/reedsolomon/README.md | 2 + .../github.com/minio/blake2b-simd/README.md | 2 +- .../minio/blake2b-simd/compressAvx2_amd64.go | 11 +- .../minio/blake2b-simd/compressAvx_amd64.go | 11 +- .../minio/blake2b-simd/compressAvx_amd64.s | 1348 +++++++-------- .../minio/blake2b-simd/compressSse_amd64.go | 11 +- .../minio/blake2b-simd/compressSse_amd64.s | 1526 ++++++++--------- .../minio/blake2b-simd/compress_generic.go | 9 +- .../github.com/minio/sha256-simd/cpuid_386.s | 22 +- .../minio/sha256-simd/cpuid_amd64.s | 22 +- .../github.com/minio/sha256-simd/cpuid_arm.go | 3 +- .../minio/sha256-simd/cpuid_linux_arm64.go | 49 + .../minio/sha256-simd/cpuid_others_arm64.go | 35 + .../{cpuid_arm64.go => cpuid_ppc64.go} | 3 +- .../minio/sha256-simd/cpuid_ppc64le.go | 32 + vendor/github.com/minio/sha256-simd/sha256.go | 3 +- .../minio/sha256-simd/sha256block_ppc64.go | 22 + .../minio/sha256-simd/sha256block_ppc64le.go | 22 + vendor/vendor.json | 21 +- 26 files changed, 1915 insertions(+), 1590 deletions(-) create mode 100644 vendor/github.com/golang/snappy/decode_other.go create mode 100644 vendor/github.com/golang/snappy/encode_other.go create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_others_arm64.go rename vendor/github.com/minio/sha256-simd/{cpuid_arm64.go => cpuid_ppc64.go} (93%) create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_ppc64le.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_ppc64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_ppc64le.go diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go index 4f7e45dc3..963f6b291 100644 
--- a/cmd/fs-v1-multipart.go +++ b/cmd/fs-v1-multipart.go @@ -18,7 +18,6 @@ package cmd import ( "crypto/md5" - "crypto/sha256" "encoding/hex" "fmt" "hash" @@ -26,6 +25,8 @@ import ( "path" "strings" "time" + + "github.com/minio/sha256-simd" ) // listMultipartUploads - lists all multipart uploads. diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index ead0c5745..b7588a47f 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -18,7 +18,6 @@ package cmd import ( "crypto/md5" - "crypto/sha256" "encoding/hex" "errors" "fmt" @@ -29,6 +28,7 @@ import ( "strings" "github.com/minio/minio/pkg/mimedb" + "github.com/minio/sha256-simd" ) // fsObjects - Implements fs object layer. diff --git a/cmd/hasher.go b/cmd/hasher.go index 83e2c474a..2988ede1b 100644 --- a/cmd/hasher.go +++ b/cmd/hasher.go @@ -18,9 +18,10 @@ package cmd import ( "crypto/md5" - "crypto/sha256" "encoding/base64" "encoding/hex" + + "github.com/minio/sha256-simd" ) // getSHA256Hash returns SHA-256 hash of given data. diff --git a/cmd/xl-v1-multipart.go b/cmd/xl-v1-multipart.go index 802c34ee4..1ce509639 100644 --- a/cmd/xl-v1-multipart.go +++ b/cmd/xl-v1-multipart.go @@ -18,7 +18,6 @@ package cmd import ( "crypto/md5" - "crypto/sha256" "encoding/hex" "fmt" "hash" @@ -29,6 +28,7 @@ import ( "time" "github.com/minio/minio/pkg/mimedb" + "github.com/minio/sha256-simd" ) // listMultipartUploads - lists all multipart uploads. diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go index 6ee87336a..a39fef4a1 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/xl-v1-object.go @@ -18,7 +18,6 @@ package cmd import ( "crypto/md5" - "crypto/sha256" "encoding/hex" "hash" "io" @@ -30,6 +29,7 @@ import ( "github.com/minio/minio/pkg/bpool" "github.com/minio/minio/pkg/mimedb" "github.com/minio/minio/pkg/objcache" + "github.com/minio/sha256-simd" ) // list all errors which can be ignored in object operations. 
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 000000000..8c9f2049b --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 000000000..dbcae905e --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. 
+ const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. 
+ skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/reedsolomon/README.md b/vendor/github.com/klauspost/reedsolomon/README.md index cd6fc04cc..3e7f51841 100644 --- a/vendor/github.com/klauspost/reedsolomon/README.md +++ b/vendor/github.com/klauspost/reedsolomon/README.md @@ -193,6 +193,8 @@ Example of performance scaling on Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz - 4 ph # Links * [Backblaze Open Sources Reed-Solomon Erasure Coding Source Code](https://www.backblaze.com/blog/reed-solomon/). * [JavaReedSolomon](https://github.com/Backblaze/JavaReedSolomon). Compatible java library by Backblaze. +* [reedsolomon-c](https://github.com/jannson/reedsolomon-c). C version, compatible with output from this package. +* [Reed-Solomon Erasure Coding in Haskell](https://github.com/NicolasT/reedsolomon). Haskell port of the package with similar performance. * [go-erasure](https://github.com/somethingnew2-0/go-erasure). A similar library using cgo, slower in my tests. * [rsraid](https://github.com/goayame/rsraid). A similar library written in Go. Slower, but supports more shards. * [Screaming Fast Galois Field Arithmetic](http://www.snia.org/sites/default/files2/SDC2013/presentations/NewThinking/EthanMiller_Screaming_Fast_Galois_Field%20Arithmetic_SIMD%20Instructions.pdf). Basis for SSE3 optimizations. 
diff --git a/vendor/github.com/minio/blake2b-simd/README.md b/vendor/github.com/minio/blake2b-simd/README.md index b4f6ac558..31fcbf749 100644 --- a/vendor/github.com/minio/blake2b-simd/README.md +++ b/vendor/github.com/minio/blake2b-simd/README.md @@ -24,7 +24,7 @@ This is a summary of the performance improvements. Full details are shown below. asm2plan9s ---------- -In order to be able to work more easily with AVX2/AVX instructions, a separate tool was developed to convert AVX2/AVX instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/fwessels/asm2plan9s) for more information. +In order to be able to work more easily with AVX2/AVX instructions, a separate tool was developed to convert AVX2/AVX instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information. bt2sum ------ diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go b/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go index 1b3ebae0f..ec53599f8 100644 --- a/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go +++ b/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go @@ -23,11 +23,12 @@ package blake2b func compressAVX2Loop(p []uint8, in, iv, t, f, shffle, out []uint64) func compressAVX2(d *digest, p []uint8) { + var ( + in [8]uint64 + out [8]uint64 + shffle [8]uint64 + ) - in := make([]uint64, 8, 8) - out := make([]uint64, 8, 8) - - shffle := make([]uint64, 8, 8) // vector for PSHUFB instruction shffle[0] = 0x0201000706050403 shffle[1] = 0x0a09080f0e0d0c0b @@ -40,7 +41,7 @@ func compressAVX2(d *digest, p []uint8) { in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7] = d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] - compressAVX2Loop(p, in, iv[:], d.t[:], d.f[:], shffle, out) + compressAVX2Loop(p, in[:], iv[:], d.t[:], d.f[:], shffle[:], out[:]) d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], 
d.h[7] = out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7] } diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go b/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go index 7bed76c02..cfa12c04f 100644 --- a/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go +++ b/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go @@ -23,18 +23,19 @@ package blake2b func blockAVXLoop(p []uint8, in, iv, t, f, shffle, out []uint64) func compressAVX(d *digest, p []uint8) { + var ( + in [8]uint64 + out [8]uint64 + shffle [2]uint64 + ) - in := make([]uint64, 8, 8) - out := make([]uint64, 8, 8) - - shffle := make([]uint64, 2, 2) // vector for PSHUFB instruction shffle[0] = 0x0201000706050403 shffle[1] = 0x0a09080f0e0d0c0b in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7] = d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] - blockAVXLoop(p, in, iv[:], d.t[:], d.f[:], shffle, out) + blockAVXLoop(p, in[:], iv[:], d.t[:], d.f[:], shffle[:], out[:]) d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7] } diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s b/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s index 987d0bf47..f68e17392 100644 --- a/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s +++ b/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s @@ -43,750 +43,640 @@ // #define G1 \ - \ // G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); - BYTE $0xc4; BYTE $0xc1; BYTE $0x79; BYTE $0xd4; BYTE $0xc0 \ // VPADDQ XMM0,XMM0,XMM8 /* v0 += m[0], v1 += m[2] */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x71; BYTE $0xd4; BYTE $0xc9 \ // VPADDQ XMM1,XMM1,XMM9 /* v2 += m[4], v3 += m[6] */ - BYTE $0xc5; BYTE $0xf9; BYTE $0xd4; BYTE $0xc2 \ // VPADDQ XMM0,XMM0,XMM2 /* v0 += v4, v1 += v5 */ - BYTE $0xc5; BYTE $0xf1; BYTE $0xd4; BYTE $0xcb \ // VPADDQ XMM1,XMM1,XMM3 /* v2 += v6, v3 += v7 */ - BYTE $0xc5; BYTE $0xc9; BYTE 
$0xef; BYTE $0xf0 \ // VPXOR XMM6,XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ - BYTE $0xc5; BYTE $0xc1; BYTE $0xef; BYTE $0xf9 \ // VPXOR XMM7,XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ - BYTE $0xc5; BYTE $0xf9; BYTE $0x70; BYTE $0xf6; BYTE $0xb1 \ // VPSHUFD XMM6,XMM6,0xb1 /* v12 = v12<<(64-32) | v12>>32, v13 = v13<<(64-32) | v13>>32 */ - BYTE $0xc5; BYTE $0xf9; BYTE $0x70; BYTE $0xff; BYTE $0xb1 \ // VPSHUFD XMM7,XMM7,0xb1 /* v14 = v14<<(64-32) | v14>>32, v15 = v15<<(64-32) | v15>>32 */ - BYTE $0xc5; BYTE $0xd9; BYTE $0xd4; BYTE $0xe6 \ // VPADDQ XMM4,XMM4,XMM6 /* v8 += v12, v9 += v13 */ - BYTE $0xc5; BYTE $0xd1; BYTE $0xd4; BYTE $0xef \ // VPADDQ XMM5,XMM5,XMM7 /* v10 += v14, v11 += v15 */ - BYTE $0xc5; BYTE $0xe9; BYTE $0xef; BYTE $0xd4 \ // VPXOR XMM2,XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ - BYTE $0xc5; BYTE $0xe1; BYTE $0xef; BYTE $0xdd \ // VPXOR XMM3,XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ - BYTE $0xc4; BYTE $0xc2; BYTE $0x69; BYTE $0x00; BYTE $0xd4 \ // VPSHUFB XMM2,XMM2,XMM12 /* v4 = v4<<(64-24) | v4>>24, v5 = v5<<(64-24) | v5>>24 */ - BYTE $0xc4; BYTE $0xc2; BYTE $0x61; BYTE $0x00; BYTE $0xdc // VPSHUFB XMM3,XMM3,XMM12 /* v6 = v6<<(64-24) | v6>>24, v7 = v7<<(64-24) | v7>>24 */ + \ // G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); + LONG $0xd479c1c4; BYTE $0xc0 \ // VPADDQ XMM0,XMM0,XMM8 /* v0 += m[0], v1 += m[2] */ + LONG $0xd471c1c4; BYTE $0xc9 \ // VPADDQ XMM1,XMM1,XMM9 /* v2 += m[4], v3 += m[6] */ + LONG $0xc2d4f9c5 \ // VPADDQ XMM0,XMM0,XMM2 /* v0 += v4, v1 += v5 */ + LONG $0xcbd4f1c5 \ // VPADDQ XMM1,XMM1,XMM3 /* v2 += v6, v3 += v7 */ + LONG $0xf0efc9c5 \ // VPXOR XMM6,XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ + LONG $0xf9efc1c5 \ // VPXOR XMM7,XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ + LONG $0xf670f9c5; BYTE $0xb1 \ // VPSHUFD XMM6,XMM6,0xb1 /* v12 = v12<<(64-32) | v12>>32, v13 = v13<<(64-32) | v13>>32 */ + LONG $0xff70f9c5; BYTE $0xb1 \ // VPSHUFD XMM7,XMM7,0xb1 /* v14 = v14<<(64-32) | v14>>32, v15 = v15<<(64-32) | v15>>32 */ + LONG $0xe6d4d9c5 \ // VPADDQ 
XMM4,XMM4,XMM6 /* v8 += v12, v9 += v13 */ + LONG $0xefd4d1c5 \ // VPADDQ XMM5,XMM5,XMM7 /* v10 += v14, v11 += v15 */ + LONG $0xd4efe9c5 \ // VPXOR XMM2,XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ + LONG $0xddefe1c5 \ // VPXOR XMM3,XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ + LONG $0x0069c2c4; BYTE $0xd4 \ // VPSHUFB XMM2,XMM2,XMM12 /* v4 = v4<<(64-24) | v4>>24, v5 = v5<<(64-24) | v5>>24 */ + LONG $0x0061c2c4; BYTE $0xdc // VPSHUFB XMM3,XMM3,XMM12 /* v6 = v6<<(64-24) | v6>>24, v7 = v7<<(64-24) | v7>>24 */ #define G2 \ - \ // G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); - BYTE $0xc4; BYTE $0xc1; BYTE $0x79; BYTE $0xd4; BYTE $0xc2 \ // VPADDQ XMM0,XMM0,XMM10 /* v0 += m[1], v1 += m[3] */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x71; BYTE $0xd4; BYTE $0xcb \ // VPADDQ XMM1,XMM1,XMM11 /* v2 += m[5], v3 += m[7] */ - BYTE $0xc5; BYTE $0xf9; BYTE $0xd4; BYTE $0xc2 \ // VPADDQ XMM0,XMM0,XMM2 /* v0 += v4, v1 += v5 */ - BYTE $0xc5; BYTE $0xf1; BYTE $0xd4; BYTE $0xcb \ // VPADDQ XMM1,XMM1,XMM3 /* v2 += v6, v3 += v7 */ - BYTE $0xc5; BYTE $0xc9; BYTE $0xef; BYTE $0xf0 \ // VPXOR XMM6,XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ - BYTE $0xc5; BYTE $0xc1; BYTE $0xef; BYTE $0xf9 \ // VPXOR XMM7,XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ - BYTE $0xc5; BYTE $0xfb; BYTE $0x70; BYTE $0xf6; BYTE $0x39 \ // VPSHUFLW XMM6,XMM6,0x39 /* combined with next ... */ - BYTE $0xc5; BYTE $0xfa; BYTE $0x70; BYTE $0xf6; BYTE $0x39 \ // VPSHUFHW XMM6,XMM6,0x39 /* v12 = v12<<(64-16) | v12>>16, v13 = v13<<(64-16) | v13>>16 */ - BYTE $0xc5; BYTE $0xfb; BYTE $0x70; BYTE $0xff; BYTE $0x39 \ // VPSHUFLW XMM7,XMM7,0x39 /* combined with next ... 
*/ - BYTE $0xc5; BYTE $0xfa; BYTE $0x70; BYTE $0xff; BYTE $0x39 \ // VPSHUFHW XMM7,XMM7,0x39 /* v14 = v14<<(64-16) | v14>>16, v15 = v15<<(64-16) | v15>>16 */ - BYTE $0xc5; BYTE $0xd9; BYTE $0xd4; BYTE $0xe6 \ // VPADDQ XMM4,XMM4,XMM6 /* v8 += v12, v9 += v13 */ - BYTE $0xc5; BYTE $0xd1; BYTE $0xd4; BYTE $0xef \ // VPADDQ XMM5,XMM5,XMM7 /* v10 += v14, v11 += v15 */ - BYTE $0xc5; BYTE $0xe9; BYTE $0xef; BYTE $0xd4 \ // VPXOR XMM2,XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ - BYTE $0xc5; BYTE $0xe1; BYTE $0xef; BYTE $0xdd \ // VPXOR XMM3,XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ - BYTE $0xc5; BYTE $0x69; BYTE $0xd4; BYTE $0xfa \ // VPADDQ XMM15,XMM2,XMM2 /* temp reg = reg*2 */ - BYTE $0xc5; BYTE $0xe9; BYTE $0x73; BYTE $0xd2; BYTE $0x3f \ // VPSRLQ XMM2,XMM2,0x3f /* reg = reg>>63 */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x69; BYTE $0xef; BYTE $0xd7 \ // VPXOR XMM2,XMM2,XMM15 /* ORed together: v4 = v4<<(64-63) | v4>>63, v5 = v5<<(64-63) | v5>>63 */ - BYTE $0xc5; BYTE $0x61; BYTE $0xd4; BYTE $0xfb \ // VPADDQ XMM15,XMM3,XMM3 /* temp reg = reg*2 */ - BYTE $0xc5; BYTE $0xe1; BYTE $0x73; BYTE $0xd3; BYTE $0x3f \ // VPSRLQ XMM3,XMM3,0x3f /* reg = reg>>63 */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x61; BYTE $0xef; BYTE $0xdf // VPXOR XMM3,XMM3,XMM15 /* ORed together: v6 = v6<<(64-63) | v6>>63, v7 = v7<<(64-63) | v7>>63 */ + \ // G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); + LONG $0xd479c1c4; BYTE $0xc2 \ // VPADDQ XMM0,XMM0,XMM10 /* v0 += m[1], v1 += m[3] */ + LONG $0xd471c1c4; BYTE $0xcb \ // VPADDQ XMM1,XMM1,XMM11 /* v2 += m[5], v3 += m[7] */ + LONG $0xc2d4f9c5 \ // VPADDQ XMM0,XMM0,XMM2 /* v0 += v4, v1 += v5 */ + LONG $0xcbd4f1c5 \ // VPADDQ XMM1,XMM1,XMM3 /* v2 += v6, v3 += v7 */ + LONG $0xf0efc9c5 \ // VPXOR XMM6,XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ + LONG $0xf9efc1c5 \ // VPXOR XMM7,XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ + LONG $0xf670fbc5; BYTE $0x39 \ // VPSHUFLW XMM6,XMM6,0x39 /* combined with next ... 
*/ + LONG $0xf670fac5; BYTE $0x39 \ // VPSHUFHW XMM6,XMM6,0x39 /* v12 = v12<<(64-16) | v12>>16, v13 = v13<<(64-16) | v13>>16 */ + LONG $0xff70fbc5; BYTE $0x39 \ // VPSHUFLW XMM7,XMM7,0x39 /* combined with next ... */ + LONG $0xff70fac5; BYTE $0x39 \ // VPSHUFHW XMM7,XMM7,0x39 /* v14 = v14<<(64-16) | v14>>16, v15 = v15<<(64-16) | v15>>16 */ + LONG $0xe6d4d9c5 \ // VPADDQ XMM4,XMM4,XMM6 /* v8 += v12, v9 += v13 */ + LONG $0xefd4d1c5 \ // VPADDQ XMM5,XMM5,XMM7 /* v10 += v14, v11 += v15 */ + LONG $0xd4efe9c5 \ // VPXOR XMM2,XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ + LONG $0xddefe1c5 \ // VPXOR XMM3,XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ + LONG $0xfad469c5 \ // VPADDQ XMM15,XMM2,XMM2 /* temp reg = reg*2 */ + LONG $0xd273e9c5; BYTE $0x3f \ // VPSRLQ XMM2,XMM2,0x3f /* reg = reg>>63 */ + LONG $0xef69c1c4; BYTE $0xd7 \ // VPXOR XMM2,XMM2,XMM15 /* ORed together: v4 = v4<<(64-63) | v4>>63, v5 = v5<<(64-63) | v5>>63 */ + LONG $0xfbd461c5 \ // VPADDQ XMM15,XMM3,XMM3 /* temp reg = reg*2 */ + LONG $0xd373e1c5; BYTE $0x3f \ // VPSRLQ XMM3,XMM3,0x3f /* reg = reg>>63 */ + LONG $0xef61c1c4; BYTE $0xdf // VPXOR XMM3,XMM3,XMM15 /* ORed together: v6 = v6<<(64-63) | v6>>63, v7 = v7<<(64-63) | v7>>63 */ #define DIAGONALIZE \ - \ // DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); - MOVOU X6, X13 \ /* t0 = row4l;\ */ - MOVOU X2, X14 \ /* t1 = row2l;\ */ - MOVOU X4, X6 \ /* row4l = row3l;\ */ - MOVOU X5, X4 \ /* row3l = row3h;\ */ - MOVOU X6, X5 \ /* row3h = row4l;\ */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xfd \ // VPUNPCKLQDQ XMM15, XMM13, XMM13 /* _mm_unpacklo_epi64(t0, t0) */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x41; BYTE $0x6d; BYTE $0xf7 \ // VPUNPCKHQDQ XMM6, XMM7, XMM15 /* row4l = _mm_unpackhi_epi64(row4h, ); \ */ - BYTE $0xc5; BYTE $0x41; BYTE $0x6c; BYTE $0xff \ // VPUNPCKLQDQ XMM15, XMM7, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x11; BYTE $0x6d; BYTE $0xff \ // VPUNPCKHQDQ XMM7, XMM13, XMM15 /* row4h = _mm_unpackhi_epi64(t0, 
); \ */ - BYTE $0xc5; BYTE $0x61; BYTE $0x6c; BYTE $0xfb \ // VPUNPCKLQDQ XMM15, XMM3, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x69; BYTE $0x6d; BYTE $0xd7 \ // VPUNPCKHQDQ XMM2, XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2l, ); \ */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xfe \ // VPUNPCKLQDQ XMM15, XMM14, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x61; BYTE $0x6d; BYTE $0xdf // VPUNPCKHQDQ XMM3, XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(row2h, ) */ + \ // DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); + MOVOU X6, X13 \ /* t0 = row4l;\ */ + MOVOU X2, X14 \ /* t1 = row2l;\ */ + MOVOU X4, X6 \ /* row4l = row3l;\ */ + MOVOU X5, X4 \ /* row3l = row3h;\ */ + MOVOU X6, X5 \ /* row3h = row4l;\ */ + LONG $0x6c1141c4; BYTE $0xfd \ // VPUNPCKLQDQ XMM15, XMM13, XMM13 /* _mm_unpacklo_epi64(t0, t0) */ + LONG $0x6d41c1c4; BYTE $0xf7 \ // VPUNPCKHQDQ XMM6, XMM7, XMM15 /* row4l = _mm_unpackhi_epi64(row4h, ); \ */ + LONG $0xff6c41c5 \ // VPUNPCKLQDQ XMM15, XMM7, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ + LONG $0x6d11c1c4; BYTE $0xff \ // VPUNPCKHQDQ XMM7, XMM13, XMM15 /* row4h = _mm_unpackhi_epi64(t0, ); \ */ + LONG $0xfb6c61c5 \ // VPUNPCKLQDQ XMM15, XMM3, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ + LONG $0x6d69c1c4; BYTE $0xd7 \ // VPUNPCKHQDQ XMM2, XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2l, ); \ */ + LONG $0x6c0941c4; BYTE $0xfe \ // VPUNPCKLQDQ XMM15, XMM14, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ + LONG $0x6d61c1c4; BYTE $0xdf // VPUNPCKHQDQ XMM3, XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(row2h, ) */ #define UNDIAGONALIZE \ - \ // UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); - MOVOU X4, X13 \ /* t0 = row3l;\ */ - MOVOU X5, X4 \ /* row3l = row3h;\ */ - MOVOU X13, X5 \ /* row3h = t0;\ */ - MOVOU X2, X13 \ /* t0 = row2l;\ */ - MOVOU X6, X14 \ /* t1 = row4l;\ */ - BYTE $0xc5; BYTE $0x69; BYTE $0x6c; BYTE $0xfa \ // VPUNPCKLQDQ XMM15, XMM2, XMM2 /* 
_mm_unpacklo_epi64(row2l, row2l) */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x61; BYTE $0x6d; BYTE $0xd7 \ // VPUNPCKHQDQ XMM2, XMM3, XMM15 /* row2l = _mm_unpackhi_epi64(row2h, ); \ */ - BYTE $0xc5; BYTE $0x61; BYTE $0x6c; BYTE $0xfb \ // VPUNPCKLQDQ XMM15, XMM3, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x11; BYTE $0x6d; BYTE $0xdf \ // VPUNPCKHQDQ XMM3, XMM13, XMM15 /* row2h = _mm_unpackhi_epi64(t0, ); \ */ - BYTE $0xc5; BYTE $0x41; BYTE $0x6c; BYTE $0xff \ // VPUNPCKLQDQ XMM15, XMM7, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x49; BYTE $0x6d; BYTE $0xf7 \ // VPUNPCKHQDQ XMM6, XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4l, ); \ */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xfe \ // VPUNPCKLQDQ XMM15, XMM14, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x41; BYTE $0x6d; BYTE $0xff // VPUNPCKHQDQ XMM7, XMM7, XMM15 /* row4h = _mm_unpackhi_epi64(row4h, ) */ + \ // UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); + MOVOU X4, X13 \ /* t0 = row3l;\ */ + MOVOU X5, X4 \ /* row3l = row3h;\ */ + MOVOU X13, X5 \ /* row3h = t0;\ */ + MOVOU X2, X13 \ /* t0 = row2l;\ */ + MOVOU X6, X14 \ /* t1 = row4l;\ */ + LONG $0xfa6c69c5 \ // VPUNPCKLQDQ XMM15, XMM2, XMM2 /* _mm_unpacklo_epi64(row2l, row2l) */ + LONG $0x6d61c1c4; BYTE $0xd7 \ // VPUNPCKHQDQ XMM2, XMM3, XMM15 /* row2l = _mm_unpackhi_epi64(row2h, ); \ */ + LONG $0xfb6c61c5 \ // VPUNPCKLQDQ XMM15, XMM3, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ + LONG $0x6d11c1c4; BYTE $0xdf \ // VPUNPCKHQDQ XMM3, XMM13, XMM15 /* row2h = _mm_unpackhi_epi64(t0, ); \ */ + LONG $0xff6c41c5 \ // VPUNPCKLQDQ XMM15, XMM7, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ + LONG $0x6d49c1c4; BYTE $0xf7 \ // VPUNPCKHQDQ XMM6, XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4l, ); \ */ + LONG $0x6c0941c4; BYTE $0xfe \ // VPUNPCKLQDQ XMM15, XMM14, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ + LONG $0x6d41c1c4; BYTE $0xff // VPUNPCKHQDQ XMM7, XMM7, 
XMM15 /* row4h = _mm_unpackhi_epi64(row4h, ) */ #define LOAD_SHUFFLE \ - \ // Load shuffle value - MOVQ shffle+120(FP), SI \ // SI: &shuffle - MOVOU 0(SI), X12 // X12 = 03040506 07000102 0b0c0d0e 0f08090a + \ // Load shuffle value + MOVQ shffle+120(FP), SI \ // SI: &shuffle + MOVOU 0(SI), X12 // X12 = 03040506 07000102 0b0c0d0e 0f08090a // func blockAVXLoop(p []uint8, in, iv, t, f, shffle, out []uint64) TEXT ·blockAVXLoop(SB), 7, $0 - // REGISTER USE - // R8: loop counter - // DX: message pointer - // SI: temp pointer for loading - // X0 - X7: v0 - v15 - // X8 - X11: m[0] - m[7] - // X12: shuffle value - // X13 - X15: temp registers + // REGISTER USE + // R8: loop counter + // DX: message pointer + // SI: temp pointer for loading + // X0 - X7: v0 - v15 + // X8 - X11: m[0] - m[7] + // X12: shuffle value + // X13 - X15: temp registers - // Load digest - MOVQ in+24(FP), SI // SI: &in - MOVOU 0(SI), X0 // X0 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ - MOVOU 16(SI), X1 // X1 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ - MOVOU 32(SI), X2 // X2 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ - MOVOU 48(SI), X3 // X3 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ + // Load digest + MOVQ in+24(FP), SI // SI: &in + MOVOU 0(SI), X0 // X0 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ + MOVOU 16(SI), X1 // X1 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ + MOVOU 32(SI), X2 // X2 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ + MOVOU 48(SI), X3 // X3 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ - // Already store digest into &out (so we can reload it later generically) - MOVQ out+144(FP), SI // SI: &out - MOVOU X0, 0(SI) // out[0]+out[1] = X0 - MOVOU X1, 16(SI) // out[2]+out[3] = X1 - MOVOU X2, 32(SI) // out[4]+out[5] = X2 - MOVOU X3, 48(SI) // out[6]+out[7] = X3 + // Already store digest into &out (so we can reload it later generically) + MOVQ out+144(FP), SI // SI: &out + MOVOU X0, 0(SI) // out[0]+out[1] = X0 + MOVOU X1, 16(SI) // out[2]+out[3] = X1 + MOVOU X2, 32(SI) // 
out[4]+out[5] = X2 + MOVOU X3, 48(SI) // out[6]+out[7] = X3 - // Initialize message pointer and loop counter - MOVQ message+0(FP), DX // DX: &p (message) - MOVQ message_len+8(FP), R8 // R8: len(message) - SHRQ $7, R8 // len(message) / 128 - CMPQ R8, $0 - JEQ complete + // Initialize message pointer and loop counter + MOVQ message+0(FP), DX // DX: &p (message) + MOVQ message_len+8(FP), R8 // R8: len(message) + SHRQ $7, R8 // len(message) / 128 + CMPQ R8, $0 + JEQ complete loop: - // Increment counter - MOVQ t+72(FP), SI // SI: &t - MOVQ 0(SI), R9 // - ADDQ $128, R9 // /* d.t[0] += BlockSize */ - MOVQ R9, 0(SI) // - CMPQ R9, $128 // /* if d.t[0] < BlockSize { */ - JGE noincr // - MOVQ 8(SI), R9 // - ADDQ $1, R9 // /* d.t[1]++ */ - MOVQ R9, 8(SI) // -noincr: // /* } */ - - // Load initialization vector - MOVQ iv+48(FP), SI // SI: &iv - MOVOU 0(SI), X4 // X4 = iv[0]+iv[1] /* row3l = LOAD( &blake2b_IV[0] ); */ - MOVOU 16(SI), X5 // X5 = iv[2]+iv[3] /* row3h = LOAD( &blake2b_IV[2] ); */ - MOVOU 32(SI), X6 // X6 = iv[4]+iv[5] /* LOAD( &blake2b_IV[4] ) */ - MOVOU 48(SI), X7 // X7 = iv[6]+iv[7] /* LOAD( &blake2b_IV[6] ) */ - MOVQ t+72(FP), SI // SI: &t - MOVOU 0(SI), X8 // X8 = t[0]+t[1] /* LOAD( &S->t[0] ) */ - PXOR X8, X6 // X6 = X6 ^ X8 /* row4l = _mm_xor_si128( , ); */ - MOVQ t+96(FP), SI // SI: &f - MOVOU 0(SI), X8 // X8 = f[0]+f[1] /* LOAD( &S->f[0] ) */ - PXOR X8, X7 // X7 = X7 ^ X8 /* row4h = _mm_xor_si128( , ); */ - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+m[1] - MOVOU 16(DX), X13 // X13 = m[2]+m[3] - MOVOU 32(DX), X14 // X14 = m[4]+m[5] - MOVOU 48(DX), X15 // X15 = m[6]+m[7] - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[2] */ - BYTE 
$0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[4], m[6] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[1], m[3] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[5], m[7] */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[8],m[10] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[12],m[14] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[9],m[11] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[13],m[15] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 2 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 112(DX), X12 // X12 = m[14]+m[15] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[14], m[4] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM14, XMM15 /* m[9], m[13] */ - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 48(DX), X15 // X15 = m[6]+ m[7] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, 
XMM14 /* m[10], m[8] */ - BYTE $0xc4; BYTE $0x43; BYTE $0x01; BYTE $0x0f; BYTE $0xdc // VPALIGNR XMM11, XMM15, XMM12, 0x8 /* m[15], m[6] */ - BYTE $0x08 - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - BYTE $0xc4; BYTE $0x43; BYTE $0x19; BYTE $0x0f; BYTE $0xc4 // VPALIGNR XMM8, XMM12, XMM12, 0x8 /* m[1], m[0] */ - BYTE $0x08 - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xcd // VPUNPCKHQDQ XMM9, XMM14, XMM13 /* m[11], m[5] */ - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[2] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6d; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM13, XMM12 /* m[7], m[3] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 3 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 32(DX), X12 // X12 = m[4]+ m[5] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x43; BYTE $0x09; BYTE $0x0f; BYTE $0xc5 // VPALIGNR XMM8, XMM14, XMM13, 0x8 /* m[11], m[12] */ - BYTE $0x08 - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM12, XMM15 /* m[5], m[15] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 64(DX), X15 // X15 = m[8]+ m[9] - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6c; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM15, XMM12 /* m[8], m[0] */ - BYTE 
$0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[13] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[2], ___ */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM12, XMM12 /* ___, m[3] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[10], ___ */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6d; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM13, XMM14 /* m[7], m[9] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X14 // X14 = m[4]+ m[5] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6c; BYTE $0xd5 // VPUNPCKLQDQ XMM10, XMM15, XMM13 /* m[14], m[6] */ - BYTE $0xc4; BYTE $0x43; BYTE $0x09; BYTE $0x0f; BYTE $0xdc // VPALIGNR XMM11, XMM14, XMM12, 0x8 /* m[1], m[4] */ - BYTE $0x08 - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 4 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6d; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM13, XMM12 /* m[7], m[3] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6d; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM15, XMM14 /* m[13], m[11] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - 
MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 112(DX), X14 // X14 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6d; BYTE $0xd4 // VPUNPCKHQDQ XMM10, XMM13, XMM12 /* m[9], m[1] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6c; BYTE $0xde // VPUNPCKLQDQ XMM11, XMM15, XMM14 /* m[12], m[14] */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6d; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM13, XMM13 /* ___, m[5] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM12, XMM8 /* m[2], ____ */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6d; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM15, XMM15 /* ___, m[15] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM13, XMM9 /* m[4], ____ */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X15 // X15 = m[8]+ m[9] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[6], m[10] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xdf // VPUNPCKLQDQ XMM11, XMM12, XMM15 /* m[0], m[8] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 5 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xc5 // VPUNPCKHQDQ XMM8, 
XMM14, XMM13 /* m[9], m[5] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM12, XMM15 /* m[2], m[10] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM14, XMM14 /* ___, m[7] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xd2 // VPUNPCKLQDQ XMM10, XMM12, XMM10 /* m[0], ____ */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6d; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM15, XMM15 /* ___, m[15] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[4], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xc6 // VPUNPCKHQDQ XMM8, XMM14, XMM14 /* ___, m[11] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[14], ____ */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM12, XMM12 /* ___, m[3] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM13, XMM9 /* m[6], ____ */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - BYTE $0xc4; BYTE $0x43; BYTE $0x09; BYTE $0x0f; BYTE $0xd4 // VPALIGNR XMM10, XMM14, XMM12, 0x8 /* m[1], m[12] */ - BYTE $0x08 - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[13] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[8], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - 
UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 6 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 64(DX), X15 // X15 = m[8]+ m[9] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xc6 // VPUNPCKLQDQ XMM8, XMM13, XMM14 /* m[2], m[6] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM12, XMM15 /* m[0], m[8] */ - MOVOU 80(DX), X12 // X12 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[10] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xdd // VPUNPCKHQDQ XMM11, XMM12, XMM13 /* m[11], m[3] */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xc6 // VPUNPCKHQDQ XMM8, XMM14, XMM14 /* ___, m[7] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM13, XMM8 /* m[4], ____ */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM15, XMM12 /* m[15], m[1] */ - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM14, XMM13 /* m[13], m[5] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM12, XMM12 /* ___, m[9] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE 
$0x6c; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM15, XMM11 /* m[14], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 7 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM12, XMM12 /* ___, m[1] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM14, XMM8 /* m[12], ____ */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6c; BYTE $0xcd // VPUNPCKLQDQ XMM9, XMM15, XMM13 /* m[14], m[4] */ - MOVOU 80(DX), X12 // X12 = m[10]+m[11] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6d; BYTE $0xd7 // VPUNPCKHQDQ XMM10, XMM13, XMM15 /* m[5], m[15] */ - BYTE $0xc4; BYTE $0x43; BYTE $0x19; BYTE $0x0f; BYTE $0xde // VPALIGNR XMM11, XMM12, XMM14, 0x8 /* m[13], m[10] */ - BYTE $0x08 - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[6] */ - BYTE $0xc4; BYTE $0x43; BYTE $0x09; BYTE $0x0f; BYTE $0xce // VPALIGNR XMM9, XMM14, XMM14, 0x8 /* m[9], m[8] */ - BYTE $0x08 - MOVOU 16(DX), X14 // X14 = m[2]+ m[3] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6d; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM13, XMM14 /* m[7], m[3] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6d; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM15, XMM15 /* 
___, m[11] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM14, XMM11 /* m[2], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 8 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM14, XMM13 /* m[13], m[7] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM12, XMM12 /* ___, m[3] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM14, XMM9 /* m[12], ____ */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - BYTE $0xc4; BYTE $0x43; BYTE $0x01; BYTE $0x0f; BYTE $0xd6 // VPALIGNR XMM10, XMM15, XMM14, 0x8 /* m[11], m[14] */ - BYTE $0x08 - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xdd // VPUNPCKHQDQ XMM11, XMM12, XMM13 /* m[1], m[9] */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6d; BYTE $0xc7 // VPUNPCKHQDQ XMM8, XMM13, XMM15 /* m[5], m[15] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xcc // VPUNPCKLQDQ XMM9, XMM14, XMM12 /* m[8], m[2] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - BYTE 
$0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xd5 // VPUNPCKLQDQ XMM10, XMM12, XMM13 /* m[0], m[4] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xdf // VPUNPCKLQDQ XMM11, XMM14, XMM15 /* m[6], m[10] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 9 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xc7 // VPUNPCKLQDQ XMM8, XMM13, XMM15 /* m[6], m[14] */ - BYTE $0xc4; BYTE $0x43; BYTE $0x19; BYTE $0x0f; BYTE $0xce // VPALIGNR XMM9, XMM12, XMM14, 0x8 /* m[11], m[0] */ - BYTE $0x08 - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6d; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM15, XMM14 /* m[15], m[9] */ - BYTE $0xc4; BYTE $0x43; BYTE $0x09; BYTE $0x0f; BYTE $0xdd // VPALIGNR XMM11, XMM14, XMM13, 0x8 /* m[3], m[8] */ - BYTE $0x08 - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6d; BYTE $0xc7 // VPUNPCKHQDQ XMM8, XMM15, XMM15 /* ___, m[13] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[12], ____ */ - BYTE $0xc4; BYTE $0x43; BYTE $0x09; BYTE $0x0f; BYTE $0xcc // VPALIGNR XMM9, XMM14, XMM12, 0x8 /* m[1], m[10] */ - BYTE $0x08 - MOVOU 32(DX), X12 // X12 = m[4]+ m[5] - MOVOU 
48(DX), X15 // X15 = m[6]+ m[7] - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6d; BYTE $0xd7 // VPUNPCKHQDQ XMM10, XMM15, XMM15 /* ___, m[7] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xd2 // VPUNPCKLQDQ XMM10, XMM13, XMM10 /* m[2], ____ */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM12, XMM12 /* ___, m[5] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM12, XMM11 /* m[4], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 0 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6c; BYTE $0xc6 // VPUNPCKLQDQ XMM8, XMM15, XMM14 /* m[10], m[8] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM13, XMM12 /* m[7], m[1] */ - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X14 // X14 = m[4]+ m[5] - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM12, XMM14 /* m[2], m[4] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[5] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[6], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x01; BYTE $0x6d; BYTE 
$0xc5 // VPUNPCKHQDQ XMM8, XMM15, XMM13 /* m[15], m[9] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM12, XMM14 /* m[3], m[13] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - BYTE $0xc4; BYTE $0x43; BYTE $0x01; BYTE $0x0f; BYTE $0xd5 // VPALIGNR XMM10, XMM15, XMM13, 0x8 /* m[11], m[14] */ - BYTE $0x08 - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xdc // VPUNPCKLQDQ XMM11, XMM14, XMM12 /* m[12], m[0] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 1 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+m[1] - MOVOU 16(DX), X13 // X13 = m[2]+m[3] - MOVOU 32(DX), X14 // X14 = m[4]+m[5] - MOVOU 48(DX), X15 // X15 = m[6]+m[7] - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[2] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[4], m[6] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[1], m[3] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[5], m[7] */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[8],m[10] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[12],m[14] */ - BYTE $0xc4; 
BYTE $0x41; BYTE $0x19; BYTE $0x6d; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[9],m[11] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[13],m[15] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 2 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 112(DX), X12 // X12 = m[14]+m[15] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - BYTE $0xc4; BYTE $0x41; BYTE $0x19; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[14], m[4] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM14, XMM15 /* m[9], m[13] */ - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 48(DX), X15 // X15 = m[6]+ m[7] - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6c; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[10], m[8] */ - BYTE $0xc4; BYTE $0x43; BYTE $0x01; BYTE $0x0f; BYTE $0xdc // VPALIGNR XMM11, XMM15, XMM12, 0x8 /* m[15], m[6] */ - BYTE $0x08 - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - BYTE $0xc4; BYTE $0x43; BYTE $0x19; BYTE $0x0f; BYTE $0xc4 // VPALIGNR XMM8, XMM12, XMM12, 0x8 /* m[1], m[0] */ - BYTE $0x08 - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6d; BYTE $0xcd // VPUNPCKHQDQ XMM9, XMM14, XMM13 /* m[11], m[5] */ - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - BYTE $0xc4; BYTE $0x41; BYTE $0x09; BYTE $0x6c; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* 
m[12], m[2] */ - BYTE $0xc4; BYTE $0x41; BYTE $0x11; BYTE $0x6d; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM13, XMM12 /* m[7], m[3] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - // Reload digest (most current value store in &out) - MOVQ out+144(FP), SI // SI: &in - MOVOU 0(SI), X12 // X12 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ - MOVOU 16(SI), X13 // X13 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ - MOVOU 32(SI), X14 // X14 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ - MOVOU 48(SI), X15 // X15 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ - - // Final computations and prepare for storing - PXOR X4, X0 // X0 = X0 ^ X4 /* row1l = _mm_xor_si128( row3l, row1l ); */ - PXOR X5, X1 // X1 = X1 ^ X5 /* row1h = _mm_xor_si128( row3h, row1h ); */ - PXOR X12, X0 // X0 = X0 ^ X12 /* STORE( &S->h[0], _mm_xor_si128( LOAD( &S->h[0] ), row1l ) ); */ - PXOR X13, X1 // X1 = X1 ^ X13 /* STORE( &S->h[2], _mm_xor_si128( LOAD( &S->h[2] ), row1h ) ); */ - PXOR X6, X2 // X2 = X2 ^ X6 /* row2l = _mm_xor_si128( row4l, row2l ); */ - PXOR X7, X3 // X3 = X3 ^ X7 /* row2h = _mm_xor_si128( row4h, row2h ); */ - PXOR X14, X2 // X2 = X2 ^ X14 /* STORE( &S->h[4], _mm_xor_si128( LOAD( &S->h[4] ), row2l ) ); */ - PXOR X15, X3 // X3 = X3 ^ X15 /* STORE( &S->h[6], _mm_xor_si128( LOAD( &S->h[6] ), row2h ) ); */ - - // Store digest into &out - MOVQ out+144(FP), SI // SI: &out - MOVOU X0, 0(SI) // out[0]+out[1] = X0 - MOVOU X1, 16(SI) // out[2]+out[3] = X1 - MOVOU X2, 32(SI) // out[4]+out[5] = X2 - MOVOU X3, 48(SI) // out[6]+out[7] = X3 - - // Increment message pointer and check if there's more to do - ADDQ $128, DX // message += 128 - SUBQ $1, R8 - JNZ loop + // Increment counter + MOVQ t+72(FP), SI // SI: &t + MOVQ 0(SI), R9 + ADDQ $128, R9 // /* d.t[0] += BlockSize */ + MOVQ R9, 0(SI) + CMPQ R9, $128 // /* if d.t[0] < BlockSize { */ + JGE noincr + MOVQ 8(SI), R9 + ADDQ $1, R9 // /* d.t[1]++ */ + MOVQ R9, 8(SI) +noincr: // /* } */ + + // Load initialization vector + MOVQ iv+48(FP), SI // SI: &iv 
+ MOVOU 0(SI), X4 // X4 = iv[0]+iv[1] /* row3l = LOAD( &blake2b_IV[0] ); */ + MOVOU 16(SI), X5 // X5 = iv[2]+iv[3] /* row3h = LOAD( &blake2b_IV[2] ); */ + MOVOU 32(SI), X6 // X6 = iv[4]+iv[5] /* LOAD( &blake2b_IV[4] ) */ + MOVOU 48(SI), X7 // X7 = iv[6]+iv[7] /* LOAD( &blake2b_IV[6] ) */ + MOVQ t+72(FP), SI // SI: &t + MOVOU 0(SI), X8 // X8 = t[0]+t[1] /* LOAD( &S->t[0] ) */ + PXOR X8, X6 // X6 = X6 ^ X8 /* row4l = _mm_xor_si128( , ); */ + MOVQ t+96(FP), SI // SI: &f + MOVOU 0(SI), X8 // X8 = f[0]+f[1] /* LOAD( &S->f[0] ) */ + PXOR X8, X7 // X7 = X7 ^ X8 /* row4h = _mm_xor_si128( , ); */ + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+m[1] + MOVOU 16(DX), X13 // X13 = m[2]+m[3] + MOVOU 32(DX), X14 // X14 = m[4]+m[5] + MOVOU 48(DX), X15 // X15 = m[6]+m[7] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[2] */ + LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[4], m[6] */ + LONG $0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[1], m[3] */ + LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[5], m[7] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[8],m[10] */ + LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[12],m[14] */ + LONG $0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[9],m[11] */ + LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[13],m[15] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + 
/////////////////////////////////////////////////////////////////////////// + // R O U N D 2 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 112(DX), X12 // X12 = m[14]+m[15] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[14], m[4] */ + LONG $0x6d0941c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM14, XMM15 /* m[9], m[13] */ + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 48(DX), X15 // X15 = m[6]+ m[7] + LONG $0x6c1141c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[10], m[8] */ + LONG $0x0f0143c4; WORD $0x08dc // VPALIGNR XMM11, XMM15, XMM12, 0x8 /* m[15], m[6] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + LONG $0x0f1943c4; WORD $0x08c4 // VPALIGNR XMM8, XMM12, XMM12, 0x8 /* m[1], m[0] */ + LONG $0x6d0941c4; BYTE $0xcd // VPUNPCKHQDQ XMM9, XMM14, XMM13 /* m[11], m[5] */ + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + LONG $0x6c0941c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[2] */ + LONG $0x6d1141c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM13, XMM12 /* m[7], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 3 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 32(DX), X12 // X12 = m[4]+ m[5] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x0f0943c4; WORD $0x08c5 // VPALIGNR XMM8, 
XMM14, XMM13, 0x8 /* m[11], m[12] */ + LONG $0x6d1941c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM12, XMM15 /* m[5], m[15] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 64(DX), X15 // X15 = m[8]+ m[9] + LONG $0x6c0141c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM15, XMM12 /* m[8], m[0] */ + LONG $0x6d0941c4; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[13] */ + LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[2], ___ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG $0x6d1941c4; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM12, XMM12 /* ___, m[3] */ + LONG $0x6c0141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[10], ___ */ + LONG $0x6d1141c4; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM13, XMM14 /* m[7], m[9] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X14 // X14 = m[4]+ m[5] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6c0141c4; BYTE $0xd5 // VPUNPCKLQDQ XMM10, XMM15, XMM13 /* m[14], m[6] */ + LONG $0x0f0943c4; WORD $0x08dc // VPALIGNR XMM11, XMM14, XMM12, 0x8 /* m[1], m[4] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 4 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + LONG $0x6d1141c4; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM13, XMM12 /* m[7], m[3] */ + LONG $0x6d0141c4; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM15, XMM14 /* m[13], m[11] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 112(DX), 
X14 // X14 = m[14]+m[15] + LONG $0x6d1141c4; BYTE $0xd4 // VPUNPCKHQDQ XMM10, XMM13, XMM12 /* m[9], m[1] */ + LONG $0x6c0141c4; BYTE $0xde // VPUNPCKLQDQ XMM11, XMM15, XMM14 /* m[12], m[14] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d1141c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM13, XMM13 /* ___, m[5] */ + LONG $0x6c1941c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM12, XMM8 /* m[2], ____ */ + LONG $0x6d0141c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM15, XMM15 /* ___, m[15] */ + LONG $0x6c1141c4; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM13, XMM9 /* m[4], ____ */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X15 // X15 = m[8]+ m[9] + LONG $0x6c1141c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[6], m[10] */ + LONG $0x6c1941c4; BYTE $0xdf // VPUNPCKLQDQ XMM11, XMM12, XMM15 /* m[0], m[8] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 5 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG $0x6d0941c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM14, XMM13 /* m[9], m[5] */ + LONG $0x6c1941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM12, XMM15 /* m[2], m[10] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0941c4; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM14, XMM14 /* ___, m[7] */ + LONG $0x6c1941c4; BYTE $0xd2 // VPUNPCKLQDQ XMM10, XMM12, XMM10 /* m[0], ____ */ + LONG $0x6d0141c4; BYTE $0xdf 
// VPUNPCKHQDQ XMM11, XMM15, XMM15 /* ___, m[15] */ + LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[4], ____ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0941c4; BYTE $0xc6 // VPUNPCKHQDQ XMM8, XMM14, XMM14 /* ___, m[11] */ + LONG $0x6c0141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[14], ____ */ + LONG $0x6d1941c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM12, XMM12 /* ___, m[3] */ + LONG $0x6c1141c4; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM13, XMM9 /* m[6], ____ */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + LONG $0x0f0943c4; WORD $0x08d4 // VPALIGNR XMM10, XMM14, XMM12, 0x8 /* m[1], m[12] */ + LONG $0x6d0941c4; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[13] */ + LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[8], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 6 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 64(DX), X15 // X15 = m[8]+ m[9] + LONG $0x6c1141c4; BYTE $0xc6 // VPUNPCKLQDQ XMM8, XMM13, XMM14 /* m[2], m[6] */ + LONG $0x6c1941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM12, XMM15 /* m[0], m[8] */ + MOVOU 80(DX), X12 // X12 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + LONG $0x6c0941c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[10] */ + LONG $0x6d1941c4; BYTE $0xdd // VPUNPCKHQDQ XMM11, XMM12, XMM13 /* m[11], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + 
DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0941c4; BYTE $0xc6 // VPUNPCKHQDQ XMM8, XMM14, XMM14 /* ___, m[7] */ + LONG $0x6c1141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM13, XMM8 /* m[4], ____ */ + LONG $0x6d0141c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM15, XMM12 /* m[15], m[1] */ + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + LONG $0x6d0941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM14, XMM13 /* m[13], m[5] */ + LONG $0x6d1941c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM12, XMM12 /* ___, m[9] */ + LONG $0x6c0141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM15, XMM11 /* m[14], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 7 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d1941c4; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM12, XMM12 /* ___, m[1] */ + LONG $0x6c0941c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM14, XMM8 /* m[12], ____ */ + LONG $0x6c0141c4; BYTE $0xcd // VPUNPCKLQDQ XMM9, XMM15, XMM13 /* m[14], m[4] */ + MOVOU 80(DX), X12 // X12 = m[10]+m[11] + LONG $0x6d1141c4; BYTE $0xd7 // VPUNPCKHQDQ XMM10, XMM13, XMM15 /* m[5], m[15] */ + LONG $0x0f1943c4; WORD $0x08de // VPALIGNR XMM11, XMM12, XMM14, 0x8 /* m[13], m[10] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG 
$0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[6] */ + LONG $0x0f0943c4; WORD $0x08ce // VPALIGNR XMM9, XMM14, XMM14, 0x8 /* m[9], m[8] */ + MOVOU 16(DX), X14 // X14 = m[2]+ m[3] + LONG $0x6d1141c4; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM13, XMM14 /* m[7], m[3] */ + LONG $0x6d0141c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM15, XMM15 /* ___, m[11] */ + LONG $0x6c0941c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM14, XMM11 /* m[2], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 8 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0941c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM14, XMM13 /* m[13], m[7] */ + LONG $0x6d1941c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM12, XMM12 /* ___, m[3] */ + LONG $0x6c0941c4; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM14, XMM9 /* m[12], ____ */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + LONG $0x0f0143c4; WORD $0x08d6 // VPALIGNR XMM10, XMM15, XMM14, 0x8 /* m[11], m[14] */ + LONG $0x6d1941c4; BYTE $0xdd // VPUNPCKHQDQ XMM11, XMM12, XMM13 /* m[1], m[9] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d1141c4; BYTE $0xc7 // VPUNPCKHQDQ XMM8, XMM13, XMM15 /* m[5], m[15] */ + LONG $0x6c0941c4; BYTE $0xcc // VPUNPCKLQDQ XMM9, XMM14, XMM12 /* m[8], m[2] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG 
$0x6c1941c4; BYTE $0xd5 // VPUNPCKLQDQ XMM10, XMM12, XMM13 /* m[0], m[4] */ + LONG $0x6c0941c4; BYTE $0xdf // VPUNPCKLQDQ XMM11, XMM14, XMM15 /* m[6], m[10] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 9 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6c1141c4; BYTE $0xc7 // VPUNPCKLQDQ XMM8, XMM13, XMM15 /* m[6], m[14] */ + LONG $0x0f1943c4; WORD $0x08ce // VPALIGNR XMM9, XMM12, XMM14, 0x8 /* m[11], m[0] */ + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + LONG $0x6d0141c4; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM15, XMM14 /* m[15], m[9] */ + LONG $0x0f0943c4; WORD $0x08dd // VPALIGNR XMM11, XMM14, XMM13, 0x8 /* m[3], m[8] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + LONG $0x6d0141c4; BYTE $0xc7 // VPUNPCKHQDQ XMM8, XMM15, XMM15 /* ___, m[13] */ + LONG $0x6c0141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[12], ____ */ + LONG $0x0f0943c4; WORD $0x08cc // VPALIGNR XMM9, XMM14, XMM12, 0x8 /* m[1], m[10] */ + MOVOU 32(DX), X12 // X12 = m[4]+ m[5] + MOVOU 48(DX), X15 // X15 = m[6]+ m[7] + LONG $0x6d0141c4; BYTE $0xd7 // VPUNPCKHQDQ XMM10, XMM15, XMM15 /* ___, m[7] */ + LONG $0x6c1141c4; BYTE $0xd2 // VPUNPCKLQDQ XMM10, XMM13, XMM10 /* m[2], ____ */ + LONG $0x6d1941c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM12, XMM12 /* ___, m[5] */ + LONG $0x6c1941c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM12, XMM11 /* m[4], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + 
+ /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 0 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG $0x6c0141c4; BYTE $0xc6 // VPUNPCKLQDQ XMM8, XMM15, XMM14 /* m[10], m[8] */ + LONG $0x6d1141c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM13, XMM12 /* m[7], m[1] */ + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X14 // X14 = m[4]+ m[5] + LONG $0x6c1941c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM12, XMM14 /* m[2], m[4] */ + LONG $0x6d0941c4; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[5] */ + LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[6], ____ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0141c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM15, XMM13 /* m[15], m[9] */ + LONG $0x6d1941c4; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM12, XMM14 /* m[3], m[13] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + LONG $0x0f0143c4; WORD $0x08d5 // VPALIGNR XMM10, XMM15, XMM13, 0x8 /* m[11], m[14] */ + LONG $0x6c0941c4; BYTE $0xdc // VPUNPCKLQDQ XMM11, XMM14, XMM12 /* m[12], m[0] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 1 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+m[1] + MOVOU 16(DX), X13 // X13 = m[2]+m[3] + MOVOU 32(DX), X14 // X14 = m[4]+m[5] + MOVOU 48(DX), 
X15 // X15 = m[6]+m[7] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[2] */ + LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[4], m[6] */ + LONG $0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[1], m[3] */ + LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[5], m[7] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[8],m[10] */ + LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[12],m[14] */ + LONG $0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[9],m[11] */ + LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[13],m[15] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 2 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 112(DX), X12 // X12 = m[14]+m[15] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[14], m[4] */ + LONG $0x6d0941c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM14, XMM15 /* m[9], m[13] */ + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 48(DX), X15 // X15 = m[6]+ m[7] + LONG $0x6c1141c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[10], m[8] */ + LONG $0x0f0143c4; WORD $0x08dc // VPALIGNR XMM11, XMM15, XMM12, 0x8 /* m[15], m[6] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = 
m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + LONG $0x0f1943c4; WORD $0x08c4 // VPALIGNR XMM8, XMM12, XMM12, 0x8 /* m[1], m[0] */ + LONG $0x6d0941c4; BYTE $0xcd // VPUNPCKHQDQ XMM9, XMM14, XMM13 /* m[11], m[5] */ + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + LONG $0x6c0941c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[2] */ + LONG $0x6d1141c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM13, XMM12 /* m[7], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + // Reload digest (most current value store in &out) + MOVQ out+144(FP), SI // SI: &in + MOVOU 0(SI), X12 // X12 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ + MOVOU 16(SI), X13 // X13 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ + MOVOU 32(SI), X14 // X14 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ + MOVOU 48(SI), X15 // X15 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ + + // Final computations and prepare for storing + PXOR X4, X0 // X0 = X0 ^ X4 /* row1l = _mm_xor_si128( row3l, row1l ); */ + PXOR X5, X1 // X1 = X1 ^ X5 /* row1h = _mm_xor_si128( row3h, row1h ); */ + PXOR X12, X0 // X0 = X0 ^ X12 /* STORE( &S->h[0], _mm_xor_si128( LOAD( &S->h[0] ), row1l ) ); */ + PXOR X13, X1 // X1 = X1 ^ X13 /* STORE( &S->h[2], _mm_xor_si128( LOAD( &S->h[2] ), row1h ) ); */ + PXOR X6, X2 // X2 = X2 ^ X6 /* row2l = _mm_xor_si128( row4l, row2l ); */ + PXOR X7, X3 // X3 = X3 ^ X7 /* row2h = _mm_xor_si128( row4h, row2h ); */ + PXOR X14, X2 // X2 = X2 ^ X14 /* STORE( &S->h[4], _mm_xor_si128( LOAD( &S->h[4] ), row2l ) ); */ + PXOR X15, X3 // X3 = X3 ^ X15 /* STORE( &S->h[6], _mm_xor_si128( LOAD( &S->h[6] ), row2h ) ); */ + + // Store digest into &out + MOVQ out+144(FP), SI // SI: &out + MOVOU X0, 0(SI) // out[0]+out[1] = X0 + MOVOU X1, 16(SI) // out[2]+out[3] = X1 + MOVOU X2, 32(SI) // out[4]+out[5] = X2 + MOVOU X3, 48(SI) // out[6]+out[7] = X3 + + // Increment message pointer and check if there's more to do + ADDQ $128, DX // message 
+= 128 + SUBQ $1, R8 + JNZ loop complete: - RET + RET diff --git a/vendor/github.com/minio/blake2b-simd/compressSse_amd64.go b/vendor/github.com/minio/blake2b-simd/compressSse_amd64.go index 7032f46e6..d539a7ade 100644 --- a/vendor/github.com/minio/blake2b-simd/compressSse_amd64.go +++ b/vendor/github.com/minio/blake2b-simd/compressSse_amd64.go @@ -23,18 +23,19 @@ package blake2b func blockSSELoop(p []uint8, in, iv, t, f, shffle, out []uint64) func compressSSE(d *digest, p []uint8) { + var ( + in [8]uint64 + out [8]uint64 + shffle [2]uint64 + ) - in := make([]uint64, 8, 8) - out := make([]uint64, 8, 8) - - shffle := make([]uint64, 2, 2) // vector for PSHUFB instruction shffle[0] = 0x0201000706050403 shffle[1] = 0x0a09080f0e0d0c0b in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7] = d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] - blockSSELoop(p, in, iv[:], d.t[:], d.f[:], shffle, out) + blockSSELoop(p, in[:], iv[:], d.t[:], d.f[:], shffle[:], out[:]) d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7] } diff --git a/vendor/github.com/minio/blake2b-simd/compressSse_amd64.s b/vendor/github.com/minio/blake2b-simd/compressSse_amd64.s index 051a6d6b9..6f31c949e 100644 --- a/vendor/github.com/minio/blake2b-simd/compressSse_amd64.s +++ b/vendor/github.com/minio/blake2b-simd/compressSse_amd64.s @@ -43,840 +43,728 @@ // #define G1 \ - \ // G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0xd4; BYTE $0xc0 \ // PADDQ XMM0,XMM8 /* v0 += m[0], v1 += m[2] */ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0xd4; BYTE $0xc9 \ // PADDQ XMM1,XMM9 /* v2 += m[4], v3 += m[6] */ - BYTE $0x66; BYTE $0x0f; BYTE $0xd4; BYTE $0xc2 \ // PADDQ XMM0,XMM2 /* v0 += v4, v1 += v5 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xd4; BYTE $0xcb \ // PADDQ XMM1,XMM3 /* v2 += v6, v3 += v7 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xef; BYTE $0xf0 \ // PXOR XMM6,XMM0 /* v12 ^= 
v0, v13 ^= v1 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xef; BYTE $0xf9 \ // PXOR XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ - BYTE $0x66; BYTE $0x0f; BYTE $0x70; BYTE $0xf6; BYTE $0xb1 \ // PSHUFD XMM6,XMM6,0xb1 /* v12 = v12<<(64-32) | v12>>32, v13 = v13<<(64-32) | v13>>32 */ - BYTE $0x66; BYTE $0x0f; BYTE $0x70; BYTE $0xff; BYTE $0xb1 \ // PSHUFD XMM7,XMM7,0xb1 /* v14 = v14<<(64-32) | v14>>32, v15 = v15<<(64-32) | v15>>32 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xd4; BYTE $0xe6 \ // PADDQ XMM4,XMM6 /* v8 += v12, v9 += v13 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xd4; BYTE $0xef \ // PADDQ XMM5,XMM7 /* v10 += v14, v11 += v15 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xef; BYTE $0xd4 \ // PXOR XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xef; BYTE $0xdd \ // PXOR XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0x38; BYTE $0x00 \ // PSHUFB XMM2,XMM12 /* v4 = v4<<(64-24) | v4>>24, v5 = v5<<(64-24) | v5>>24 */ - BYTE $0xd4 \ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0x38; BYTE $0x00 \ // PSHUFB XMM3,XMM12 /* v6 = v6<<(64-24) | v6>>24, v7 = v7<<(64-24) | v7>>24 */ - BYTE $0xdc \ - // DO NOT DELETE -- macro delimiter (previous line extended) + \ // G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); + LONG $0xd40f4166; BYTE $0xc0 \ // PADDQ XMM0,XMM8 /* v0 += m[0], v1 += m[2] */ + LONG $0xd40f4166; BYTE $0xc9 \ // PADDQ XMM1,XMM9 /* v2 += m[4], v3 += m[6] */ + LONG $0xc2d40f66 \ // PADDQ XMM0,XMM2 /* v0 += v4, v1 += v5 */ + LONG $0xcbd40f66 \ // PADDQ XMM1,XMM3 /* v2 += v6, v3 += v7 */ + LONG $0xf0ef0f66 \ // PXOR XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ + LONG $0xf9ef0f66 \ // PXOR XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ + LONG $0xf6700f66; BYTE $0xb1 \ // PSHUFD XMM6,XMM6,0xb1 /* v12 = v12<<(64-32) | v12>>32, v13 = v13<<(64-32) | v13>>32 */ + LONG $0xff700f66; BYTE $0xb1 \ // PSHUFD XMM7,XMM7,0xb1 /* v14 = v14<<(64-32) | v14>>32, v15 = v15<<(64-32) | v15>>32 */ + LONG $0xe6d40f66 \ // PADDQ XMM4,XMM6 /* v8 += v12, v9 += v13 */ + LONG 
$0xefd40f66 \ // PADDQ XMM5,XMM7 /* v10 += v14, v11 += v15 */ + LONG $0xd4ef0f66 \ // PXOR XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ + LONG $0xddef0f66 \ // PXOR XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ + LONG $0x380f4166; WORD $0xd400 \ // PSHUFB XMM2,XMM12 /* v4 = v4<<(64-24) | v4>>24, v5 = v5<<(64-24) | v5>>24 */ + LONG $0x380f4166; WORD $0xdc00 // PSHUFB XMM3,XMM12 /* v6 = v6<<(64-24) | v6>>24, v7 = v7<<(64-24) | v7>>24 */ #define G2 \ - \ // G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0xd4; BYTE $0xc2 \ // PADDQ XMM0,XMM10 /* v0 += m[1], v1 += m[3] */ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0xd4; BYTE $0xcb \ // PADDQ XMM1,XMM11 /* v2 += m[5], v3 += m[7] */ - BYTE $0x66; BYTE $0x0f; BYTE $0xd4; BYTE $0xc2 \ // PADDQ XMM0,XMM2 /* v0 += v4, v1 += v5 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xd4; BYTE $0xcb \ // PADDQ XMM1,XMM3 /* v2 += v6, v3 += v7 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xef; BYTE $0xf0 \ // PXOR XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xef; BYTE $0xf9 \ // PXOR XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ - BYTE $0xf2; BYTE $0x0f; BYTE $0x70; BYTE $0xf6; BYTE $0x39 \ // PSHUFLW XMM6,XMM6,0x39 /* combined with next ... */ - BYTE $0xf3; BYTE $0x0f; BYTE $0x70; BYTE $0xf6; BYTE $0x39 \ // PSHUFHW XMM6,XMM6,0x39 /* v12 = v12<<(64-16) | v12>>16, v13 = v13<<(64-16) | v13>>16 */ - BYTE $0xf2; BYTE $0x0f; BYTE $0x70; BYTE $0xff; BYTE $0x39 \ // PSHUFLW XMM7,XMM7,0x39 /* combined with next ... 
*/ - BYTE $0xf3; BYTE $0x0f; BYTE $0x70; BYTE $0xff; BYTE $0x39 \ // PSHUFHW XMM7,XMM7,0x39 /* v14 = v14<<(64-16) | v14>>16, v15 = v15<<(64-16) | v15>>16 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xd4; BYTE $0xe6 \ // PADDQ XMM4,XMM6 /* v8 += v12, v9 += v13 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xd4; BYTE $0xef \ // PADDQ XMM5,XMM7 /* v10 += v14, v11 += v15 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xef; BYTE $0xd4 \ // PXOR XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ - BYTE $0x66; BYTE $0x0f; BYTE $0xef; BYTE $0xdd \ // PXOR XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ - MOVOU X2, X15 \ - BYTE $0x66; BYTE $0x44; BYTE $0x0f; BYTE $0xd4; BYTE $0xfa \ // PADDQ XMM15,XMM2 /* temp reg = reg*2 */ - BYTE $0x66; BYTE $0x0f; BYTE $0x73; BYTE $0xd2; BYTE $0x3f \ // PSRLQ XMM2,0x3f /* reg = reg>>63 */ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0xef; BYTE $0xd7 \ // PXOR XMM2,XMM15 /* ORed together: v4 = v4<<(64-63) | v4>>63, v5 = v5<<(64-63) | v5>>63 */ - MOVOU X3, X15 \ - BYTE $0x66; BYTE $0x44; BYTE $0x0f; BYTE $0xd4; BYTE $0xfb \ // PADDQ XMM15,XMM3 /* temp reg = reg*2 */ - BYTE $0x66; BYTE $0x0f; BYTE $0x73; BYTE $0xd3; BYTE $0x3f \ // PSRLQ XMM3,0x3f /* reg = reg>>63 */ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0xef; BYTE $0xdf // PXOR XMM3,XMM15 /* ORed together: v6 = v6<<(64-63) | v6>>63, v7 = v7<<(64-63) | v7>>63 */ + \ // G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); + LONG $0xd40f4166; BYTE $0xc2 \ // PADDQ XMM0,XMM10 /* v0 += m[1], v1 += m[3] */ + LONG $0xd40f4166; BYTE $0xcb \ // PADDQ XMM1,XMM11 /* v2 += m[5], v3 += m[7] */ + LONG $0xc2d40f66 \ // PADDQ XMM0,XMM2 /* v0 += v4, v1 += v5 */ + LONG $0xcbd40f66 \ // PADDQ XMM1,XMM3 /* v2 += v6, v3 += v7 */ + LONG $0xf0ef0f66 \ // PXOR XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ + LONG $0xf9ef0f66 \ // PXOR XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ + LONG $0xf6700ff2; BYTE $0x39 \ // PSHUFLW XMM6,XMM6,0x39 /* combined with next ... 
*/ + LONG $0xf6700ff3; BYTE $0x39 \ // PSHUFHW XMM6,XMM6,0x39 /* v12 = v12<<(64-16) | v12>>16, v13 = v13<<(64-16) | v13>>16 */ + LONG $0xff700ff2; BYTE $0x39 \ // PSHUFLW XMM7,XMM7,0x39 /* combined with next ... */ + LONG $0xff700ff3; BYTE $0x39 \ // PSHUFHW XMM7,XMM7,0x39 /* v14 = v14<<(64-16) | v14>>16, v15 = v15<<(64-16) | v15>>16 */ + LONG $0xe6d40f66 \ // PADDQ XMM4,XMM6 /* v8 += v12, v9 += v13 */ + LONG $0xefd40f66 \ // PADDQ XMM5,XMM7 /* v10 += v14, v11 += v15 */ + LONG $0xd4ef0f66 \ // PXOR XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ + LONG $0xddef0f66 \ // PXOR XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ + MOVOU X2, X15 \ + LONG $0xd40f4466; BYTE $0xfa \ // PADDQ XMM15,XMM2 /* temp reg = reg*2 */ + LONG $0xd2730f66; BYTE $0x3f \ // PSRLQ XMM2,0x3f /* reg = reg>>63 */ + LONG $0xef0f4166; BYTE $0xd7 \ // PXOR XMM2,XMM15 /* ORed together: v4 = v4<<(64-63) | v4>>63, v5 = v5<<(64-63) | v5>>63 */ + MOVOU X3, X15 \ + LONG $0xd40f4466; BYTE $0xfb \ // PADDQ XMM15,XMM3 /* temp reg = reg*2 */ + LONG $0xd3730f66; BYTE $0x3f \ // PSRLQ XMM3,0x3f /* reg = reg>>63 */ + LONG $0xef0f4166; BYTE $0xdf // PXOR XMM3,XMM15 /* ORed together: v6 = v6<<(64-63) | v6>>63, v7 = v7<<(64-63) | v7>>63 */ #define DIAGONALIZE \ - \ // DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); - MOVOU X6, X13 \ /* t0 = row4l;\ */ - MOVOU X2, X14 \ /* t1 = row2l;\ */ - MOVOU X4, X6 \ /* row4l = row3l;\ */ - MOVOU X5, X4 \ /* row3l = row3h;\ */ - MOVOU X6, X5 \ /* row3h = row4l;\ */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xfd \ // PUNPCKLQDQ XMM15, XMM13 /* _mm_unpacklo_epi64(t0, t0) */ - MOVOU X7, X6 \ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0x6d; BYTE $0xf7 \ // PUNPCKHQDQ XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4h, ); \ */ - BYTE $0x66; BYTE $0x44; BYTE $0x0f; BYTE $0x6c; BYTE $0xff \ // PUNPCKLQDQ XMM15, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ - MOVOU X13, X7 \ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0x6d; BYTE $0xff \ // PUNPCKHQDQ XMM7, XMM15 /* row4h = 
_mm_unpackhi_epi64(t0, ); \ */ - BYTE $0x66; BYTE $0x44; BYTE $0x0f; BYTE $0x6c; BYTE $0xfb \ // PUNPCKLQDQ XMM15, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0x6d; BYTE $0xd7 \ // PUNPCKHQDQ XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2l, ); \ */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xfe \ // PUNPCKLQDQ XMM15, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0x6d; BYTE $0xdf // PUNPCKHQDQ XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(row2h, ) */ + \ // DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); + MOVOU X6, X13 \ /* t0 = row4l;\ */ + MOVOU X2, X14 \ /* t1 = row2l;\ */ + MOVOU X4, X6 \ /* row4l = row3l;\ */ + MOVOU X5, X4 \ /* row3l = row3h;\ */ + MOVOU X6, X5 \ /* row3h = row4l;\ */ + LONG $0x6c0f4566; BYTE $0xfd \ // PUNPCKLQDQ XMM15, XMM13 /* _mm_unpacklo_epi64(t0, t0) */ + MOVOU X7, X6 \ + LONG $0x6d0f4166; BYTE $0xf7 \ // PUNPCKHQDQ XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4h, ); \ */ + LONG $0x6c0f4466; BYTE $0xff \ // PUNPCKLQDQ XMM15, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ + MOVOU X13, X7 \ + LONG $0x6d0f4166; BYTE $0xff \ // PUNPCKHQDQ XMM7, XMM15 /* row4h = _mm_unpackhi_epi64(t0, ); \ */ + LONG $0x6c0f4466; BYTE $0xfb \ // PUNPCKLQDQ XMM15, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ + LONG $0x6d0f4166; BYTE $0xd7 \ // PUNPCKHQDQ XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2l, ); \ */ + LONG $0x6c0f4566; BYTE $0xfe \ // PUNPCKLQDQ XMM15, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ + LONG $0x6d0f4166; BYTE $0xdf // PUNPCKHQDQ XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(row2h, ) */ #define UNDIAGONALIZE \ - \ // UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); - MOVOU X4, X13 \ /* t0 = row3l;\ */ - MOVOU X5, X4 \ /* row3l = row3h;\ */ - MOVOU X13, X5 \ /* row3h = t0;\ */ - MOVOU X2, X13 \ /* t0 = row2l;\ */ - MOVOU X6, X14 \ /* t1 = row4l;\ */ - BYTE $0x66; BYTE $0x44; BYTE $0x0f; BYTE $0x6c; BYTE $0xfa \ // PUNPCKLQDQ XMM15, 
XMM2 /* _mm_unpacklo_epi64(row2l, row2l) */ - MOVOU X3, X2 \ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0x6d; BYTE $0xd7 \ // PUNPCKHQDQ XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2h, ); \ */ - BYTE $0x66; BYTE $0x44; BYTE $0x0f; BYTE $0x6c; BYTE $0xfb \ // PUNPCKLQDQ XMM15, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ - MOVOU X13, X3 \ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0x6d; BYTE $0xdf \ // PUNPCKHQDQ XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(t0, ); \ */ - BYTE $0x66; BYTE $0x44; BYTE $0x0f; BYTE $0x6c; BYTE $0xff \ // PUNPCKLQDQ XMM15, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0x6d; BYTE $0xf7 \ // PUNPCKHQDQ XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4l, ); \ */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xfe \ // PUNPCKLQDQ XMM15, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ - BYTE $0x66; BYTE $0x41; BYTE $0x0f; BYTE $0x6d; BYTE $0xff // PUNPCKHQDQ XMM7, XMM15 /* row4h = _mm_unpackhi_epi64(row4h, ) */ + \ // UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); + MOVOU X4, X13 \ /* t0 = row3l;\ */ + MOVOU X5, X4 \ /* row3l = row3h;\ */ + MOVOU X13, X5 \ /* row3h = t0;\ */ + MOVOU X2, X13 \ /* t0 = row2l;\ */ + MOVOU X6, X14 \ /* t1 = row4l;\ */ + LONG $0x6c0f4466; BYTE $0xfa \ // PUNPCKLQDQ XMM15, XMM2 /* _mm_unpacklo_epi64(row2l, row2l) */ + MOVOU X3, X2 \ + LONG $0x6d0f4166; BYTE $0xd7 \ // PUNPCKHQDQ XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2h, ); \ */ + LONG $0x6c0f4466; BYTE $0xfb \ // PUNPCKLQDQ XMM15, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ + MOVOU X13, X3 \ + LONG $0x6d0f4166; BYTE $0xdf \ // PUNPCKHQDQ XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(t0, ); \ */ + LONG $0x6c0f4466; BYTE $0xff \ // PUNPCKLQDQ XMM15, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ + LONG $0x6d0f4166; BYTE $0xf7 \ // PUNPCKHQDQ XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4l, ); \ */ + LONG $0x6c0f4566; BYTE $0xfe \ // PUNPCKLQDQ XMM15, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ + LONG $0x6d0f4166; BYTE 
$0xff // PUNPCKHQDQ XMM7, XMM15 /* row4h = _mm_unpackhi_epi64(row4h, ) */ #define LOAD_SHUFFLE \ - \ // Load shuffle value - MOVQ shffle+120(FP), SI \ // SI: &shuffle - MOVOU 0(SI), X12 // X12 = 03040506 07000102 0b0c0d0e 0f08090a + \ // Load shuffle value + MOVQ shffle+120(FP), SI \ // SI: &shuffle + MOVOU 0(SI), X12 // X12 = 03040506 07000102 0b0c0d0e 0f08090a // func blockSSELoop(p []uint8, in, iv, t, f, shffle, out []uint64) TEXT ·blockSSELoop(SB), 7, $0 - // REGISTER USE - // R8: loop counter - // DX: message pointer - // SI: temp pointer for loading - // X0 - X7: v0 - v15 - // X8 - X11: m[0] - m[7] - // X12: shuffle value - // X13 - X15: temp registers + // REGISTER USE + // R8: loop counter + // DX: message pointer + // SI: temp pointer for loading + // X0 - X7: v0 - v15 + // X8 - X11: m[0] - m[7] + // X12: shuffle value + // X13 - X15: temp registers - // Load digest - MOVQ in+24(FP), SI // SI: &in - MOVOU 0(SI), X0 // X0 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ - MOVOU 16(SI), X1 // X1 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ - MOVOU 32(SI), X2 // X2 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ - MOVOU 48(SI), X3 // X3 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ + // Load digest + MOVQ in+24(FP), SI // SI: &in + MOVOU 0(SI), X0 // X0 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ + MOVOU 16(SI), X1 // X1 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ + MOVOU 32(SI), X2 // X2 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ + MOVOU 48(SI), X3 // X3 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ - // Already store digest into &out (so we can reload it later generically) - MOVQ out+144(FP), SI // SI: &out - MOVOU X0, 0(SI) // out[0]+out[1] = X0 - MOVOU X1, 16(SI) // out[2]+out[3] = X1 - MOVOU X2, 32(SI) // out[4]+out[5] = X2 - MOVOU X3, 48(SI) // out[6]+out[7] = X3 + // Already store digest into &out (so we can reload it later generically) + MOVQ out+144(FP), SI // SI: &out + MOVOU X0, 0(SI) // out[0]+out[1] = X0 + MOVOU X1, 16(SI) // out[2]+out[3] = 
X1 + MOVOU X2, 32(SI) // out[4]+out[5] = X2 + MOVOU X3, 48(SI) // out[6]+out[7] = X3 - // Initialize message pointer and loop counter - MOVQ message+0(FP), DX // DX: &p (message) - MOVQ message_len+8(FP), R8 // R8: len(message) - SHRQ $7, R8 // len(message) / 128 - CMPQ R8, $0 - JEQ complete + // Initialize message pointer and loop counter + MOVQ message+0(FP), DX // DX: &p (message) + MOVQ message_len+8(FP), R8 // R8: len(message) + SHRQ $7, R8 // len(message) / 128 + CMPQ R8, $0 + JEQ complete loop: - // Increment counter - MOVQ t+72(FP), SI // SI: &t - MOVQ 0(SI), R9 // - ADDQ $128, R9 // /* d.t[0] += BlockSize */ - MOVQ R9, 0(SI) // - CMPQ R9, $128 // /* if d.t[0] < BlockSize { */ - JGE noincr // - MOVQ 8(SI), R9 // - ADDQ $1, R9 // /* d.t[1]++ */ - MOVQ R9, 8(SI) // -noincr: // /* } */ - - // Load initialization vector - MOVQ iv+48(FP), SI // SI: &iv - MOVOU 0(SI), X4 // X4 = iv[0]+iv[1] /* row3l = LOAD( &blake2b_IV[0] ); */ - MOVOU 16(SI), X5 // X5 = iv[2]+iv[3] /* row3h = LOAD( &blake2b_IV[2] ); */ - MOVOU 32(SI), X6 // X6 = iv[4]+iv[5] /* LOAD( &blake2b_IV[4] ) */ - MOVOU 48(SI), X7 // X7 = iv[6]+iv[7] /* LOAD( &blake2b_IV[6] ) */ - MOVQ t+72(FP), SI // SI: &t - MOVOU 0(SI), X8 // X8 = t[0]+t[1] /* LOAD( &S->t[0] ) */ - PXOR X8, X6 // X6 = X6 ^ X8 /* row4l = _mm_xor_si128( , ); */ - MOVQ t+96(FP), SI // SI: &f - MOVOU 0(SI), X8 // X8 = f[0]+f[1] /* LOAD( &S->f[0] ) */ - PXOR X8, X7 // X7 = X7 ^ X8 /* row4h = _mm_xor_si128( , ); */ - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+m[1] - MOVOU 16(DX), X13 // X13 = m[2]+m[3] - MOVOU 32(DX), X14 // X14 = m[4]+m[5] - MOVOU 48(DX), X15 // X15 = m[6]+m[7] - MOVOU X12, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc5 // PUNPCKLQDQ 
XMM8, XMM13 /* m[0], m[2] */ - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[4], m[6] */ - MOVOU X12, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[1], m[3] */ - MOVOU X14, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[5], m[7] */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X12, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[8],m[10] */ - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[12],m[14] */ - MOVOU X12, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[9],m[11] */ - MOVOU X14, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[13],m[15] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 2 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 112(DX), X12 // X12 = m[14]+m[15] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - MOVOU X12, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[14], m[4] */ - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[9], m[13] */ - MOVOU 80(DX), X10 // X10 = m[10]+m[11] - MOVOU 48(DX), X11 
// X11 = m[6]+ m[7] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[10], m[8] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM11, XMM12, 0x8 /* m[15], m[6] */; ; ; ; ; - BYTE $0xdc; BYTE $0x08 - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU X12, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM8, XMM12, 0x8 /* m[1], m[0] */ - BYTE $0xc4; BYTE $0x08 - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* m[11], m[5] */ - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X11 // X11 = m[6]+ m[7] - MOVOU 96(DX), X10 // X10 = m[12]+m[13] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[2] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xdc // PUNPCKHQDQ XMM11, XMM12 /* m[7], m[3] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 3 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 32(DX), X12 // X12 = m[4]+ m[5] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X14, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM8, XMM13, 0x8 /* m[11], m[12] */ - BYTE $0xc5; BYTE $0x08 - MOVOU X12, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[5], m[15] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 
64(DX), X10 // X10 = m[8]+ m[9] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[8], m[0] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[13] */ - MOVOU X13, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[2], ___ */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - MOVOU X12, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* ___, m[3] */ - MOVOU X15, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[10], ___ */ - MOVOU X13, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[7], m[9] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X11 // X11 = m[4]+ m[5] - MOVOU 112(DX), X10 // X10 = m[14]+m[15] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd5 // PUNPCKLQDQ XMM10, XMM13 /* m[14], m[6] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM11, XMM12, 0x8 /* m[1], m[4] */ - BYTE $0xdc; BYTE $0x08 - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 4 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - MOVOU X13, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xc4 // PUNPCKHQDQ XMM8, XMM12 /* 
m[7], m[3] */ - MOVOU X15, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[13], m[11] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 64(DX), X10 // X10 = m[8]+ m[9] - MOVOU 112(DX), X14 // X14 = m[14]+m[15] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xd4 // PUNPCKHQDQ XMM10, XMM12 /* m[9], m[1] */ - MOVOU X15, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[12], m[14] */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X13, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* ___, m[5] */ - MOVOU X12, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[2], ____ */ - MOVOU X15, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xd7 // PUNPCKHQDQ XMM10, XMM15 /* ___, m[15] */ - MOVOU X13, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xca // PUNPCKLQDQ XMM9, XMM10 /* m[4], ____ */ - MOVOU 0(DX), X11 // X11 = m[0]+ m[1] - MOVOU 48(DX), X10 // X10 = m[6]+ m[7] - MOVOU 64(DX), X15 // X15 = m[8]+ m[9] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[6], m[10] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[0], m[8] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 5 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 
32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - MOVOU X14, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[9], m[5] */ - MOVOU X12, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[2], m[10] */ - MOVOU 0(DX), X10 // X10 = m[0]+ m[1] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[7] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[0], ____ */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xff // PUNPCKHQDQ XMM15, XMM15 /* ___, m[15] */ - MOVOU X13, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[4], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[11] */ - MOVOU X15, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[14], ____ */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xe4 // PUNPCKHQDQ XMM12, XMM12 /* ___, m[3] */ - MOVOU X13, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xcc // PUNPCKLQDQ XMM9, XMM12 /* m[6], ____ */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 64(DX), X11 // X11 = m[8]+ m[9] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU X14, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM10, XMM12, 0x8 /* m[1], m[12] */ - BYTE $0xd4; BYTE $0x08 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE 
$0x6d; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[13] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[8], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 6 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 64(DX), X15 // X15 = m[8]+ m[9] - MOVOU X13, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[2], m[6] */ - MOVOU X12, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[0], m[8] */ - MOVOU 80(DX), X12 // X12 = m[10]+m[11] - MOVOU 96(DX), X10 // X10 = m[12]+m[13] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[10] */ - MOVOU X12, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xdd // PUNPCKHQDQ XMM11, XMM13 /* m[11], m[3] */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* ___, m[7] */ - MOVOU X13, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[4], ____ */ - MOVOU X15, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* m[15], m[1] */ - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 96(DX), X10 // X10 = m[12]+m[13] - BYTE $0x66; BYTE $0x45; 
BYTE $0x0f; BYTE $0x6d; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[13], m[5] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xe4 // PUNPCKHQDQ XMM12, XMM12 /* ___, m[9] */ - MOVOU X15, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xdc // PUNPCKLQDQ XMM11, XMM12 /* m[14], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 7 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X12, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* ___, m[1] */ - MOVOU X14, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[12], ____ */ - MOVOU X15, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xcd // PUNPCKLQDQ XMM9, XMM13 /* m[14], m[4] */ - MOVOU 80(DX), X11 // X11 = m[10]+m[11] - MOVOU X13, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xd7 // PUNPCKHQDQ XMM10, XMM15 /* m[5], m[15] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM11, XMM14, 0x8 /* m[13], m[10] */ - BYTE $0xde; BYTE $0x08 - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - MOVOU X12, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[6] */ - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM9, XMM14, 
0x8 /* m[9], m[8] */ - BYTE $0xce; BYTE $0x08 - MOVOU 16(DX), X11 // X14 = m[2]+ m[3] - MOVOU X13, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xd3 // PUNPCKHQDQ XMM10, XMM11 /* m[7], m[3] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xff // PUNPCKHQDQ XMM15, XMM15 /* ___, m[11] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[2], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 8 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X14, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[13], m[7] */ - MOVOU X12, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xd4 // PUNPCKHQDQ XMM10, XMM12 /* ___, m[3] */ - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xca // PUNPCKLQDQ XMM9, XMM10 /* m[12], ____ */ - MOVOU 0(DX), X11 // X11 = m[0]+ m[1] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU X15, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM10, XMM14, 0x8 /* m[11], m[14] */ - BYTE $0xd6; BYTE $0x08 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xdd // PUNPCKHQDQ XMM11, XMM13 /* m[1], m[9] */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X13, X8 - BYTE 
$0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xc7 // PUNPCKHQDQ XMM8, XMM15 /* m[5], m[15] */ - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xcc // PUNPCKLQDQ XMM9, XMM12 /* m[8], m[2] */ - MOVOU 0(DX), X10 // X10 = m[0]+ m[1] - MOVOU 48(DX), X11 // X11 = m[6]+ m[7] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd5 // PUNPCKLQDQ XMM10, XMM13 /* m[0], m[4] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[6], m[10] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 9 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X13, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc7 // PUNPCKLQDQ XMM8, XMM15 /* m[6], m[14] */ - MOVOU X12, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM9, XMM14, 0x8 /* m[11], m[0] */ - BYTE $0xce; BYTE $0x08 - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 64(DX), X11 // X11 = m[8]+ m[9] - MOVOU X15, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xd3 // PUNPCKHQDQ XMM10, XMM11 /* m[15], m[9] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM11, XMM13, 0x8 /* m[3], m[8] */ - BYTE $0xdd; BYTE $0x08 - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - MOVOU X15, X9 - BYTE $0x66; 
BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* ___, m[13] */ - MOVOU X15, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[12], ____ */ - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM9, XMM12, 0x8 /* m[1], m[10] */ - BYTE $0xcc; BYTE $0x08 - MOVOU 32(DX), X12 // X12 = m[4]+ m[5] - MOVOU 48(DX), X15 // X15 = m[6]+ m[7] - MOVOU X15, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* ___, m[7] */ - MOVOU X13, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd3 // PUNPCKLQDQ XMM10, XMM11 /* m[2], ____ */ - MOVOU X12, X15 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xfc // PUNPCKHQDQ XMM15, XMM12 /* ___, m[5] */ - MOVOU X12, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[4], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 0 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - MOVOU X15, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[10], m[8] */ - MOVOU X13, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* m[7], m[1] */ - MOVOU 16(DX), X10 // X10 = m[2]+ m[3] - MOVOU 32(DX), X14 // X14 = m[4]+ m[5] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[2], m[4] */ - MOVOU X14, X15 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xfe // PUNPCKHQDQ XMM15, XMM14 /* ___, m[5] */ - MOVOU X13, X11 - BYTE $0x66; 
BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[6], ____ */ - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X15, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[15], m[9] */ - MOVOU X12, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[3], m[13] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU X15, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM10, XMM13, 0x8 /* m[11], m[14] */ - BYTE $0xd5; BYTE $0x08 - MOVOU X14, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xdc // PUNPCKLQDQ XMM11, XMM12 /* m[12], m[0] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 1 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+m[1] - MOVOU 16(DX), X13 // X13 = m[2]+m[3] - MOVOU 32(DX), X14 // X14 = m[4]+m[5] - MOVOU 48(DX), X15 // X15 = m[6]+m[7] - MOVOU X12, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[2] */ - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[4], m[6] */ - MOVOU X12, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[1], m[3] */ - MOVOU X14, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[5], m[7] */ - - LOAD_SHUFFLE 
- - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as additional temp register) - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X12, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[8],m[10] */ - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[12],m[14] */ - MOVOU X12, X10 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[9],m[11] */ - MOVOU X14, X11 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[13],m[15] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 2 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1(b0, b1); - // LOAD_MSG_ ##r ##_2(b0, b1); - // (X12 used as additional temp register) - MOVOU 112(DX), X12 // X12 = m[14]+m[15] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - MOVOU X12, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[14], m[4] */ - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[9], m[13] */ - MOVOU 80(DX), X10 // X10 = m[10]+m[11] - MOVOU 48(DX), X11 // X11 = m[6]+ m[7] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[10], m[8] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM11, XMM12, 0x8 /* m[15], m[6] */; ; ; ; ; - BYTE $0xdc; BYTE $0x08 - - LOAD_SHUFFLE - - G1 - G2 - - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3(b0, b1); - // LOAD_MSG_ ##r ##_4(b0, b1); - // (X12 used as 
additional temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU X12, X8 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f // PALIGNR XMM8, XMM12, 0x8 /* m[1], m[0] */ - BYTE $0xc4; BYTE $0x08 - MOVOU X14, X9 - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* m[11], m[5] */ - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X11 // X11 = m[6]+ m[7] - MOVOU 96(DX), X10 // X10 = m[12]+m[13] - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6c; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[2] */ - BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x6d; BYTE $0xdc // PUNPCKHQDQ XMM11, XMM12 /* m[7], m[3] */ - - LOAD_SHUFFLE - - G1 - G2 - - UNDIAGONALIZE - - // Reload digest (most current value store in &out) - MOVQ out+144(FP), SI // SI: &in - MOVOU 0(SI), X12 // X12 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ - MOVOU 16(SI), X13 // X13 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ - MOVOU 32(SI), X14 // X14 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ - MOVOU 48(SI), X15 // X15 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ - - // Final computations and prepare for storing - PXOR X4, X0 // X0 = X0 ^ X4 /* row1l = _mm_xor_si128( row3l, row1l ); */ - PXOR X5, X1 // X1 = X1 ^ X5 /* row1h = _mm_xor_si128( row3h, row1h ); */ - PXOR X12, X0 // X0 = X0 ^ X12 /* STORE( &S->h[0], _mm_xor_si128( LOAD( &S->h[0] ), row1l ) ); */ - PXOR X13, X1 // X1 = X1 ^ X13 /* STORE( &S->h[2], _mm_xor_si128( LOAD( &S->h[2] ), row1h ) ); */ - PXOR X6, X2 // X2 = X2 ^ X6 /* row2l = _mm_xor_si128( row4l, row2l ); */ - PXOR X7, X3 // X3 = X3 ^ X7 /* row2h = _mm_xor_si128( row4h, row2h ); */ - PXOR X14, X2 // X2 = X2 ^ X14 /* STORE( &S->h[4], _mm_xor_si128( LOAD( &S->h[4] ), row2l ) ); */ - PXOR X15, X3 // X3 = X3 ^ X15 /* STORE( &S->h[6], _mm_xor_si128( LOAD( &S->h[6] ), row2h ) ); */ - - // Store digest into &out - MOVQ out+144(FP), SI // SI: &out - MOVOU X0, 
0(SI) // out[0]+out[1] = X0 - MOVOU X1, 16(SI) // out[2]+out[3] = X1 - MOVOU X2, 32(SI) // out[4]+out[5] = X2 - MOVOU X3, 48(SI) // out[6]+out[7] = X3 - - // Increment message pointer and check if there's more to do - ADDQ $128, DX // message += 128 - SUBQ $1, R8 - JNZ loop + // Increment counter + MOVQ t+72(FP), SI // SI: &t + MOVQ 0(SI), R9 + ADDQ $128, R9 // /* d.t[0] += BlockSize */ + MOVQ R9, 0(SI) + CMPQ R9, $128 // /* if d.t[0] < BlockSize { */ + JGE noincr + MOVQ 8(SI), R9 + ADDQ $1, R9 // /* d.t[1]++ */ + MOVQ R9, 8(SI) + +noincr: // /* } */ + + // Load initialization vector + MOVQ iv+48(FP), SI // SI: &iv + MOVOU 0(SI), X4 // X4 = iv[0]+iv[1] /* row3l = LOAD( &blake2b_IV[0] ); */ + MOVOU 16(SI), X5 // X5 = iv[2]+iv[3] /* row3h = LOAD( &blake2b_IV[2] ); */ + MOVOU 32(SI), X6 // X6 = iv[4]+iv[5] /* LOAD( &blake2b_IV[4] ) */ + MOVOU 48(SI), X7 // X7 = iv[6]+iv[7] /* LOAD( &blake2b_IV[6] ) */ + MOVQ t+72(FP), SI // SI: &t + MOVOU 0(SI), X8 // X8 = t[0]+t[1] /* LOAD( &S->t[0] ) */ + PXOR X8, X6 // X6 = X6 ^ X8 /* row4l = _mm_xor_si128( , ); */ + MOVQ t+96(FP), SI // SI: &f + MOVOU 0(SI), X8 // X8 = f[0]+f[1] /* LOAD( &S->f[0] ) */ + PXOR X8, X7 // X7 = X7 ^ X8 /* row4h = _mm_xor_si128( , ); */ + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+m[1] + MOVOU 16(DX), X13 // X13 = m[2]+m[3] + MOVOU 32(DX), X14 // X14 = m[4]+m[5] + MOVOU 48(DX), X15 // X15 = m[6]+m[7] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[2] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[4], m[6] */ + MOVOU X12, X10 + LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[1], m[3] */ + MOVOU X14, X11 + LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[5], m[7] */ + + LOAD_SHUFFLE + G1 
+ G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[8],m[10] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[12],m[14] */ + MOVOU X12, X10 + LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[9],m[11] */ + MOVOU X14, X11 + LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[13],m[15] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 2 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 112(DX), X12 // X12 = m[14]+m[15] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[14], m[4] */ + MOVOU X14, X9 + LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[9], m[13] */ + MOVOU 80(DX), X10 // X10 = m[10]+m[11] + MOVOU 48(DX), X11 // X11 = m[6]+ m[7] + LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[10], m[8] */ + LONG $0x3a0f4566; WORD $0xdc0f; BYTE $0x08 // PALIGNR XMM11, XMM12, 0x8 /* m[15], m[6] */; ; ; ; ; + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU X12, X8 + LONG $0x3a0f4566; WORD $0xc40f; BYTE $0x08 // PALIGNR XMM8, XMM12, 0x8 /* m[1], m[0] */ + MOVOU X14, X9 + LONG $0x6d0f4566; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* m[11], m[5] */ + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X11 // X11 = m[6]+ 
m[7] + MOVOU 96(DX), X10 // X10 = m[12]+m[13] + LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[2] */ + LONG $0x6d0f4566; BYTE $0xdc // PUNPCKHQDQ XMM11, XMM12 /* m[7], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 3 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 32(DX), X12 // X12 = m[4]+ m[5] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X14, X8 + LONG $0x3a0f4566; WORD $0xc50f; BYTE $0x08 // PALIGNR XMM8, XMM13, 0x8 /* m[11], m[12] */ + MOVOU X12, X9 + LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[5], m[15] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 64(DX), X10 // X10 = m[8]+ m[9] + LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[8], m[0] */ + LONG $0x6d0f4566; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[13] */ + MOVOU X13, X11 + LONG $0x6c0f4566; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[2], ___ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + MOVOU X12, X9 + LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* ___, m[3] */ + MOVOU X15, X8 + LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[10], ___ */ + MOVOU X13, X9 + LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[7], m[9] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X11 // X11 = m[4]+ m[5] + MOVOU 112(DX), X10 // X10 = m[14]+m[15] + LONG $0x6c0f4566; BYTE $0xd5 // PUNPCKLQDQ XMM10, XMM13 /* m[14], m[6] */ + LONG $0x3a0f4566; WORD $0xdc0f; BYTE $0x08 // PALIGNR XMM11, 
XMM12, 0x8 /* m[1], m[4] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 4 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + MOVOU X13, X8 + LONG $0x6d0f4566; BYTE $0xc4 // PUNPCKHQDQ XMM8, XMM12 /* m[7], m[3] */ + MOVOU X15, X9 + LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[13], m[11] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 64(DX), X10 // X10 = m[8]+ m[9] + MOVOU 112(DX), X14 // X14 = m[14]+m[15] + LONG $0x6d0f4566; BYTE $0xd4 // PUNPCKHQDQ XMM10, XMM12 /* m[9], m[1] */ + MOVOU X15, X11 + LONG $0x6c0f4566; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[12], m[14] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X13, X9 + LONG $0x6d0f4566; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* ___, m[5] */ + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[2], ____ */ + MOVOU X15, X10 + LONG $0x6d0f4566; BYTE $0xd7 // PUNPCKHQDQ XMM10, XMM15 /* ___, m[15] */ + MOVOU X13, X9 + LONG $0x6c0f4566; BYTE $0xca // PUNPCKLQDQ XMM9, XMM10 /* m[4], ____ */ + MOVOU 0(DX), X11 // X11 = m[0]+ m[1] + MOVOU 48(DX), X10 // X10 = m[6]+ m[7] + MOVOU 64(DX), X15 // X15 = m[8]+ m[9] + LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[6], m[10] */ + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[0], m[8] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 5 + 
/////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + MOVOU X14, X8 + LONG $0x6d0f4566; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[9], m[5] */ + MOVOU X12, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[2], m[10] */ + MOVOU 0(DX), X10 // X10 = m[0]+ m[1] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0f4566; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[7] */ + LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[0], ____ */ + LONG $0x6d0f4566; BYTE $0xff // PUNPCKHQDQ XMM15, XMM15 /* ___, m[15] */ + MOVOU X13, X11 + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[4], ____ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0f4566; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[11] */ + MOVOU X15, X8 + LONG $0x6c0f4566; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[14], ____ */ + LONG $0x6d0f4566; BYTE $0xe4 // PUNPCKHQDQ XMM12, XMM12 /* ___, m[3] */ + MOVOU X13, X9 + LONG $0x6c0f4566; BYTE $0xcc // PUNPCKLQDQ XMM9, XMM12 /* m[6], ____ */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 64(DX), X11 // X11 = m[8]+ m[9] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU X14, X10 + LONG $0x3a0f4566; WORD $0xd40f; BYTE $0x08 // PALIGNR XMM10, XMM12, 0x8 /* m[1], m[12] */ + LONG $0x6d0f4566; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[13] */ + LONG $0x6c0f4566; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[8], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + 
/////////////////////////////////////////////////////////////////////////// + // R O U N D 6 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 64(DX), X15 // X15 = m[8]+ m[9] + MOVOU X13, X8 + LONG $0x6c0f4566; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[2], m[6] */ + MOVOU X12, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[0], m[8] */ + MOVOU 80(DX), X12 // X12 = m[10]+m[11] + MOVOU 96(DX), X10 // X10 = m[12]+m[13] + LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[10] */ + MOVOU X12, X11 + LONG $0x6d0f4566; BYTE $0xdd // PUNPCKHQDQ XMM11, XMM13 /* m[11], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X14, X9 + LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* ___, m[7] */ + MOVOU X13, X8 + LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[4], ____ */ + MOVOU X15, X9 + LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* m[15], m[1] */ + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 96(DX), X10 // X10 = m[12]+m[13] + LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[13], m[5] */ + LONG $0x6d0f4566; BYTE $0xe4 // PUNPCKHQDQ XMM12, XMM12 /* ___, m[9] */ + MOVOU X15, X11 + LONG $0x6c0f4566; BYTE $0xdc // PUNPCKLQDQ XMM11, XMM12 /* m[14], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 7 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = 
m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X12, X9 + LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* ___, m[1] */ + MOVOU X14, X8 + LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[12], ____ */ + MOVOU X15, X9 + LONG $0x6c0f4566; BYTE $0xcd // PUNPCKLQDQ XMM9, XMM13 /* m[14], m[4] */ + MOVOU 80(DX), X11 // X11 = m[10]+m[11] + MOVOU X13, X10 + LONG $0x6d0f4566; BYTE $0xd7 // PUNPCKHQDQ XMM10, XMM15 /* m[5], m[15] */ + LONG $0x3a0f4566; WORD $0xde0f; BYTE $0x08 // PALIGNR XMM11, XMM14, 0x8 /* m[13], m[10] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[6] */ + MOVOU X14, X9 + LONG $0x3a0f4566; WORD $0xce0f; BYTE $0x08 // PALIGNR XMM9, XMM14, 0x8 /* m[9], m[8] */ + MOVOU 16(DX), X11 // X14 = m[2]+ m[3] + MOVOU X13, X10 + LONG $0x6d0f4566; BYTE $0xd3 // PUNPCKHQDQ XMM10, XMM11 /* m[7], m[3] */ + LONG $0x6d0f4566; BYTE $0xff // PUNPCKHQDQ XMM15, XMM15 /* ___, m[11] */ + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[2], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 8 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X14, X8 + LONG $0x6d0f4566; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[13], m[7] */ + MOVOU X12, X10 + LONG $0x6d0f4566; BYTE $0xd4 // PUNPCKHQDQ XMM10, XMM12 /* ___, m[3] 
*/ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xca // PUNPCKLQDQ XMM9, XMM10 /* m[12], ____ */ + MOVOU 0(DX), X11 // X11 = m[0]+ m[1] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU X15, X10 + LONG $0x3a0f4566; WORD $0xd60f; BYTE $0x08 // PALIGNR XMM10, XMM14, 0x8 /* m[11], m[14] */ + LONG $0x6d0f4566; BYTE $0xdd // PUNPCKHQDQ XMM11, XMM13 /* m[1], m[9] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X13, X8 + LONG $0x6d0f4566; BYTE $0xc7 // PUNPCKHQDQ XMM8, XMM15 /* m[5], m[15] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xcc // PUNPCKLQDQ XMM9, XMM12 /* m[8], m[2] */ + MOVOU 0(DX), X10 // X10 = m[0]+ m[1] + MOVOU 48(DX), X11 // X11 = m[6]+ m[7] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG $0x6c0f4566; BYTE $0xd5 // PUNPCKLQDQ XMM10, XMM13 /* m[0], m[4] */ + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[6], m[10] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 9 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X13, X8 + LONG $0x6c0f4566; BYTE $0xc7 // PUNPCKLQDQ XMM8, XMM15 /* m[6], m[14] */ + MOVOU X12, X9 + LONG $0x3a0f4566; WORD $0xce0f; BYTE $0x08 // PALIGNR XMM9, XMM14, 0x8 /* m[11], m[0] */ + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 64(DX), X11 // X11 = m[8]+ m[9] + MOVOU X15, X10 + LONG $0x6d0f4566; BYTE $0xd3 // PUNPCKHQDQ XMM10, XMM11 /* m[15], m[9] */ + LONG $0x3a0f4566; WORD $0xdd0f; BYTE $0x08 // PALIGNR XMM11, 
XMM13, 0x8 /* m[3], m[8] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + MOVOU X15, X9 + LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* ___, m[13] */ + MOVOU X15, X8 + LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[12], ____ */ + MOVOU X14, X9 + LONG $0x3a0f4566; WORD $0xcc0f; BYTE $0x08 // PALIGNR XMM9, XMM12, 0x8 /* m[1], m[10] */ + MOVOU 32(DX), X12 // X12 = m[4]+ m[5] + MOVOU 48(DX), X15 // X15 = m[6]+ m[7] + MOVOU X15, X11 + LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* ___, m[7] */ + MOVOU X13, X10 + LONG $0x6c0f4566; BYTE $0xd3 // PUNPCKLQDQ XMM10, XMM11 /* m[2], ____ */ + MOVOU X12, X15 + LONG $0x6d0f4566; BYTE $0xfc // PUNPCKHQDQ XMM15, XMM12 /* ___, m[5] */ + MOVOU X12, X11 + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[4], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 0 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + MOVOU X15, X8 + LONG $0x6c0f4566; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[10], m[8] */ + MOVOU X13, X9 + LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* m[7], m[1] */ + MOVOU 16(DX), X10 // X10 = m[2]+ m[3] + MOVOU 32(DX), X14 // X14 = m[4]+ m[5] + LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[2], m[4] */ + MOVOU X14, X15 + LONG $0x6d0f4566; BYTE $0xfe // PUNPCKHQDQ XMM15, XMM14 /* ___, m[5] */ + MOVOU X13, X11 + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[6], ____ */ + + 
LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X15, X8 + LONG $0x6d0f4566; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[15], m[9] */ + MOVOU X12, X9 + LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[3], m[13] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU X15, X10 + LONG $0x3a0f4566; WORD $0xd50f; BYTE $0x08 // PALIGNR XMM10, XMM13, 0x8 /* m[11], m[14] */ + MOVOU X14, X11 + LONG $0x6c0f4566; BYTE $0xdc // PUNPCKLQDQ XMM11, XMM12 /* m[12], m[0] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 1 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+m[1] + MOVOU 16(DX), X13 // X13 = m[2]+m[3] + MOVOU 32(DX), X14 // X14 = m[4]+m[5] + MOVOU 48(DX), X15 // X15 = m[6]+m[7] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[2] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[4], m[6] */ + MOVOU X12, X10 + LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[1], m[3] */ + MOVOU X14, X11 + LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[5], m[7] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[8],m[10] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[12],m[14] */ + MOVOU X12, 
X10 + LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[9],m[11] */ + MOVOU X14, X11 + LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[13],m[15] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 2 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 112(DX), X12 // X12 = m[14]+m[15] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[14], m[4] */ + MOVOU X14, X9 + LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[9], m[13] */ + MOVOU 80(DX), X10 // X10 = m[10]+m[11] + MOVOU 48(DX), X11 // X11 = m[6]+ m[7] + LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[10], m[8] */ + LONG $0x3a0f4566; WORD $0xdc0f; BYTE $0x08 // PALIGNR XMM11, XMM12, 0x8 /* m[15], m[6] */; ; ; ; ; + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU X12, X8 + LONG $0x3a0f4566; WORD $0xc40f; BYTE $0x08 // PALIGNR XMM8, XMM12, 0x8 /* m[1], m[0] */ + MOVOU X14, X9 + LONG $0x6d0f4566; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* m[11], m[5] */ + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X11 // X11 = m[6]+ m[7] + MOVOU 96(DX), X10 // X10 = m[12]+m[13] + LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[2] */ + LONG $0x6d0f4566; BYTE $0xdc // PUNPCKHQDQ XMM11, XMM12 /* m[7], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + // Reload digest (most current value store in &out) + MOVQ out+144(FP), SI // SI: &in + MOVOU 0(SI), X12 // X12 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ + MOVOU 16(SI), X13 // X13 = 
in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ + MOVOU 32(SI), X14 // X14 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ + MOVOU 48(SI), X15 // X15 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ + + // Final computations and prepare for storing + PXOR X4, X0 // X0 = X0 ^ X4 /* row1l = _mm_xor_si128( row3l, row1l ); */ + PXOR X5, X1 // X1 = X1 ^ X5 /* row1h = _mm_xor_si128( row3h, row1h ); */ + PXOR X12, X0 // X0 = X0 ^ X12 /* STORE( &S->h[0], _mm_xor_si128( LOAD( &S->h[0] ), row1l ) ); */ + PXOR X13, X1 // X1 = X1 ^ X13 /* STORE( &S->h[2], _mm_xor_si128( LOAD( &S->h[2] ), row1h ) ); */ + PXOR X6, X2 // X2 = X2 ^ X6 /* row2l = _mm_xor_si128( row4l, row2l ); */ + PXOR X7, X3 // X3 = X3 ^ X7 /* row2h = _mm_xor_si128( row4h, row2h ); */ + PXOR X14, X2 // X2 = X2 ^ X14 /* STORE( &S->h[4], _mm_xor_si128( LOAD( &S->h[4] ), row2l ) ); */ + PXOR X15, X3 // X3 = X3 ^ X15 /* STORE( &S->h[6], _mm_xor_si128( LOAD( &S->h[6] ), row2h ) ); */ + + // Store digest into &out + MOVQ out+144(FP), SI // SI: &out + MOVOU X0, 0(SI) // out[0]+out[1] = X0 + MOVOU X1, 16(SI) // out[2]+out[3] = X1 + MOVOU X2, 32(SI) // out[4]+out[5] = X2 + MOVOU X3, 48(SI) // out[6]+out[7] = X3 + + // Increment message pointer and check if there's more to do + ADDQ $128, DX // message += 128 + SUBQ $1, R8 + JNZ loop complete: - RET + RET diff --git a/vendor/github.com/minio/blake2b-simd/compress_generic.go b/vendor/github.com/minio/blake2b-simd/compress_generic.go index 62d81aaab..e9e16e8b9 100644 --- a/vendor/github.com/minio/blake2b-simd/compress_generic.go +++ b/vendor/github.com/minio/blake2b-simd/compress_generic.go @@ -26,12 +26,13 @@ func compressGeneric(d *digest, p []uint8) { v13 := iv[5] ^ d.t[1] v14 := iv[6] ^ d.f[0] v15 := iv[7] ^ d.f[1] - var m [16]uint64 j := 0 - for i := 0; i < 16; i++ { - m[i] = uint64(p[j]) | uint64(p[j+1])<<8 | uint64(p[j+2])<<16 | uint64(p[j+3])<<24 | - uint64(p[j+4])<<32 | uint64(p[j+5])<<40 | uint64(p[j+6])<<48 | uint64(p[j+7])<<56 + var m [16]uint64 + for i := range m { + 
m[i] = uint64(p[j]) | uint64(p[j+1])<<8 | uint64(p[j+2])<<16 | + uint64(p[j+3])<<24 | uint64(p[j+4])<<32 | uint64(p[j+5])<<40 | + uint64(p[j+6])<<48 | uint64(p[j+7])<<56 j += 8 } diff --git a/vendor/github.com/minio/sha256-simd/cpuid_386.s b/vendor/github.com/minio/sha256-simd/cpuid_386.s index fa38814ec..f908ae862 100644 --- a/vendor/github.com/minio/sha256-simd/cpuid_386.s +++ b/vendor/github.com/minio/sha256-simd/cpuid_386.s @@ -1,4 +1,24 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. +// The MIT License (MIT) +// +// Copyright (c) 2015 Klaus Post +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
// +build 386,!gccgo diff --git a/vendor/github.com/minio/sha256-simd/cpuid_amd64.s b/vendor/github.com/minio/sha256-simd/cpuid_amd64.s index fb45a6560..9a8a03297 100644 --- a/vendor/github.com/minio/sha256-simd/cpuid_amd64.s +++ b/vendor/github.com/minio/sha256-simd/cpuid_amd64.s @@ -1,4 +1,24 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. +// The MIT License (MIT) +// +// Copyright (c) 2015 Klaus Post +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
// +build amd64,!gccgo diff --git a/vendor/github.com/minio/sha256-simd/cpuid_arm.go b/vendor/github.com/minio/sha256-simd/cpuid_arm.go index 28637d391..351dff4b6 100644 --- a/vendor/github.com/minio/sha256-simd/cpuid_arm.go +++ b/vendor/github.com/minio/sha256-simd/cpuid_arm.go @@ -28,6 +28,5 @@ func xgetbv(index uint32) (eax, edx uint32) { } func haveArmSha() bool { - // TODO: Implement feature detection for ARM - return true + return false } diff --git a/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go b/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go new file mode 100644 index 000000000..e739996d9 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go @@ -0,0 +1,49 @@ +// +build arm64,linux + +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +import ( + "bytes" + "io/ioutil" +) + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func xgetbv(index uint32) (eax, edx uint32) { + return 0, 0 +} + +// File to check for cpu capabilities. +const procCPUInfo = "/proc/cpuinfo" + +// Feature to check for. 
+const sha256Feature = "sha2" + +func haveArmSha() bool { + cpuInfo, err := ioutil.ReadFile(procCPUInfo) + if err != nil { + return false + } + return bytes.Contains(cpuInfo, []byte(sha256Feature)) +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_others_arm64.go b/vendor/github.com/minio/sha256-simd/cpuid_others_arm64.go new file mode 100644 index 000000000..0fb4022f7 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_others_arm64.go @@ -0,0 +1,35 @@ +// +build arm64,!linux + +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func xgetbv(index uint32) (eax, edx uint32) { + return 0, 0 +} + +// Check for sha2 instruction flag. 
+func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_arm64.go b/vendor/github.com/minio/sha256-simd/cpuid_ppc64.go similarity index 93% rename from vendor/github.com/minio/sha256-simd/cpuid_arm64.go rename to vendor/github.com/minio/sha256-simd/cpuid_ppc64.go index 28637d391..351dff4b6 100644 --- a/vendor/github.com/minio/sha256-simd/cpuid_arm64.go +++ b/vendor/github.com/minio/sha256-simd/cpuid_ppc64.go @@ -28,6 +28,5 @@ func xgetbv(index uint32) (eax, edx uint32) { } func haveArmSha() bool { - // TODO: Implement feature detection for ARM - return true + return false } diff --git a/vendor/github.com/minio/sha256-simd/cpuid_ppc64le.go b/vendor/github.com/minio/sha256-simd/cpuid_ppc64le.go new file mode 100644 index 000000000..351dff4b6 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_ppc64le.go @@ -0,0 +1,32 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func xgetbv(index uint32) (eax, edx uint32) { + return 0, 0 +} + +func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/sha256.go b/vendor/github.com/minio/sha256-simd/sha256.go index a753e2886..f28236eba 100644 --- a/vendor/github.com/minio/sha256-simd/sha256.go +++ b/vendor/github.com/minio/sha256-simd/sha256.go @@ -89,7 +89,8 @@ func New() hash.Hash { d.Reset() return d } - // default back to the standard golang implementation + // Fallback to the standard golang implementation + // if no features were found. return sha256.New() } diff --git a/vendor/github.com/minio/sha256-simd/sha256block_ppc64.go b/vendor/github.com/minio/sha256-simd/sha256block_ppc64.go new file mode 100644 index 000000000..b81017e8d --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_ppc64.go @@ -0,0 +1,22 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +func blockAvx2Go(dig *digest, p []byte) {} +func blockAvxGo(dig *digest, p []byte) {} +func blockSsseGo(dig *digest, p []byte) {} +func blockArmGo(dig *digest, p []byte) {} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_ppc64le.go b/vendor/github.com/minio/sha256-simd/sha256block_ppc64le.go new file mode 100644 index 000000000..b81017e8d --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_ppc64le.go @@ -0,0 +1,22 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +func blockAvx2Go(dig *digest, p []byte) {} +func blockAvxGo(dig *digest, p []byte) {} +func blockSsseGo(dig *digest, p []byte) {} +func blockArmGo(dig *digest, p []byte) {} diff --git a/vendor/vendor.json b/vendor/vendor.json index 6698c14ed..4ac32a7ac 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1,6 +1,6 @@ { "comment": "", - "ignore": "appengine test", + "ignore": "test", "package": [ { "path": "github.com/Sirupsen/logrus", @@ -62,7 +62,7 @@ "revisionTime": "2016-06-10T14:06:02+03:00" }, { - "checksumSHA1": "2a/SsTUBMKtcM6VtpbdPGO+c6c8=", + "checksumSHA1": "W+E/2xXcE1GmJ0Qb784ald0Fn6I=", "path": "github.com/golang/snappy", "revision": "d9eb7a3d35ec988b8585d4a0068e462c27d28380", "revisionTime": "2016-05-29T05:00:41Z" @@ -109,10 +109,10 @@ "revisionTime": "2016-10-16T15:41:25Z" }, { - "checksumSHA1": "XRii0aDqXZvztXflEB2EE9TRoks=", + "checksumSHA1": "Pzd1bfm8Yj1radncaohNZu+UT1I=", "path": "github.com/klauspost/reedsolomon", - "revision": "c54154da9e35cab25232314cf69ab9d78447f9a5", - "revisionTime": "2016-09-12T19:31:07Z" + "revision": "d0a56f72c0d40a6cdde43a1575ad9686a0098b70", + "revisionTime": "2016-10-28T07:13:20Z" }, { "checksumSHA1": "dNYxHiBLalTqluak2/Z8c3RsSEM=", @@ -137,9 +137,10 @@ "revisionTime": "2015-12-11T09:06:21+09:00" }, { + "checksumSHA1": "IgPoMBktWdCLuyzDBfzi34sT+jg=", "path": "github.com/minio/blake2b-simd", - "revision": "25efc542f2c5064cf312cdca043790a7af861c4c", - "revisionTime": "2016-07-06T10:29:24+02:00" + "revision": "c50cace0dc7d72a80244a5f88ddd3e08a73db8de", + "revisionTime": "2016-07-22T09:38:12Z" }, { "path": "github.com/minio/cli", @@ -187,10 +188,10 @@ "revisionTime": "2016-07-24T00:05:56Z" }, { - "checksumSHA1": "i8Hl0yGP1jqorMgfFMoJCItnI38=", + "checksumSHA1": "URVle4qtadmW9w9BulDRHY3kxnA=", "path": "github.com/minio/sha256-simd", - "revision": "6f50cd1d784b2bea46167b6929f16c0d12eefbfb", - "revisionTime": "2016-08-16T22:25:11Z" + "revision": "e82e73b775766b9011503e80e6772fc32b9afc5b", + 
"revisionTime": "2016-12-19T23:17:30Z" }, { "checksumSHA1": "Nj7vQ2GlvJiPP7sqJX5AurrDSD4=", From d8e4d3c9c88010a6805894c1d79da92b9a022bde Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Tue, 20 Dec 2016 18:32:17 +0100 Subject: [PATCH 021/100] POSTForm: Return http 303 if redirect is specified (#3479) success_action_redirect in the sent Form means that the server needs to return 303 in addition to a well specific redirection url, this commit adds this feature --- cmd/api-response.go | 12 +++-- cmd/bucket-handlers.go | 43 ++++++++++++----- cmd/post-policy_test.go | 103 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 137 insertions(+), 21 deletions(-) diff --git a/cmd/api-response.go b/cmd/api-response.go index 120132bb8..420e7a608 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -492,17 +492,23 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte) { w.(http.Flusher).Flush() } -// writeSuccessResponse write success headers and response if any. +// writeSuccessResponse writes success headers and response if any. 
func writeSuccessResponse(w http.ResponseWriter, response []byte) { writeResponse(w, http.StatusOK, response) } -// writeSuccessNoContent write success headers with http status 204 +// writeSuccessNoContent writes success headers with http status 204 func writeSuccessNoContent(w http.ResponseWriter) { writeResponse(w, http.StatusNoContent, nil) } -// writeErrorRespone write error headers +// writeRedirectSeeOther writes Location header with http status 303 +func writeRedirectSeeOther(w http.ResponseWriter, location string) { + w.Header().Set("Location", location) + writeResponse(w, http.StatusSeeOther, nil) +} + +// writeErrorRespone writes error headers func writeErrorResponse(w http.ResponseWriter, req *http.Request, errorCode APIErrorCode, resource string) { apiError := getAPIError(errorCode) // set common headers diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index ede9c9643..7a9e12b4b 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -19,6 +19,7 @@ package cmd import ( "encoding/base64" "encoding/xml" + "fmt" "io" "net/http" "net/url" @@ -447,21 +448,37 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"") w.Header().Set("Location", getObjectLocation(bucket, object)) - // Decide what http response to send depending on success_action_status parameter - switch formValues[http.CanonicalHeaderKey("success_action_status")] { - case "201": - resp := encodeResponse(PostResponse{ - Bucket: bucket, - Key: object, - ETag: "\"" + objInfo.MD5Sum + "\"", - Location: getObjectLocation(bucket, object), - }) - writeResponse(w, http.StatusCreated, resp) + successRedirect := formValues[http.CanonicalHeaderKey("success_action_redirect")] + successStatus := formValues[http.CanonicalHeaderKey("success_action_status")] - case "200": - writeSuccessResponse(w, nil) - default: + if successStatus == "" && successRedirect == "" { writeSuccessNoContent(w) + } else { + if 
successRedirect != "" { + redirectURL := successRedirect + "?" + fmt.Sprintf("bucket=%s&key=%s&etag=%s", + bucket, + getURLEncodedName(object), + getURLEncodedName("\""+objInfo.MD5Sum+"\"")) + + writeRedirectSeeOther(w, redirectURL) + } else { + // Decide what http response to send depending on success_action_status parameter + switch successStatus { + case "201": + resp := encodeResponse(PostResponse{ + Bucket: bucket, + Key: object, + ETag: "\"" + objInfo.MD5Sum + "\"", + Location: getObjectLocation(bucket, object), + }) + writeResponse(w, http.StatusCreated, resp) + + case "200": + writeSuccessResponse(w, nil) + default: + writeSuccessNoContent(w) + } + } } // Notify object created event. diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 2453d224c..6881f18af 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -326,10 +326,10 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. rec := httptest.NewRecorder() - // policy := buildGenericPolicy(curTime, testCase.accessKey, bucketName, testCase.objectName, false) testCase.policy = fmt.Sprintf(testCase.policy, testCase.dates...) + req, perr := newPostRequestV4Generic("", bucketName, testCase.objectName, testCase.data, testCase.accessKey, - testCase.secretKey, curTime, []byte(testCase.policy), testCase.corruptedBase64, testCase.corruptedMultipart) + testCase.secretKey, curTime, []byte(testCase.policy), nil, testCase.corruptedBase64, testCase.corruptedMultipart) if perr != nil { t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: %v", i+1, instanceType, perr) } @@ -395,6 +395,93 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr } +// Wrapper for calling TestPostPolicyBucketHandlerRedirect tests for both XL multiple disks and single node setup. 
+func TestPostPolicyBucketHandlerRedirect(t *testing.T) { + ExecObjectLayerTest(t, testPostPolicyBucketHandlerRedirect) +} + +// testPostPolicyBucketHandlerRedirect tests POST Object when success_action_redirect is specified +func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t TestErrHandler) { + root, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Initializing config.json failed") + } + defer removeAll(root) + + // Register event notifier. + err = initEventNotifier(obj) + if err != nil { + t.Fatalf("Initializing event notifiers failed") + } + + // get random bucket name. + bucketName := getRandomBucketName() + + // Key specified in Form data + keyName := "test/object" + + // The final name of the upload object + targetObj := keyName + "/upload.txt" + + // The url of success_action_redirect field + redirectURL := "http://www.google.com" + + // Register the API end points with XL/FS object layer. + apiRouter := initTestAPIEndPoints(obj, []string{"PostPolicy"}) + + credentials := serverConfig.GetCredential() + + curTime := time.Now().UTC() + curTimePlus5Min := curTime.Add(time.Minute * 5) + + err = obj.MakeBucket(bucketName) + if err != nil { + // Failed to create newbucket, abort. + t.Fatalf("%s : %s", instanceType, err.Error()) + } + + // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. + rec := httptest.NewRecorder() + + dates := []interface{}{curTimePlus5Min.Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)} + policy := `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], {"success_action_redirect":"` + redirectURL + `"},["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKeyID + `/%s/us-east-1/s3/aws4_request"]]}` + + // Generate the final policy document + policy = fmt.Sprintf(policy, dates...) 
+ + // Create a new POST request with success_action_redirect field specified + req, perr := newPostRequestV4Generic("", bucketName, keyName, []byte("objData"), + credentials.AccessKeyID, credentials.SecretAccessKey, curTime, + []byte(policy), map[string]string{"success_action_redirect": redirectURL}, false, false) + + if perr != nil { + t.Fatalf("%s: Failed to create HTTP request for PostPolicyHandler: %v", instanceType, perr) + } + // Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler. + // Call the ServeHTTP to execute the handler. + apiRouter.ServeHTTP(rec, req) + + // Check the status code, which must be 303 because success_action_redirect is specified + if rec.Code != http.StatusSeeOther { + t.Errorf("%s: Expected the response status to be `%d`, but instead found `%d`", instanceType, http.StatusSeeOther, rec.Code) + } + + // Get the uploaded object info + info, err := obj.GetObjectInfo(bucketName, targetObj) + if err != nil { + t.Error("Unexpected error: ", err) + } + + expectedLocation := fmt.Sprintf(redirectURL+"?bucket=%s&key=%s&etag=%s", + bucketName, getURLEncodedName(targetObj), getURLEncodedName("\""+info.MD5Sum+"\"")) + + // Check the new location url + if rec.HeaderMap.Get("Location") != expectedLocation { + t.Errorf("Unexpected location, expected = %s, found = `%s`", rec.HeaderMap.Get("Location"), expectedLocation) + } + +} + // postPresignSignatureV4 - presigned signature for PostPolicy requests. func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { // Get signining key. 
@@ -467,7 +554,8 @@ func buildGenericPolicy(t time.Time, accessKey, bucketName, objectName string, c return policy } -func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, t time.Time, policy []byte, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) { +func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, + t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) { // Get the user credential. credStr := getCredential(accessKey, serverConfig.GetRegion(), t) @@ -493,6 +581,11 @@ func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData [] "Content-Encoding": "gzip", } + // Add form data + for k, v := range addFormData { + formData[k] = v + } + // Create the multipart form. var buf bytes.Buffer w := multipart.NewWriter(&buf) @@ -529,11 +622,11 @@ func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData [] func newPostRequestV4WithContentLength(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) { t := time.Now().UTC() policy := buildGenericPolicy(t, accessKey, bucketName, objectName, true) - return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, t, policy, false, false) + return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, t, policy, nil, false, false) } func newPostRequestV4(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) { t := time.Now().UTC() policy := buildGenericPolicy(t, accessKey, bucketName, objectName, false) - return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, t, policy, false, false) + return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, 
secretKey, t, policy, nil, false, false) } From ef3319a49d2a51fc5585fd141786b062d39d6eb7 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Tue, 20 Dec 2016 20:59:08 +0100 Subject: [PATCH 022/100] Fix: Typo in non canonicalized header extraction (#3480) Extracting metadata from headers was doing wrong when Headers are not well canonicalized, fixing typo. --- cmd/handler-utils.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index 4fc0083f9..0e2c7dcf7 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -83,9 +83,9 @@ func extractMetadataFromHeader(header http.Header) map[string]string { for key := range header { cKey := http.CanonicalHeaderKey(key) if strings.HasPrefix(cKey, "X-Amz-Meta-") { - metadata[cKey] = header.Get(cKey) + metadata[cKey] = header.Get(key) } else if strings.HasPrefix(key, "X-Minio-Meta-") { - metadata[cKey] = header.Get(cKey) + metadata[cKey] = header.Get(key) } } // Return. From 4309727354a12bfc439bd1132f549e72c8465973 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 20 Dec 2016 14:42:13 -0800 Subject: [PATCH 023/100] docs: Add docs for minio limitations. (#3477) --- docs/minio-limitations.md | 50 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 docs/minio-limitations.md diff --git a/docs/minio-limitations.md b/docs/minio-limitations.md new file mode 100644 index 000000000..4af0c6074 --- /dev/null +++ b/docs/minio-limitations.md @@ -0,0 +1,50 @@ +## Minio Server Limits Per Tenant +We found the following APIs to be redundant or less useful outside of AWS. If you have a different view on any of the APIs we missed, please open a [github issue](https://github.com/minio/minio/issues). 
+ +### Erasure Code (Multiple Drives / Servers) + +|Item|Specification| +|:---|:---| +|Maximum number of drives| 16| +|Minimum number of drives| 4| +|Read quorum| N/2| +|Write quorum| N/2+1| + +### Browser Access + +|Item|Specification| +|:---|:---| +|Web browser upload size limit| 5GB| + +### Limits of S3 API + +|Item|Specification| +|:---|:---| +|Maximum number of buckets| no-limit| +|Maximum number of objects per bucket| no-limit| +|Maximum object size| 5 TB| +|Minimum object size| 0 B| +|Maximum object size per PUT operation| 5 GB| +|Maximum number of parts per upload| 10,000| +|Part size|5 MB to 5 GB. Last part can be 0 B to 5 GB| +|Maximum number of parts returned per list parts request| 1000| +|Maximum number of objects returned per list objects request| 1000| +|Maximum number of multipart uploads returned per list multipart uploads request| 1000| + +### List of Amazon S3 Bucket API's not supported on Minio. + +- BucketACL (Use bucket policies instead) +- BucketCORS (CORS enabled by default) +- BucketLifecycle (Not required for Minio's XL backend) +- BucketReplication (Use `mc mirror` instead) +- BucketVersions, BucketVersioning (Use `s3git`) +- BucketWebsite (Use `caddy` or `nginx`) +- BucketAnalytics, BucketMetrics, BucketLogging (Use bucket notification APIs) +- BucketRequestPayment +- BucketTagging + +### List of Amazon S3 Object API's not supported on Minio. 
+ +- ObjectACL (Use bucket policies instead) +- ObjectTorrent +- ObjectCopyPart From 329a910b86501f27780b2664c46dcf8cb5d00f59 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Tue, 20 Dec 2016 23:45:17 +0100 Subject: [PATCH 024/100] Admin Lib: Implement Service API (#3426) Three APIs were added to control a minio server * NewAdminClient() * ServiceStop() * ServiceRestart() * ServiceStatus() --- pkg/madmin/api-error-response.go | 60 +++ pkg/madmin/client.go | 37 ++ pkg/madmin/client_test.go | 24 + pkg/madmin/constants.go | 19 + pkg/madmin/requests.go | 480 ++++++++++++++++++ pkg/madmin/service-api.go | 141 +++++ pkg/madmin/utils.go | 108 ++++ .../vendor/github.com/minio/minio-go/LICENSE | 202 ++++++++ .../pkg/s3signer/request-signature-v2.go | 324 ++++++++++++ .../pkg/s3signer/request-signature-v4.go | 305 +++++++++++ .../minio/minio-go/pkg/s3signer/utils.go | 39 ++ .../minio/minio-go/pkg/s3utils/utils.go | 192 +++++++ pkg/madmin/vendor/vendor.json | 19 + 13 files changed, 1950 insertions(+) create mode 100644 pkg/madmin/api-error-response.go create mode 100644 pkg/madmin/client.go create mode 100644 pkg/madmin/client_test.go create mode 100644 pkg/madmin/constants.go create mode 100644 pkg/madmin/requests.go create mode 100644 pkg/madmin/service-api.go create mode 100644 pkg/madmin/utils.go create mode 100644 pkg/madmin/vendor/github.com/minio/minio-go/LICENSE create mode 100644 pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go create mode 100644 pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go create mode 100644 pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go create mode 100644 pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go create mode 100644 pkg/madmin/vendor/vendor.json diff --git a/pkg/madmin/api-error-response.go b/pkg/madmin/api-error-response.go new file mode 100644 index 000000000..49abf7e70 --- /dev/null +++ b/pkg/madmin/api-error-response.go @@ -0,0 +1,60 @@ 
+/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package madmin + +import "encoding/xml" + +/* **** SAMPLE ERROR RESPONSE **** + + + AccessDenied + Access Denied + bucketName + objectName + F19772218238A85A + GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD + +*/ + +// ErrorResponse - Is the typed error returned by all API operations. +type ErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string + Message string + BucketName string + Key string + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` + + // Region where the bucket is located. This header is returned + // only in HEAD bucket and ListObjects response. + Region string +} + +// Error - Returns HTTP error string +func (e ErrorResponse) Error() string { + return e.Message +} + +// ErrInvalidArgument - Invalid argument response. +func ErrInvalidArgument(message string) error { + return ErrorResponse{ + Code: "InvalidArgument", + Message: message, + RequestID: "minio", + } +} diff --git a/pkg/madmin/client.go b/pkg/madmin/client.go new file mode 100644 index 000000000..612e45927 --- /dev/null +++ b/pkg/madmin/client.go @@ -0,0 +1,37 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package madmin + +const ( + minioAdminOpHeader = "X-Minio-Operation" +) + +// AdminClient - interface to Minio Management API +type AdminClient struct { + client *Client +} + +// NewAdminClient - create new Management client +func NewAdminClient(addr string, access string, secret string, secure bool) (*AdminClient, error) { + client, err := New(addr, access, secret, secure) + if err != nil { + return nil, err + } + + return &AdminClient{client: client}, nil +} diff --git a/pkg/madmin/client_test.go b/pkg/madmin/client_test.go new file mode 100644 index 000000000..1aff33c45 --- /dev/null +++ b/pkg/madmin/client_test.go @@ -0,0 +1,24 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package madmin_test +package madmin_test + +import "testing" + +func TestMAdminClient(t *testing.T) { +} diff --git a/pkg/madmin/constants.go b/pkg/madmin/constants.go new file mode 100644 index 000000000..299945b73 --- /dev/null +++ b/pkg/madmin/constants.go @@ -0,0 +1,19 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package madmin + +const unsignedPayload = "UNSIGNED-PAYLOAD" diff --git a/pkg/madmin/requests.go b/pkg/madmin/requests.go new file mode 100644 index 000000000..3c6c7b615 --- /dev/null +++ b/pkg/madmin/requests.go @@ -0,0 +1,480 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package madmin + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/http/httputil" + "net/url" + "os" + "regexp" + "runtime" + "strings" + + "github.com/minio/minio-go/pkg/s3signer" + "github.com/minio/minio-go/pkg/s3utils" +) + +// Client implements Amazon S3 compatible methods. +type Client struct { + /// Standard options. + + // AccessKeyID required for authorized requests. + accessKeyID string + // SecretAccessKey required for authorized requests. + secretAccessKey string + + // User supplied. + appInfo struct { + appName string + appVersion string + } + + endpointURL url.URL + + // Indicate whether we are using https or not + secure bool + + // Needs allocation. + httpClient *http.Client + + // Advanced functionality. + isTraceEnabled bool + traceOutput io.Writer + + // Random seed. + random *rand.Rand +} + +// Global constants. +const ( + libraryName = "madmin-go" + libraryVersion = "0.0.1" +) + +// User Agent should always following the below style. +// Please open an issue to discuss any new changes here. +// +// Minio (OS; ARCH) LIB/VER APP/VER +const ( + libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") " + libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion +) + +// New - instantiate minio client Client, adds automatic verification +// of signature. +func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) + if err != nil { + return nil, err + } + return clnt, nil +} + +// redirectHeaders copies all headers when following a redirect URL. 
+// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800) +func redirectHeaders(req *http.Request, via []*http.Request) error { + if len(via) == 0 { + return nil + } + for key, val := range via[0].Header { + req.Header[key] = val + } + return nil +} + +func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + // construct endpoint. + endpointURL, err := getEndpointURL(endpoint, secure) + if err != nil { + return nil, err + } + + // instantiate new Client. + clnt := new(Client) + clnt.accessKeyID = accessKeyID + clnt.secretAccessKey = secretAccessKey + + // Remember whether we are using https or not + clnt.secure = secure + + // Save endpoint URL, user agent for future uses. + clnt.endpointURL = *endpointURL + + // Instantiate http client and bucket location cache. + clnt.httpClient = &http.Client{ + Transport: http.DefaultTransport, + CheckRedirect: redirectHeaders, + } + + // Return. + return clnt, nil +} + +// SetAppInfo - add application details to user agent. +func (c *Client) SetAppInfo(appName string, appVersion string) { + // if app name and version is not set, we do not a new user + // agent. + if appName != "" && appVersion != "" { + c.appInfo = struct { + appName string + appVersion string + }{} + c.appInfo.appName = appName + c.appInfo.appVersion = appVersion + } +} + +// SetCustomTransport - set new custom transport. +func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { + // Set this to override default transport + // ``http.DefaultTransport``. 
+ // + // This transport is usually needed for debugging OR to add your + // own custom TLS certificates on the client transport, for custom + // CA's and certs which are not part of standard certificate + // authority follow this example :- + // + // tr := &http.Transport{ + // TLSClientConfig: &tls.Config{RootCAs: pool}, + // DisableCompression: true, + // } + // api.SetTransport(tr) + // + if c.httpClient != nil { + c.httpClient.Transport = customHTTPTransport + } +} + +// TraceOn - enable HTTP tracing. +func (c *Client) TraceOn(outputStream io.Writer) { + // if outputStream is nil then default to os.Stdout. + if outputStream == nil { + outputStream = os.Stdout + } + // Sets a new output stream. + c.traceOutput = outputStream + + // Enable tracing. + c.isTraceEnabled = true +} + +// TraceOff - disable HTTP tracing. +func (c *Client) TraceOff() { + // Disable tracing. + c.isTraceEnabled = false +} + +// requestMetadata - is container for all the values to make a +// request. +type requestData struct { + customHeaders http.Header + queryValues url.Values + + contentBody io.Reader + contentLength int64 + contentSHA256Bytes []byte + contentMD5Bytes []byte +} + +// Filter out signature value from Authorization header. +func (c Client) filterSignature(req *http.Request) { + /// Signature V4 authorization header. + + // Save the original auth. + origAuth := req.Header.Get("Authorization") + // Strip out accessKeyID from: + // Credential=////aws4_request + regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/") + newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") + + // Strip out 256-bit signature from: Signature=<256-bit signature> + regSign := regexp.MustCompile("Signature=([[0-9a-f]+)") + newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") + + // Set a temporary redacted auth + req.Header.Set("Authorization", newAuth) + return +} + +// dumpHTTP - dump HTTP request and response. 
+func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { + // Starts http dump. + _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") + if err != nil { + return err + } + + // Filter out Signature field from Authorization header. + c.filterSignature(req) + + // Only display request header. + reqTrace, err := httputil.DumpRequestOut(req, false) + if err != nil { + return err + } + + // Write request to trace output. + _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) + if err != nil { + return err + } + + // Only display response header. + var respTrace []byte + + // For errors we make sure to dump response body as well. + if resp.StatusCode != http.StatusOK && + resp.StatusCode != http.StatusPartialContent && + resp.StatusCode != http.StatusNoContent { + respTrace, err = httputil.DumpResponse(resp, true) + if err != nil { + return err + } + } else { + // WORKAROUND for https://github.com/golang/go/issues/13942. + // httputil.DumpResponse does not print response headers for + // all successful calls which have response ContentLength set + // to zero. Keep this workaround until the above bug is fixed. + if resp.ContentLength == 0 { + var buffer bytes.Buffer + if err = resp.Header.Write(&buffer); err != nil { + return err + } + respTrace = buffer.Bytes() + respTrace = append(respTrace, []byte("\r\n")...) + } else { + respTrace, err = httputil.DumpResponse(resp, false) + if err != nil { + return err + } + } + } + // Write response to trace output. + _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) + if err != nil { + return err + } + + // Ends the http dump. + _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") + if err != nil { + return err + } + + // Returns success. + return nil +} + +// do - execute http request. 
+func (c Client) do(req *http.Request) (*http.Response, error) { + var resp *http.Response + var err error + // Do the request in a loop in case of 307 http is met since golang still doesn't + // handle properly this situation (https://github.com/golang/go/issues/7912) + for { + resp, err = c.httpClient.Do(req) + if err != nil { + // Handle this specifically for now until future Golang + // versions fix this issue properly. + urlErr, ok := err.(*url.Error) + if ok && strings.Contains(urlErr.Err.Error(), "EOF") { + return nil, &url.Error{ + Op: urlErr.Op, + URL: urlErr.URL, + Err: fmt.Errorf("Connection closed by foreign host %s", urlErr.URL), + } + } + return nil, err + } + // Redo the request with the new redirect url if http 307 is returned, quit the loop otherwise + if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect { + newURL, uErr := url.Parse(resp.Header.Get("Location")) + if uErr != nil { + break + } + req.URL = newURL + } else { + break + } + } + + // Response cannot be non-nil, report if its the case. + if resp == nil { + msg := "Response is empty. " // + reportIssue + return nil, ErrInvalidArgument(msg) + } + + // If trace is enabled, dump http request and response. + if c.isTraceEnabled { + err = c.dumpHTTP(req, resp) + if err != nil { + return nil, err + } + } + return resp, nil +} + +// List of success status. +var successStatus = []int{ + http.StatusOK, + http.StatusNoContent, + http.StatusPartialContent, +} + +// executeMethod - instantiates a given method, and retries the +// request upon any error up to maxRetries attempts in a binomially +// delayed manner using a standard back off algorithm. +func (c Client) executeMethod(method string, reqData requestData) (res *http.Response, err error) { + + // Create a done channel to control 'ListObjects' go routine. + doneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(doneCh) + + // Instantiate a new request. 
+ var req *http.Request + req, err = c.newRequest(method, reqData) + if err != nil { + return nil, err + } + + // Initiate the request. + res, err = c.do(req) + if err != nil { + return nil, err + } + + // For any known successful http status, return quickly. + for _, httpStatus := range successStatus { + if httpStatus == res.StatusCode { + return res, nil + } + } + + // Read the body to be saved later. + errBodyBytes, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + // Save the body. + errBodySeeker := bytes.NewReader(errBodyBytes) + res.Body = ioutil.NopCloser(errBodySeeker) + + // Save the body back again. + errBodySeeker.Seek(0, 0) // Seek back to starting point. + res.Body = ioutil.NopCloser(errBodySeeker) + + return res, err +} + +// set User agent. +func (c Client) setUserAgent(req *http.Request) { + req.Header.Set("User-Agent", libraryUserAgent) + if c.appInfo.appName != "" && c.appInfo.appVersion != "" { + req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) + } +} + +// newRequest - instantiate a new HTTP request for a given method. +func (c Client) newRequest(method string, reqData requestData) (req *http.Request, err error) { + // If no method is supplied default to 'POST'. + if method == "" { + method = "POST" + } + + // Default all requests to "us-east-1" + location := "us-east-1" + + // Construct a new target URL. + targetURL, err := c.makeTargetURL(reqData.queryValues) + if err != nil { + return nil, err + } + + // Initialize a new HTTP request for the method. + req, err = http.NewRequest(method, targetURL.String(), nil) + if err != nil { + return nil, err + } + + // Set content body if available. + if reqData.contentBody != nil { + req.Body = ioutil.NopCloser(reqData.contentBody) + } + + // Set 'User-Agent' header for the request. + c.setUserAgent(req) + + // Set all headers. + for k, v := range reqData.customHeaders { + req.Header.Set(k, v[0]) + } + + // set incoming content-length. 
+ if reqData.contentLength > 0 { + req.ContentLength = reqData.contentLength + } + + shaHeader := unsignedPayload + if !c.secure { + if reqData.contentSHA256Bytes == nil { + shaHeader = hex.EncodeToString(sum256([]byte{})) + } else { + shaHeader = hex.EncodeToString(reqData.contentSHA256Bytes) + } + } + req.Header.Set("X-Amz-Content-Sha256", shaHeader) + + // set md5Sum for content protection. + if reqData.contentMD5Bytes != nil { + req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(reqData.contentMD5Bytes)) + } + + // Add signature version '4' authorization header. + req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location) + + // Return request. + return req, nil +} + +// makeTargetURL make a new target url. +func (c Client) makeTargetURL(queryValues url.Values) (*url.URL, error) { + + host := c.endpointURL.Host + scheme := c.endpointURL.Scheme + + urlStr := scheme + "://" + host + "/" + + // If there are any query values, add them to the end. + if len(queryValues) > 0 { + urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) + } + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + return u, nil +} diff --git a/pkg/madmin/service-api.go b/pkg/madmin/service-api.go new file mode 100644 index 000000000..2d62f7b02 --- /dev/null +++ b/pkg/madmin/service-api.go @@ -0,0 +1,141 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package madmin + +import ( + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/url" +) + +// BackendType - represents different backend types. +type BackendType int + +// Enum for different backend types. +const ( + Unknown BackendType = iota + // Filesystem backend. + FS + // Multi disk single node XL backend. + XL + // Add your own backend. +) + +// ServiceStatusMetadata - represents total capacity of underlying storage. +type ServiceStatusMetadata struct { + // Total disk space. + Total int64 + // Free available disk space. + Free int64 + // Backend type. + Backend struct { + // Represents various backend types, currently on FS and XL. + Type BackendType + // Following fields are only meaningful if BackendType is XL. + OnlineDisks int // Online disks during server startup. + OfflineDisks int // Offline disks during server startup. + ReadQuorum int // Minimum disks required for successful read operations. + WriteQuorum int // Minimum disks required for successful write operations. + } +} + +// ServiceStatus - Connect to a minio server and call Service Status Management API +// to fetch server's storage information represented by ServiceStatusMetadata structure +func (adm *AdminClient) ServiceStatus() (ServiceStatusMetadata, error) { + + reqData := requestData{} + reqData.queryValues = make(url.Values) + reqData.queryValues.Set("service", "") + reqData.customHeaders = make(http.Header) + reqData.customHeaders.Set(minioAdminOpHeader, "status") + + // Execute GET on bucket to list objects. 
+ resp, err := adm.client.executeMethod("GET", reqData) + + defer closeResponse(resp) + if err != nil { + return ServiceStatusMetadata{}, err + } + + if resp.StatusCode != http.StatusOK { + return ServiceStatusMetadata{}, errors.New("Got " + resp.Status) + } + + respBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return ServiceStatusMetadata{}, err + } + + var storageInfo ServiceStatusMetadata + + err = json.Unmarshal(respBytes, &storageInfo) + if err != nil { + return ServiceStatusMetadata{}, err + } + + return storageInfo, nil +} + +// ServiceStop - Call Service Stop Management API to stop a specified Minio server +func (adm *AdminClient) ServiceStop() error { + // + reqData := requestData{} + reqData.queryValues = make(url.Values) + reqData.queryValues.Set("service", "") + reqData.customHeaders = make(http.Header) + reqData.customHeaders.Set(minioAdminOpHeader, "stop") + + // Execute GET on bucket to list objects. + resp, err := adm.client.executeMethod("POST", reqData) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return errors.New("Got " + resp.Status) + } + + return nil +} + +// ServiceRestart - Call Service Restart API to restart a specified Minio server +func (adm *AdminClient) ServiceRestart() error { + // + reqData := requestData{} + reqData.queryValues = make(url.Values) + reqData.queryValues.Set("service", "") + reqData.customHeaders = make(http.Header) + reqData.customHeaders.Set(minioAdminOpHeader, "restart") + + // Execute GET on bucket to list objects. 
+ resp, err := adm.client.executeMethod("POST", reqData) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return errors.New("Got " + resp.Status) + } + return nil +} diff --git a/pkg/madmin/utils.go b/pkg/madmin/utils.go new file mode 100644 index 000000000..1661c2f8c --- /dev/null +++ b/pkg/madmin/utils.go @@ -0,0 +1,108 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package madmin + +import ( + "crypto/sha256" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// sum256 calculate sha256 sum for an input byte array. +func sum256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getEndpointURL - construct a new endpoint. +func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { + if strings.Contains(endpoint, ":") { + host, _, err := net.SplitHostPort(endpoint) + if err != nil { + return nil, err + } + if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { + msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." + return nil, ErrInvalidArgument(msg) + } + } else { + if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { + msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." 
+ return nil, ErrInvalidArgument(msg) + } + } + // If secure is false, use 'http' scheme. + scheme := "https" + if !secure { + scheme = "http" + } + + // Construct a secured endpoint URL. + endpointURLStr := scheme + "://" + endpoint + endpointURL, err := url.Parse(endpointURLStr) + if err != nil { + return nil, err + } + + // Validate incoming endpoint URL. + if err := isValidEndpointURL(endpointURL.String()); err != nil { + return nil, err + } + return endpointURL, nil +} + +// Verify if input endpoint URL is valid. +func isValidEndpointURL(endpointURL string) error { + if endpointURL == "" { + return ErrInvalidArgument("Endpoint url cannot be empty.") + } + url, err := url.Parse(endpointURL) + if err != nil { + return ErrInvalidArgument("Endpoint url cannot be parsed.") + } + if url.Path != "/" && url.Path != "" { + return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.") + } + return nil +} + +// closeResponse close non nil response with any response Body. +// convenient wrapper to drain any remaining data on response body. +// +// Subsequently this allows golang http RoundTripper +// to re-use the same connection for future requests. +func closeResponse(resp *http.Response) { + // Callers should close resp.Body when done reading from it. + // If resp.Body is not closed, the Client's underlying RoundTripper + // (typically Transport) may not be able to re-use a persistent TCP + // connection to the server for a subsequent "keep-alive" request. + if resp != nil && resp.Body != nil { + // Drain any remaining Body and then close the connection. + // Without this closing connection would disallow re-using + // the same connection for future uses. 
+ // - http://stackoverflow.com/a/17961593/4465767 + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } +} diff --git a/pkg/madmin/vendor/github.com/minio/minio-go/LICENSE b/pkg/madmin/vendor/github.com/minio/minio-go/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/pkg/madmin/vendor/github.com/minio/minio-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go new file mode 100644 index 000000000..e1ec6c02c --- /dev/null +++ b/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go @@ -0,0 +1,324 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3signer + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// Signature and API related constants. +const ( + signV2Algorithm = "AWS" +) + +// Encode input URL path to URL encoded path. 
+func encodeURL2Path(u *url.URL) (path string) { + // Encode URL path. + if isS3, _ := filepath.Match("*.s3*.amazonaws.com", u.Host); isS3 { + hostSplits := strings.SplitN(u.Host, ".", 4) + // First element is the bucket name. + bucketName := hostSplits[0] + path = "/" + bucketName + path += u.Path + path = s3utils.EncodePath(path) + return + } + if strings.HasSuffix(u.Host, ".storage.googleapis.com") { + path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com") + path += u.Path + path = s3utils.EncodePath(path) + return + } + path = s3utils.EncodePath(u.Path) + return +} + +// PreSignV2 - presign the request in following style. +// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. +func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + d := time.Now().UTC() + // Find epoch expires when the request will expire. + epochExpires := d.Unix() + expires + + // Add expires header if not present. + if expiresStr := req.Header.Get("Expires"); expiresStr == "" { + req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) + } + + // Get presigned string to sign. + stringToSign := preStringifyHTTPReq(req) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Calculate signature. + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + + query := req.URL.Query() + // Handle specially for Google Cloud Storage. + if strings.Contains(req.URL.Host, ".storage.googleapis.com") { + query.Set("GoogleAccessId", accessKeyID) + } else { + query.Set("AWSAccessKeyId", accessKeyID) + } + + // Fill in Expires for presigned query. + query.Set("Expires", strconv.FormatInt(epochExpires, 10)) + + // Encode query and save. 
+ req.URL.RawQuery = s3utils.QueryEncode(query) + + // Save signature finally. + req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) + + // Return. + return &req +} + +// PostPresignSignatureV2 - presigned signature for PostPolicy +// request. +func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(policyBase64)) + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + return signature +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + +// SignV2 sign the request before Do() (AWS Signature Version 2). +func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + d := time.Now().UTC() + + // Add date if not present. + if date := req.Header.Get("Date"); date == "" { + req.Header.Set("Date", d.Format(http.TimeFormat)) + } + + // Calculate HMAC for secretAccessKey. + stringToSign := stringifyHTTPReq(req) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Prepare auth header. + authHeader := new(bytes.Buffer) + authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) + encoder := base64.NewEncoder(base64.StdEncoding, authHeader) + encoder.Write(hm.Sum(nil)) + encoder.Close() + + // Set Authorization header. 
+ req.Header.Set("Authorization", authHeader.String()) + + return &req +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func preStringifyHTTPReq(req http.Request) string { + buf := new(bytes.Buffer) + // Write standard headers. + writePreSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + isPreSign := true + writeCanonicalizedResource(buf, req, isPreSign) + return buf.String() +} + +// writePreSignV2Headers - write preSign v2 required headers. +func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Expires") + "\n") +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func stringifyHTTPReq(req http.Request) string { + buf := new(bytes.Buffer) + // Write standard headers. + writeSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + isPreSign := false + writeCanonicalizedResource(buf, req, isPreSign) + return buf.String() +} + +// writeSignV2Headers - write signV2 required headers. +func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Date") + "\n") +} + +// writeCanonicalizedHeaders - write canonicalized headers. 
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { + var protoHeaders []string + vals := make(map[string][]string) + for k, vv := range req.Header { + // All the AMZ headers should be lowercase + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-amz") { + protoHeaders = append(protoHeaders, lk) + vals[lk] = vv + } + } + sort.Strings(protoHeaders) + for _, k := range protoHeaders { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + if strings.Contains(v, "\n") { + // TODO: "Unfold" long headers that + // span multiple lines (as allowed by + // RFC 2616, section 4.2) by replacing + // the folding white-space (including + // new-line) by a single space. + buf.WriteString(v) + } else { + buf.WriteString(v) + } + } + buf.WriteByte('\n') + } +} + +// The following list is already sorted and should always be, otherwise we could +// have signature-related issues +var resourceList = []string{ + "acl", + "delete", + "location", + "logging", + "notification", + "partNumber", + "policy", + "requestPayment", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// From the Amazon docs: +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign bool) { + // Save request URL. + requestURL := req.URL + // Get encoded URL path. + path := encodeURL2Path(requestURL) + if isPreSign { + // Get encoded URL path. + if len(requestURL.Query()) > 0 { + // Keep the usual queries unescaped for string to sign. + query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query())) + path = path + "?" 
+ query + } + buf.WriteString(path) + return + } + buf.WriteString(path) + if requestURL.RawQuery != "" { + var n int + vals, _ := url.ParseQuery(requestURL.RawQuery) + // Verify if any sub resource queries are present, if yes + // canonicallize them. + for _, resource := range resourceList { + if vv, ok := vals[resource]; ok && len(vv) > 0 { + n++ + // First element + switch n { + case 1: + buf.WriteByte('?') + // The rest + default: + buf.WriteByte('&') + } + buf.WriteString(resource) + // Request parameters + if len(vv[0]) > 0 { + buf.WriteByte('=') + buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1)) + } + } + } + } +} diff --git a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go new file mode 100644 index 000000000..3322b67cc --- /dev/null +++ b/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go @@ -0,0 +1,305 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3signer + +import ( + "bytes" + "encoding/hex" + "net/http" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// Signature and API related constants. 
+const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" + yyyymmdd = "20060102" +) + +/// +/// Excerpts from @lsegal - +/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. +/// +/// User-Agent: +/// +/// This is ignored from signing because signing this causes +/// problems with generating pre-signed URLs (that are executed +/// by other agents) or when customers pass requests through +/// proxies, which may modify the user-agent. +/// +/// Content-Length: +/// +/// This is ignored from signing because generating a pre-signed +/// URL should not provide a content-length constraint, +/// specifically when vending a S3 pre-signed PUT URL. The +/// corollary to this is that when sending regular requests +/// (non-pre-signed), the signature contains a checksum of the +/// body, which implicitly validates the payload length (since +/// changing the number of bytes would change the checksum) +/// and therefore this header is not valuable in the signature. +/// +/// Content-Type: +/// +/// Signing this header causes quite a number of problems in +/// browser environments, where browsers like to modify and +/// normalize the content-type header in different ways. There is +/// more information on this in https://goo.gl/2E9gyy. Avoiding +/// this field simplifies logic and reduces the possibility of +/// future bugs. +/// +/// Authorization: +/// +/// Is skipped for obvious reasons +/// +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// getSigningKey hmac seed to calculate final signature. 
+func getSigningKey(secret, loc string, t time.Time) []byte { + date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) + location := sumHMAC(date, []byte(loc)) + service := sumHMAC(location, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getScope generate a string of a specific date, an AWS region, and a +// service. +func getScope(location string, t time.Time) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + location, + "s3", + "aws4_request", + }, "/") + return scope +} + +// GetCredential generate a credential string. +func GetCredential(accessKeyID, location string, t time.Time) string { + scope := getScope(location, t) + return accessKeyID + "/" + scope +} + +// getHashedPayload get the hexadecimal value of the SHA256 hash of +// the request payload. +func getHashedPayload(req http.Request) string { + hashedPayload := req.Header.Get("X-Amz-Content-Sha256") + if hashedPayload == "" { + // Presign does not have a payload, use S3 recommended value. + hashedPayload = unsignedPayload + } + return hashedPayload +} + +// getCanonicalHeaders generate a list of request headers for +// signature. +func getCanonicalHeaders(req http.Request) string { + var headers []string + vals := make(map[string][]string) + for k, vv := range req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + headers = append(headers, "host") + sort.Strings(headers) + + var buf bytes.Buffer + // Save all the headers in canonical form
: newline + // separated for each header. + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + switch { + case k == "host": + buf.WriteString(req.URL.Host) + fallthrough + default: + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } + } + return buf.String() +} + +// getSignedHeaders generate all signed request headers. +// i.e lexically sorted, semicolon-separated list of lowercase +// request header names. +func getSignedHeaders(req http.Request) string { + var headers []string + for k := range req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // Ignored header found continue. + } + headers = append(headers, strings.ToLower(k)) + } + headers = append(headers, "host") + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getCanonicalRequest generate a canonical request of style. +// +// canonicalRequest = +// \n +// \n +// \n +// \n +// \n +// +func getCanonicalRequest(req http.Request) string { + req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) + canonicalRequest := strings.Join([]string{ + req.Method, + s3utils.EncodePath(req.URL.Path), + req.URL.RawQuery, + getCanonicalHeaders(req), + getSignedHeaders(req), + getHashedPayload(req), + }, "\n") + return canonicalRequest +} + +// getStringToSign a string based on selected query values. +func getStringToSignV4(t time.Time, location, canonicalRequest string) string { + stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n" + stringToSign = stringToSign + getScope(location, t) + "\n" + stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) + return stringToSign +} + +// PreSignV4 presign the request, in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. 
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + t := time.Now().UTC() + + // Get credential string. + credential := GetCredential(accessKeyID, location, t) + + // Get all signed headers. + signedHeaders := getSignedHeaders(req) + + // Set URL query. + query := req.URL.Query() + query.Set("X-Amz-Algorithm", signV4Algorithm) + query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) + query.Set("X-Amz-SignedHeaders", signedHeaders) + query.Set("X-Amz-Credential", credential) + req.URL.RawQuery = query.Encode() + + // Get canonical request. + canonicalRequest := getCanonicalRequest(req) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(t, location, canonicalRequest) + + // Gext hmac signing key. + signingKey := getSigningKey(secretAccessKey, location, t) + + // Calculate signature. + signature := getSignature(signingKey, stringToSign) + + // Add signature header to RawQuery. + req.URL.RawQuery += "&X-Amz-Signature=" + signature + + return &req +} + +// PostPresignSignatureV4 - presigned signature for PostPolicy +// requests. +func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { + // Get signining key. + signingkey := getSigningKey(secretAccessKey, location, t) + // Calculate signature. + signature := getSignature(signingkey, policyBase64) + return signature +} + +// SignV4 sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. +func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { + // Signature calculation is not needed for anonymous credentials. 
+ if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + t := time.Now().UTC() + + // Set x-amz-date. + req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + + // Get canonical request. + canonicalRequest := getCanonicalRequest(req) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(t, location, canonicalRequest) + + // Get hmac signing key. + signingKey := getSigningKey(secretAccessKey, location, t) + + // Get credential string. + credential := GetCredential(accessKeyID, location, t) + + // Get all signed headers. + signedHeaders := getSignedHeaders(req) + + // Calculate signature. + signature := getSignature(signingKey, stringToSign) + + // If regular request, construct the final authorization header. + parts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + + // Set authorization header. + auth := strings.Join(parts, ", ") + req.Header.Set("Authorization", auth) + + return &req +} diff --git a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go new file mode 100644 index 000000000..0619b3082 --- /dev/null +++ b/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go @@ -0,0 +1,39 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3signer + +import ( + "crypto/hmac" + "crypto/sha256" +) + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// sum256 calculate sha256 sum for an input byte array. +func sum256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// sumHMAC calculate hmac between two input byte array. +func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} diff --git a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go new file mode 100644 index 000000000..353e2a0c7 --- /dev/null +++ b/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go @@ -0,0 +1,192 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3utils + +import ( + "bytes" + "encoding/hex" + "net" + "net/url" + "regexp" + "sort" + "strings" + "unicode/utf8" +) + +// Sentinel URL is the default url value which is invalid. +var sentinelURL = url.URL{} + +// IsValidDomain validates if input string is a valid domain name. +func IsValidDomain(host string) bool { + // See RFC 1035, RFC 3696. 
+ host = strings.TrimSpace(host) + if len(host) == 0 || len(host) > 255 { + return false + } + // host cannot start or end with "-" + if host[len(host)-1:] == "-" || host[:1] == "-" { + return false + } + // host cannot start or end with "_" + if host[len(host)-1:] == "_" || host[:1] == "_" { + return false + } + // host cannot start or end with a "." + if host[len(host)-1:] == "." || host[:1] == "." { + return false + } + // All non alphanumeric characters are invalid. + if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:> 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(percentEncodeSlash(EncodePath(v))) + } + } + return buf.String() +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
+func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} diff --git a/pkg/madmin/vendor/vendor.json b/pkg/madmin/vendor/vendor.json new file mode 100644 index 000000000..3609b3390 --- /dev/null +++ b/pkg/madmin/vendor/vendor.json @@ -0,0 +1,19 @@ +{ + "comment": "", + "ignore": "test", + "package": [ + { + "checksumSHA1": "m/6/na9lVtamkfmIdIOi5pdccgw=", + "path": "github.com/minio/minio-go/pkg/s3signer", + "revision": "532b920ff28900244a2ef7d07468003df36fe7c5", + "revisionTime": "2016-12-20T20:43:13Z" + }, + { + "checksumSHA1": "bPvxFS1qu6W9lOqdt8aEfS5Sids=", + "path": "github.com/minio/minio-go/pkg/s3utils", + "revision": "532b920ff28900244a2ef7d07468003df36fe7c5", + "revisionTime": "2016-12-20T20:43:13Z" + } + ], + "rootPath": "github.com/minio/minio/pkg/madmin" +} From 1d134c1a94e735689196c1478883f2c9b39a519c Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 20 Dec 2016 15:18:37 -0800 Subject: [PATCH 025/100] admin: Fix wrong vendorized location for s3signer and s3utils. 
--- .../vendor/github.com/minio/minio-go/LICENSE | 202 ------------------ .../minio/minio-go/pkg/s3signer/utils.go | 39 ---- pkg/madmin/vendor/vendor.json | 19 -- .../pkg/s3signer/request-signature-v2.go | 14 +- .../pkg/s3signer/request-signature-v4.go | 4 +- .../minio/minio-go/pkg/s3signer/utils.go | 118 ++++++++++ .../minio/minio-go/pkg/s3utils/utils.go | 0 vendor/vendor.json | 12 ++ 8 files changed, 137 insertions(+), 271 deletions(-) delete mode 100644 pkg/madmin/vendor/github.com/minio/minio-go/LICENSE delete mode 100644 pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go delete mode 100644 pkg/madmin/vendor/vendor.json rename {pkg/madmin/vendor => vendor}/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go (96%) rename {pkg/madmin/vendor => vendor}/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go (99%) create mode 100644 vendor/github.com/minio/minio-go/pkg/s3signer/utils.go rename {pkg/madmin/vendor => vendor}/github.com/minio/minio-go/pkg/s3utils/utils.go (100%) diff --git a/pkg/madmin/vendor/github.com/minio/minio-go/LICENSE b/pkg/madmin/vendor/github.com/minio/minio-go/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/pkg/madmin/vendor/github.com/minio/minio-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go deleted file mode 100644 index 0619b3082..000000000 --- a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3signer - -import ( - "crypto/hmac" - "crypto/sha256" -) - -// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// sum256 calculate sha256 sum for an input byte array. -func sum256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -// sumHMAC calculate hmac between two input byte array. 
-func sumHMAC(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} diff --git a/pkg/madmin/vendor/vendor.json b/pkg/madmin/vendor/vendor.json deleted file mode 100644 index 3609b3390..000000000 --- a/pkg/madmin/vendor/vendor.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "comment": "", - "ignore": "test", - "package": [ - { - "checksumSHA1": "m/6/na9lVtamkfmIdIOi5pdccgw=", - "path": "github.com/minio/minio-go/pkg/s3signer", - "revision": "532b920ff28900244a2ef7d07468003df36fe7c5", - "revisionTime": "2016-12-20T20:43:13Z" - }, - { - "checksumSHA1": "bPvxFS1qu6W9lOqdt8aEfS5Sids=", - "path": "github.com/minio/minio-go/pkg/s3utils", - "revision": "532b920ff28900244a2ef7d07468003df36fe7c5", - "revisionTime": "2016-12-20T20:43:13Z" - } - ], - "rootPath": "github.com/minio/minio/pkg/madmin" -} diff --git a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go similarity index 96% rename from pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go rename to vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go index e1ec6c02c..90aca9558 100644 --- a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go @@ -29,8 +29,6 @@ import ( "strconv" "strings" "time" - - "github.com/minio/minio-go/pkg/s3utils" ) // Signature and API related constants. 
@@ -47,16 +45,16 @@ func encodeURL2Path(u *url.URL) (path string) { bucketName := hostSplits[0] path = "/" + bucketName path += u.Path - path = s3utils.EncodePath(path) + path = urlEncodePath(path) return } if strings.HasSuffix(u.Host, ".storage.googleapis.com") { path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com") path += u.Path - path = s3utils.EncodePath(path) + path = urlEncodePath(path) return } - path = s3utils.EncodePath(u.Path) + path = urlEncodePath(u.Path) return } @@ -97,10 +95,10 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in query.Set("Expires", strconv.FormatInt(epochExpires, 10)) // Encode query and save. - req.URL.RawQuery = s3utils.QueryEncode(query) + req.URL.RawQuery = queryEncode(query) // Save signature finally. - req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) + req.URL.RawQuery += "&Signature=" + urlEncodePath(signature) // Return. return &req @@ -289,7 +287,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign b // Get encoded URL path. if len(requestURL.Query()) > 0 { // Keep the usual queries unescaped for string to sign. - query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query())) + query, _ := url.QueryUnescape(queryEncode(requestURL.Query())) path = path + "?" 
+ query } buf.WriteString(path) diff --git a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go similarity index 99% rename from pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go rename to vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go index 3322b67cc..b18f5a755 100644 --- a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go @@ -24,8 +24,6 @@ import ( "strconv" "strings" "time" - - "github.com/minio/minio-go/pkg/s3utils" ) // Signature and API related constants. @@ -187,7 +185,7 @@ func getCanonicalRequest(req http.Request) string { req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) canonicalRequest := strings.Join([]string{ req.Method, - s3utils.EncodePath(req.URL.Path), + urlEncodePath(req.URL.Path), req.URL.RawQuery, getCanonicalHeaders(req), getSignedHeaders(req), diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go new file mode 100644 index 000000000..ac58a0063 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go @@ -0,0 +1,118 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package s3signer + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "net/url" + "regexp" + "sort" + "strings" + "unicode/utf8" +) + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// sum256 calculate sha256 sum for an input byte array. +func sum256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// sumHMAC calculate hmac between two input byte array. +func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +//expects ascii encoded strings - from output of urlEncodePath +func percentEncodeSlash(s string) string { + return strings.Replace(s, "/", "%2F", -1) +} + +// queryEncode - encodes query values in their URL encoded form. In +// addition to the percent encoding performed by urlEncodePath() used +// here, it also percent encodes '/' (forward slash) +func queryEncode(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := percentEncodeSlash(urlEncodePath(k)) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(percentEncodeSlash(urlEncodePath(v))) + } + } + return buf.String() +} + +// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
+func urlEncodePath(pathName string) string { + // if object matches reserved string, no need to encode them + reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + if reservedNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} diff --git a/pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go similarity index 100% rename from pkg/madmin/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go rename to vendor/github.com/minio/minio-go/pkg/s3utils/utils.go diff --git a/vendor/vendor.json b/vendor/vendor.json index 4ac32a7ac..e776a4e0e 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -169,6 +169,18 @@ "revision": "40505f5d08c721dfe5a6450fbdef3bcd6567aa97", "revisionTime": "2016-09-04T08:12:15Z" }, + { + "checksumSHA1": "l95EyvF0yFAItXsXYqsZ6g7yGy4=", + "path": "github.com/minio/minio-go/pkg/s3signer", + "revision": "d02caa62b9e1034f93a82d41da806651a666c5a3", + "revisionTime": "2016-12-14T00:10:05Z" + }, + { + "checksumSHA1": "bPvxFS1qu6W9lOqdt8aEfS5Sids=", + "path": "github.com/minio/minio-go/pkg/s3utils", + "revision": "532b920ff28900244a2ef7d07468003df36fe7c5", + "revisionTime": "2016-12-20T20:43:13Z" + }, { "checksumSHA1": "A8QOw1aWwc+RtjGozY0XeS5varo=", "path": 
"github.com/minio/minio-go/pkg/set", From e7b4e4e105558085199a58afc2611c8a0110b142 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 20 Dec 2016 18:05:25 -0800 Subject: [PATCH 026/100] admin: ServiceStatus() shouldn't have to write double http headers. (#3484) Fixes #3482 --- cmd/admin-handlers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index a4605fac9..63a3928b7 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -36,8 +36,8 @@ func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r * if err != nil { writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) errorIf(err, "Failed to marshal storage info into json.") + return } - w.WriteHeader(http.StatusOK) writeSuccessResponse(w, jsonBytes) } From f57f773189ec9eda37dbf5ffd8eb0372cf0a4b39 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 20 Dec 2016 18:49:48 -0800 Subject: [PATCH 027/100] admin: Add missing madmin examples and API docs. 
(#3483) --- cmd/admin-handlers.go | 18 +++ cmd/object-api-datatypes.go | 2 +- pkg/madmin/API.md | 135 ++++++++++++++++++ pkg/madmin/README.md | 122 ++++++++++++++++ pkg/madmin/{requests.go => api.go} | 59 ++++---- pkg/madmin/{client_test.go => api_test.go} | 12 +- pkg/madmin/constants.go | 8 +- .../service-restart.go} | 33 +++-- pkg/madmin/examples/service-status.go | 46 ++++++ pkg/madmin/examples/service-stop.go | 46 ++++++ pkg/madmin/{service-api.go => service.go} | 10 +- 11 files changed, 440 insertions(+), 51 deletions(-) create mode 100644 pkg/madmin/API.md create mode 100644 pkg/madmin/README.md rename pkg/madmin/{requests.go => api.go} (89%) rename pkg/madmin/{client_test.go => api_test.go} (76%) rename pkg/madmin/{client.go => examples/service-restart.go} (50%) create mode 100644 pkg/madmin/examples/service-status.go create mode 100644 pkg/madmin/examples/service-stop.go rename pkg/madmin/{service-api.go => service.go} (94%) diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 63a3928b7..a08b1a772 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -25,6 +25,12 @@ const ( minioAdminOpHeader = "X-Minio-Operation" ) +// ServiceStatusHandler - GET /?service +// HTTP header x-minio-operation: status +// ---------- +// This implementation of the GET operation fetches server status information. +// provides total disk space available to use, online disks, offline disks and +// quorum threshold. func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { @@ -41,6 +47,12 @@ func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r * writeSuccessResponse(w, jsonBytes) } +// ServiceStopHandler - POST /?service +// HTTP header x-minio-operation: stop +// ---------- +// This implementation of the POST operation stops minio server gracefully, +// in a distributed setup stops all the servers in the cluster. 
Body sent +// if any on client request is ignored. func (adminAPI adminAPIHandlers) ServiceStopHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { @@ -52,6 +64,12 @@ func (adminAPI adminAPIHandlers) ServiceStopHandler(w http.ResponseWriter, r *ht sendServiceCmd(globalAdminPeers, serviceStop) } +// ServiceRestartHandler - POST /?service +// HTTP header x-minio-operation: restart +// ---------- +// This implementation of the POST operation restarts minio server gracefully, +// in a distributed setup restarts all the servers in the cluster. Body sent +// if any on client request is ignored. func (adminAPI adminAPIHandlers) ServiceRestartHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go index 93b512e81..bec29c0cc 100644 --- a/cmd/object-api-datatypes.go +++ b/cmd/object-api-datatypes.go @@ -26,7 +26,7 @@ const ( Unknown BackendType = iota // Filesystem backend. FS - // Multi disk single node XL backend. + // Multi disk XL (single, distributed) backend. XL // Add your own backend. ) diff --git a/pkg/madmin/API.md b/pkg/madmin/API.md new file mode 100644 index 000000000..2b93991a4 --- /dev/null +++ b/pkg/madmin/API.md @@ -0,0 +1,135 @@ +# Golang Admin Client API Reference [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +## Initialize Minio Admin Client object. + +## Minio + +```go + +package main + +import ( + "fmt" + + "github.com/minio/minio/pkg/madmin" +) + +func main() { + // Use a secure connection. + ssl := true + + // Initialize minio client object. + mdmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETKEY", ssl) + if err != nil { + fmt.Println(err) + return + } + + // Fetch service status. 
+ st, err := mdmClnt.ServiceStatus() + if err != nil { + fmt.Println(err) + return + } + fmt.Printf("%#v\n", st) +} + +``` + +| Service operations|LockInfo operations|Healing operations| +|:---|:---|:---| +|[`ServiceStatus`](#ServiceStatus)| | | +|[`ServiceStop`](#ServiceStop)| | | +|[`ServiceRestart`](#ServiceRestart)| | | + +## 1. Constructor + + +### New(endpoint string, accessKeyID string, secretAccessKey string, ssl bool) (*AdminClient, error) +Initializes a new admin client object. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`endpoint` | _string_ |Minio endpoint. | +|`accessKeyID` |_string_ | Access key for the object storage endpoint. | +|`secretAccessKey` | _string_ |Secret key for the object storage endpoint. | +|`ssl` | _bool_ | Set this value to 'true' to enable secure (HTTPS) access. | + + +## 2. Service operations + + +### ServiceStatus() (ServiceStatusMetadata, error) +Fetch service status, replies disk space used, backend type and total disks offline/online (XL). + +| Param | Type | Description | +|---|---|---| +|`serviceStatus` | _ServiceStatusMetadata_ | Represents current server status info in following format: | + + +| Param | Type | Description | +|---|---|---| +|`st.Total` | _int64_ | Total disk space. | +|`st.Free` | _int64_ | Free disk space. | +|`st.Backend`| _struct{}_ | Represents backend type embedded structure. | + +| Param | Type | Description | +|---|---|---| +|`backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or XL. | +|`backend.OnlineDisks`| _int_ | Total number of disks online (only applies to XL backend), is empty for FS. | +|`backend.OfflineDisks` | _int_ | Total number of disks offline (only applies to XL backend), is empty for FS. | +|`backend.ReadQuorum` | _int_ | Current total read quorum threshold before reads will be unavailable, is empty for FS. 
| +|`backend.WriteQuorum` | _int_ | Current total write quorum threshold before writes will be unavailable, is empty for FS. | + + + __Example__ + + + ```go + + st, err := madmClnt.ServiceStatus() + if err != nil { + log.Fatalln(err) + } + log.Printf("%#v\n", st) + + ``` + + +### ServiceStop() (error) +If successful shuts down the running minio service, for distributed setup stops all remote minio servers. + + __Example__ + + + ```go + + st, err := madmClnt.ServiceStop() + if err != nil { + log.Fatalln(err) + } + log.Printf("Succes") + + ``` + + +### ServiceRestart() (error) +If successful restarts the running minio service, for distributed setup restarts all remote minio servers. + + __Example__ + + + ```go + + + st, err := madmClnt.ServiceRestart() + if err != nil { + log.Fatalln(err) + } + log.Printf("Succes") + + ``` + diff --git a/pkg/madmin/README.md b/pkg/madmin/README.md new file mode 100644 index 000000000..51ba7ae31 --- /dev/null +++ b/pkg/madmin/README.md @@ -0,0 +1,122 @@ +# Minio Admin Library. [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +The Minio Admin Golang Client SDK provides APIs to manage Minio services. + +This quickstart guide will show you how to install the Minio Admin client SDK, connect to Minio admin service, and provide a walkthrough of a simple file uploader. + +This document assumes that you have a working [Golang setup](https://docs.minio.io/docs/how-to-install-golang). + +## Download from Github + +```sh + +go get -u github.com/minio/minio/pkg/madmin + +``` + +## Initialize Minio Admin Client + +You need four items to connect to Minio admin services. + + +| Parameter | Description| +| :--- | :--- | +| endpoint | URL to object storage service. | +| accessKeyID | Access key is the user ID that uniquely identifies your account. | +| secretAccessKey | Secret key is the password to your account. 
| +| secure | Set this value to 'true' to enable secure (HTTPS) access. | + + +```go + +package main + +import ( + "github.com/minio/minio/pkg/madmin" + "log" +) + +func main() { + endpoint := "your-minio.example.com:9000" + accessKeyID := "YOUR-ACCESSKEYID" + secretAccessKey := "YOUR-SECRETKEY" + useSSL := true + + // Initialize minio admin client object. + madmClnt, err := madmin.New(endpoint, accessKeyID, secretAccessKey, useSSL) + if err != nil { + log.Fatalln(err) + } + + log.Println("%v", madmClnt) // Minio admin client is now setup + + +``` + +## Quick Start Example - Service Status. + +This example program connects to minio server, gets the current disk status. + +We will use the Minio server running at [https://your-minio.example.com:9000](https://your-minio.example.com:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public. + +#### ServiceStatus.go + +```go +package main + +import ( + "log" + + "github.com/minio/minio/pkg/madmin" +) + +func main() { + endpoint := "your-minio.example.com:9000" + accessKeyID := "YOUR-ACCESSKEYID" + secretAccessKey := "YOUR-SECRETKEY" + useSSL := true + + // Initialize minio admin client. 
+ mdmClnt, err := madmin.New(endpoint, accessKeyID, secretAccessKey, useSSL) + if err != nil { + log.Fatalln(err) + } + + st, err := madmClnt.ServiceStatus() + if err != nil { + log.Fatalln(err) + } + log.Printf("%#v\n", st) + +} + +``` + +#### Run ServiceStatus + +```sh + +go run service-status.go +2016/12/20 16:46:01 madmin.ServiceStatusMetadata{Total:177038229504, Free:120365559808, Backend:struct { Type madmin.BackendType; OnlineDisks int; OfflineDisks int; ReadQuorum int; WriteQuorum int }{Type:1, OnlineDisks:0, OfflineDisks:0, ReadQuorum:0, WriteQuorum:0}} + +``` + +## API Reference + +### API Reference : Service Operations + +* [`ServiceStatus`](./API.md#ServiceStatus) +* [`ServiceStop`](./API.md#ServiceStop) +* [`ServiceRestart`](./API.md#ServiceRestart) + +## Full Examples + +#### Full Examples : Service Operations + +* [service-status.go](https://github.com/minio/minio/blob/master/pkg/madmin/examples/service-status.go) +* [service-stop.go](https://github.com/minio/minio/blob/master/pkg/madmin/examples/service-stop.go) +* [service-restart.go](https://github.com/minio/minio/blob/master/pkg/madmin/examples/service-restart.go) + +## Contribute + +[Contributors Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md) + diff --git a/pkg/madmin/requests.go b/pkg/madmin/api.go similarity index 89% rename from pkg/madmin/requests.go rename to pkg/madmin/api.go index 3c6c7b615..2bf06b8b6 100644 --- a/pkg/madmin/requests.go +++ b/pkg/madmin/api.go @@ -36,8 +36,8 @@ import ( "github.com/minio/minio-go/pkg/s3utils" ) -// Client implements Amazon S3 compatible methods. -type Client struct { +// AdminClient implements Amazon S3 compatible methods. +type AdminClient struct { /// Standard options. // AccessKeyID required for authorized requests. @@ -82,9 +82,8 @@ const ( libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion ) -// New - instantiate minio client Client, adds automatic verification -// of signature. 
-func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { +// New - instantiate minio client Client, adds automatic verification of signature. +func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*AdminClient, error) { clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) if err != nil { return nil, err @@ -104,7 +103,7 @@ func redirectHeaders(req *http.Request, via []*http.Request) error { return nil } -func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { +func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*AdminClient, error) { // construct endpoint. endpointURL, err := getEndpointURL(endpoint, secure) if err != nil { @@ -112,20 +111,18 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl } // instantiate new Client. - clnt := new(Client) - clnt.accessKeyID = accessKeyID - clnt.secretAccessKey = secretAccessKey - - // Remember whether we are using https or not - clnt.secure = secure - - // Save endpoint URL, user agent for future uses. - clnt.endpointURL = *endpointURL - - // Instantiate http client and bucket location cache. - clnt.httpClient = &http.Client{ - Transport: http.DefaultTransport, - CheckRedirect: redirectHeaders, + clnt := &AdminClient{ + accessKeyID: accessKeyID, + secretAccessKey: secretAccessKey, + // Remember whether we are using https or not + secure: secure, + // Save endpoint URL, user agent for future uses. + endpointURL: *endpointURL, + // Instantiate http client and bucket location cache. + httpClient: &http.Client{ + Transport: http.DefaultTransport, + CheckRedirect: redirectHeaders, + }, } // Return. @@ -133,7 +130,7 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl } // SetAppInfo - add application details to user agent. 
-func (c *Client) SetAppInfo(appName string, appVersion string) { +func (c *AdminClient) SetAppInfo(appName string, appVersion string) { // if app name and version is not set, we do not a new user // agent. if appName != "" && appVersion != "" { @@ -147,7 +144,7 @@ func (c *Client) SetAppInfo(appName string, appVersion string) { } // SetCustomTransport - set new custom transport. -func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { +func (c *AdminClient) SetCustomTransport(customHTTPTransport http.RoundTripper) { // Set this to override default transport // ``http.DefaultTransport``. // @@ -168,7 +165,7 @@ func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { } // TraceOn - enable HTTP tracing. -func (c *Client) TraceOn(outputStream io.Writer) { +func (c *AdminClient) TraceOn(outputStream io.Writer) { // if outputStream is nil then default to os.Stdout. if outputStream == nil { outputStream = os.Stdout @@ -181,7 +178,7 @@ func (c *Client) TraceOn(outputStream io.Writer) { } // TraceOff - disable HTTP tracing. -func (c *Client) TraceOff() { +func (c *AdminClient) TraceOff() { // Disable tracing. c.isTraceEnabled = false } @@ -199,7 +196,7 @@ type requestData struct { } // Filter out signature value from Authorization header. -func (c Client) filterSignature(req *http.Request) { +func (c AdminClient) filterSignature(req *http.Request) { /// Signature V4 authorization header. // Save the original auth. @@ -219,7 +216,7 @@ func (c Client) filterSignature(req *http.Request) { } // dumpHTTP - dump HTTP request and response. -func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { +func (c AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error { // Starts http dump. _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") if err != nil { @@ -288,7 +285,7 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { } // do - execute http request. 
-func (c Client) do(req *http.Request) (*http.Response, error) { +func (c AdminClient) do(req *http.Request) (*http.Response, error) { var resp *http.Response var err error // Do the request in a loop in case of 307 http is met since golang still doesn't @@ -346,7 +343,7 @@ var successStatus = []int{ // executeMethod - instantiates a given method, and retries the // request upon any error up to maxRetries attempts in a binomially // delayed manner using a standard back off algorithm. -func (c Client) executeMethod(method string, reqData requestData) (res *http.Response, err error) { +func (c AdminClient) executeMethod(method string, reqData requestData) (res *http.Response, err error) { // Create a done channel to control 'ListObjects' go routine. doneCh := make(chan struct{}, 1) @@ -391,7 +388,7 @@ func (c Client) executeMethod(method string, reqData requestData) (res *http.Res } // set User agent. -func (c Client) setUserAgent(req *http.Request) { +func (c AdminClient) setUserAgent(req *http.Request) { req.Header.Set("User-Agent", libraryUserAgent) if c.appInfo.appName != "" && c.appInfo.appVersion != "" { req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) @@ -399,7 +396,7 @@ func (c Client) setUserAgent(req *http.Request) { } // newRequest - instantiate a new HTTP request for a given method. -func (c Client) newRequest(method string, reqData requestData) (req *http.Request, err error) { +func (c AdminClient) newRequest(method string, reqData requestData) (req *http.Request, err error) { // If no method is supplied default to 'POST'. if method == "" { method = "POST" @@ -461,7 +458,7 @@ func (c Client) newRequest(method string, reqData requestData) (req *http.Reques } // makeTargetURL make a new target url. 
-func (c Client) makeTargetURL(queryValues url.Values) (*url.URL, error) { +func (c AdminClient) makeTargetURL(queryValues url.Values) (*url.URL, error) { host := c.endpointURL.Host scheme := c.endpointURL.Scheme diff --git a/pkg/madmin/client_test.go b/pkg/madmin/api_test.go similarity index 76% rename from pkg/madmin/client_test.go rename to pkg/madmin/api_test.go index 1aff33c45..e44702fa6 100644 --- a/pkg/madmin/client_test.go +++ b/pkg/madmin/api_test.go @@ -18,7 +18,15 @@ // Package madmin_test package madmin_test -import "testing" +import ( + "testing" -func TestMAdminClient(t *testing.T) { + "github.com/minio/minio/pkg/madmin" +) + +func TestMinioAdminClient(t *testing.T) { + _, err := madmin.New("localhost:9000", "food", "food123", true) + if err != nil { + t.Fatal(err) + } } diff --git a/pkg/madmin/constants.go b/pkg/madmin/constants.go index 299945b73..61fda4a61 100644 --- a/pkg/madmin/constants.go +++ b/pkg/madmin/constants.go @@ -16,4 +16,10 @@ package madmin -const unsignedPayload = "UNSIGNED-PAYLOAD" +const ( + // Unsigned payload. + unsignedPayload = "UNSIGNED-PAYLOAD" + + // Admin operation header. + minioAdminOpHeader = "X-Minio-Operation" +) diff --git a/pkg/madmin/client.go b/pkg/madmin/examples/service-restart.go similarity index 50% rename from pkg/madmin/client.go rename to pkg/madmin/examples/service-restart.go index 612e45927..26d991488 100644 --- a/pkg/madmin/client.go +++ b/pkg/madmin/examples/service-restart.go @@ -1,3 +1,5 @@ +// +build ignore + /* * Minio Cloud Storage, (C) 2016 Minio, Inc. * @@ -15,23 +17,30 @@ * */ -package madmin +package main -const ( - minioAdminOpHeader = "X-Minio-Operation" +import ( + "log" + + "github.com/minio/minio/pkg/madmin" ) -// AdminClient - interface to Minio Management API -type AdminClient struct { - client *Client -} +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are + // dummy values, please replace them with original values. 
-// NewAdminClient - create new Management client -func NewAdminClient(addr string, access string, secret string, secure bool) (*AdminClient, error) { - client, err := New(addr, access, secret, secure) + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Minio Admin client object. + madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) if err != nil { - return nil, err + log.Fatalln(err) } - return &AdminClient{client: client}, nil + err = madmClnt.ServiceRestart() + if err != nil { + log.Fatalln(err) + } + log.Println("Success") } diff --git a/pkg/madmin/examples/service-status.go b/pkg/madmin/examples/service-status.go new file mode 100644 index 000000000..fbee6fb22 --- /dev/null +++ b/pkg/madmin/examples/service-status.go @@ -0,0 +1,46 @@ +// +build ignore + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "log" + + "github.com/minio/minio/pkg/madmin" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). 
+ + // New returns an Minio Admin client object. + madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + st, err := madmClnt.ServiceStatus() + if err != nil { + log.Fatalln(err) + } + log.Println(st) +} diff --git a/pkg/madmin/examples/service-stop.go b/pkg/madmin/examples/service-stop.go new file mode 100644 index 000000000..056140774 --- /dev/null +++ b/pkg/madmin/examples/service-stop.go @@ -0,0 +1,46 @@ +// +build ignore + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "log" + + "github.com/minio/minio/pkg/madmin" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Minio Admin client object. 
+ madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + err = madmClnt.ServiceStop() + if err != nil { + log.Fatalln(err) + } + log.Println("Success") +} diff --git a/pkg/madmin/service-api.go b/pkg/madmin/service.go similarity index 94% rename from pkg/madmin/service-api.go rename to pkg/madmin/service.go index 2d62f7b02..27027bc04 100644 --- a/pkg/madmin/service-api.go +++ b/pkg/madmin/service.go @@ -33,8 +33,9 @@ const ( Unknown BackendType = iota // Filesystem backend. FS - // Multi disk single node XL backend. + // Multi disk XL (single, distributed) backend. XL + // Add your own backend. ) @@ -48,6 +49,7 @@ type ServiceStatusMetadata struct { Backend struct { // Represents various backend types, currently on FS and XL. Type BackendType + // Following fields are only meaningful if BackendType is XL. OnlineDisks int // Online disks during server startup. OfflineDisks int // Offline disks during server startup. @@ -67,7 +69,7 @@ func (adm *AdminClient) ServiceStatus() (ServiceStatusMetadata, error) { reqData.customHeaders.Set(minioAdminOpHeader, "status") // Execute GET on bucket to list objects. - resp, err := adm.client.executeMethod("GET", reqData) + resp, err := adm.executeMethod("GET", reqData) defer closeResponse(resp) if err != nil { @@ -103,7 +105,7 @@ func (adm *AdminClient) ServiceStop() error { reqData.customHeaders.Set(minioAdminOpHeader, "stop") // Execute GET on bucket to list objects. - resp, err := adm.client.executeMethod("POST", reqData) + resp, err := adm.executeMethod("POST", reqData) defer closeResponse(resp) if err != nil { @@ -127,7 +129,7 @@ func (adm *AdminClient) ServiceRestart() error { reqData.customHeaders.Set(minioAdminOpHeader, "restart") // Execute GET on bucket to list objects. 
- resp, err := adm.client.executeMethod("POST", reqData) + resp, err := adm.executeMethod("POST", reqData) defer closeResponse(resp) if err != nil { From 15b4c4962198ce3bd560d3c15e7d06c070b38058 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 21 Dec 2016 11:29:32 -0800 Subject: [PATCH 028/100] fs/xl: Simplify bucket metadata reading. (#3486) ObjectLayer GetObject() now returns the entire object if starting offset is 0 and length is negative. This also allows to simplify handler layer code where we always had to use GetObjectInfo() before proceeding to read bucket metadata files examples `policy.json`. This also reduces one additional call overhead. --- cmd/bucket-policy.go | 10 +------- cmd/event-notifier.go | 28 ++--------------------- cmd/event-notifier_test.go | 2 +- cmd/fs-v1.go | 16 +++++++------ cmd/object-api-getobject_test.go | 39 +++++++++++++++++--------------- cmd/xl-v1-object.go | 16 +++++++------ 6 files changed, 43 insertions(+), 68 deletions(-) diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go index 07d9ca05f..0f12aee62 100644 --- a/cmd/bucket-policy.go +++ b/cmd/bucket-policy.go @@ -150,16 +150,8 @@ func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader objLock.RLock() defer objLock.RUnlock() - objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, policyPath) - if err != nil { - if isErrObjectNotFound(err) || isErrIncompleteBody(err) { - return nil, BucketPolicyNotFound{Bucket: bucket} - } - errorIf(err, "Unable to load policy for the bucket %s.", bucket) - return nil, errorCause(err) - } var buffer bytes.Buffer - err = objAPI.GetObject(minioMetaBucket, policyPath, 0, objInfo.Size, &buffer) + err = objAPI.GetObject(minioMetaBucket, policyPath, 0, -1, &buffer) if err != nil { if isErrObjectNotFound(err) || isErrIncompleteBody(err) { return nil, BucketPolicyNotFound{Bucket: bucket} diff --git a/cmd/event-notifier.go b/cmd/event-notifier.go index 07865e168..1a1e63c98 100644 --- a/cmd/event-notifier.go +++ 
b/cmd/event-notifier.go @@ -308,20 +308,8 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon objLock.RLock() defer objLock.RUnlock() - objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, ncPath) - if err != nil { - // 'notification.xml' not found return - // 'errNoSuchNotifications'. This is default when no - // bucket notifications are found on the bucket. - if isErrObjectNotFound(err) || isErrIncompleteBody(err) { - return nil, errNoSuchNotifications - } - errorIf(err, "Unable to load bucket-notification for bucket %s", bucket) - // Returns error for other errors. - return nil, err - } var buffer bytes.Buffer - err = objAPI.GetObject(minioMetaBucket, ncPath, 0, objInfo.Size, &buffer) + err := objAPI.GetObject(minioMetaBucket, ncPath, 0, -1, &buffer) // Read everything. if err != nil { // 'notification.xml' not found return // 'errNoSuchNotifications'. This is default when no @@ -363,20 +351,8 @@ func loadListenerConfig(bucket string, objAPI ObjectLayer) ([]listenerConfig, er objLock.RLock() defer objLock.RUnlock() - objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, lcPath) - if err != nil { - // 'listener.json' not found return - // 'errNoSuchNotifications'. This is default when no - // bucket notifications are found on the bucket. - if isErrObjectNotFound(err) { - return nil, errNoSuchNotifications - } - errorIf(err, "Unable to load bucket-listeners for bucket %s", bucket) - // Returns error for other errors. - return nil, err - } var buffer bytes.Buffer - err = objAPI.GetObject(minioMetaBucket, lcPath, 0, objInfo.Size, &buffer) + err := objAPI.GetObject(minioMetaBucket, lcPath, 0, -1, &buffer) if err != nil { // 'notification.xml' not found return // 'errNoSuchNotifications'. 
This is default when no diff --git a/cmd/event-notifier_test.go b/cmd/event-notifier_test.go index c49d6b3ea..b255a56d1 100644 --- a/cmd/event-notifier_test.go +++ b/cmd/event-notifier_test.go @@ -69,7 +69,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) { } // Test initEventNotifier() with faulty disks - for i := 1; i <= 5; i++ { + for i := 1; i <= 3; i++ { fs.storage = newNaughtyDisk(fsstorage, map[int]error{i: errFaultyDisk}, nil) if err := initEventNotifier(fs); errorCause(err) != errFaultyDisk { t.Fatal("Unexpected error:", err) diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index b7588a47f..1efe7d059 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -222,8 +222,8 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64, if err = checkGetObjArgs(bucket, object); err != nil { return err } - // Offset and length cannot be negative. - if offset < 0 || length < 0 { + // Offset cannot be negative. + if offset < 0 { return toObjectErr(traceError(errUnexpected), bucket, object) } // Writer cannot be nil. @@ -237,12 +237,13 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64, return toObjectErr(traceError(err), bucket, object) } - // Reply back invalid range if the input offset and length fall out of range. - if offset > fi.Size || length > fi.Size { - return traceError(InvalidRange{offset, length, fi.Size}) + // For negative length we read everything. + if length < 0 { + length = fi.Size - offset } - // Reply if we have inputs with offset and length falling out of file size range. - if offset+length > fi.Size { + + // Reply back invalid range if the input offset and length fall out of range. + if offset > fi.Size || offset+length > fi.Size { return traceError(InvalidRange{offset, length, fi.Size}) } @@ -291,6 +292,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64, break } } + // Returns any error. 
return toObjectErr(err, bucket, object) } diff --git a/cmd/object-api-getobject_test.go b/cmd/object-api-getobject_test.go index 585f2886a..5385952a9 100644 --- a/cmd/object-api-getobject_test.go +++ b/cmd/object-api-getobject_test.go @@ -121,28 +121,28 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) { // Fetching the entire object. // Test case - 8. {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[1], buffers[1], true, bytesData[0].byteData, nil}, - // Test case with content-range 1 to objectSize . + // Test case with `length` parameter set to a negative value. // Test case - 9. + {bucketName, objectName, 0, int64(-1), buffers[1], buffers[1], true, bytesData[0].byteData, nil}, + // Test case with content-range 1 to objectSize . + // Test case - 10. {bucketName, objectName, 1, int64(len(bytesData[0].byteData) - 1), buffers[1], buffers[1], true, bytesData[0].byteData[1:], nil}, // Test case with content-range 100 to objectSize - 100. - // Test case - 10. + // Test case - 11. {bucketName, objectName, 100, int64(len(bytesData[0].byteData) - 200), buffers[1], buffers[1], true, bytesData[0].byteData[100 : len(bytesData[0].byteData)-100], nil}, // Test case with offset greater than the size of the object - // Test case - 11. + // Test case - 12. {bucketName, objectName, int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData)), buffers[0], NewEOFWriter(buffers[0], 100), false, []byte{}, InvalidRange{int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData)), int64(len(bytesData[0].byteData))}}, // Test case with offset greater than the size of the object. - // Test case - 12. + // Test case - 13. {bucketName, objectName, -1, int64(len(bytesData[0].byteData)), buffers[0], new(bytes.Buffer), false, []byte{}, errUnexpected}, // Test case length parameter is more than the object size. - // Test case - 13. + // Test case - 14. 
{bucketName, objectName, 0, int64(len(bytesData[0].byteData) + 1), buffers[1], buffers[1], false, bytesData[0].byteData, InvalidRange{0, int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData))}}, - // Test case with `length` parameter set to a negative value. - // Test case - 14. - {bucketName, objectName, 0, int64(-1), buffers[1], buffers[1], false, bytesData[0].byteData, errUnexpected}, // Test case with offset + length > objectSize parameter set to a negative value. // Test case - 15. {bucketName, objectName, 2, int64(len(bytesData[0].byteData)), buffers[1], buffers[1], false, bytesData[0].byteData, @@ -391,34 +391,37 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str // Fetching the entire object. // Test case - 8. {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[1], buffers[1], true, bytesData[0].byteData, nil}, - // Test case with content-range 1 to objectSize . + // Test case with `length` parameter set to a negative value. // Test case - 9. + {bucketName, objectName, 0, int64(-1), buffers[1], buffers[1], true, bytesData[0].byteData, nil}, + // Test case with `length` parameter set to a negative value and offset is positive. + // Test case - 10. + {bucketName, objectName, 1, int64(-1), buffers[1], buffers[1], true, bytesData[0].byteData[1:], nil}, + // Test case with content-range 1 to objectSize . + // Test case - 11. {bucketName, objectName, 1, int64(len(bytesData[0].byteData) - 1), buffers[1], buffers[1], true, bytesData[0].byteData[1:], nil}, // Test case with content-range 100 to objectSize - 100. - // Test case - 10. + // Test case - 12. {bucketName, objectName, 100, int64(len(bytesData[0].byteData) - 200), buffers[1], buffers[1], true, bytesData[0].byteData[100 : len(bytesData[0].byteData)-100], nil}, // Test case with offset greater than the size of the object - // Test case - 11. + // Test case - 13. 
{bucketName, objectName, int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData)), buffers[0], NewEOFWriter(buffers[0], 100), false, []byte{}, InvalidRange{int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData)), int64(len(bytesData[0].byteData))}}, // Test case with offset greater than the size of the object. - // Test case - 12. + // Test case - 14. {bucketName, objectName, -1, int64(len(bytesData[0].byteData)), buffers[0], new(bytes.Buffer), false, []byte{}, errUnexpected}, // Test case length parameter is more than the object size. - // Test case - 13. + // Test case - 15. {bucketName, objectName, 0, int64(len(bytesData[0].byteData) + 1), buffers[1], buffers[1], false, bytesData[0].byteData, InvalidRange{0, int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData))}}, - // Test case with `length` parameter set to a negative value. - // Test case - 14. - {bucketName, objectName, 0, int64(-1), buffers[1], buffers[1], false, bytesData[0].byteData, errUnexpected}, // Test case with offset + length > objectSize parameter set to a negative value. - // Test case - 15. + // Test case - 16. {bucketName, objectName, 2, int64(len(bytesData[0].byteData)), buffers[1], buffers[1], false, bytesData[0].byteData, InvalidRange{2, int64(len(bytesData[0].byteData)), int64(len(bytesData[0].byteData))}}, // Test case with the writer set to nil. - // Test case - 16. + // Test case - 17. {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[1], nil, false, bytesData[0].byteData, errUnexpected}, } diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go index a39fef4a1..28af752b7 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/xl-v1-object.go @@ -53,10 +53,12 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i if err := checkGetObjArgs(bucket, object); err != nil { return err } - // Start offset and length cannot be negative. 
- if startOffset < 0 || length < 0 { + + // Start offset cannot be negative. + if startOffset < 0 { return traceError(errUnexpected) } + // Writer cannot be nil. if writer == nil { return traceError(errUnexpected) @@ -88,13 +90,13 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i // Reorder parts metadata based on erasure distribution order. metaArr = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, metaArr) - // Reply back invalid range if the input offset and length fall out of range. - if startOffset > xlMeta.Stat.Size || length > xlMeta.Stat.Size { - return traceError(InvalidRange{startOffset, length, xlMeta.Stat.Size}) + // For negative length read everything. + if length < 0 { + length = xlMeta.Stat.Size - startOffset } - // Reply if we have inputs with offset and length. - if startOffset+length > xlMeta.Stat.Size { + // Reply back invalid range if the input offset and length fall out of range. + if startOffset > xlMeta.Stat.Size || startOffset+length > xlMeta.Stat.Size { return traceError(InvalidRange{startOffset, length, xlMeta.Stat.Size}) } From 1ac36a95aaac37c13d9006b5a7a6adc0173c50f6 Mon Sep 17 00:00:00 2001 From: Andreas Auernhammer Date: Wed, 21 Dec 2016 23:20:01 +0100 Subject: [PATCH 029/100] replace blake2b implementation (#3481) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * replace blake2b implementation replace the blake2b-simd with the golang/x/crypto implementation ``` name old time/op new time/op delta Size64-8 715ns ±13% 614ns ± 3% ~ (p=0.084 n=6+6) Size128-8 612ns ± 5% 634ns ± 8% ~ (p=0.084 n=6+6) Size1K-8 2.18µs ± 5% 2.09µs ± 7% ~ (p=0.084 n=6+6) Size8K-8 13.1µs ± 2% 13.4µs ± 3% ~ (p=0.084 n=6+6) Size32K-8 48.5µs ± 1% 49.5µs ± 3% ~ (p=0.775 n=6+6) Size128K-8 199µs ± 0% 198µs ± 3% ~ (p=0.468 n=6+6) name old speed new speed delta Size64-8 92.6MB/s ±11% 104.2MB/s ± 3% ~ (p=0.139 n=6+6) Size128-8 208MB/s ± 6% 202MB/s ± 8% ~ (p=0.102 n=6+6) Size1K-8 466MB/s ± 7% 
492MB/s ± 7% ~ (p=0.139 n=6+6) Size8K-8 621MB/s ± 2% 610MB/s ± 3% ~ (p=0.102 n=6+6) Size32K-8 672MB/s ± 2% 669MB/s ± 1% ~ (p=0.818 n=6+6) Size128K-8 657MB/s ± 1% 672MB/s ± 0% +2.28% (p=0.002 n=6+6) name old time/op new time/op delta Size64-4 334ns ± 1% 243ns ± 0% -27.14% (p=0.029 n=4+4) Size128-4 296ns ± 1% 242ns ± 0% -18.21% (p=0.029 n=4+4) Size1K-4 1.44µs ± 0% 1.28µs ± 0% -10.83% (p=0.029 n=4+4) Size8K-4 10.0µs ± 0% 9.4µs ± 0% -6.23% (p=0.029 n=4+4) Size32K-4 39.8µs ± 1% 37.3µs ± 0% -6.31% (p=0.029 n=4+4) Size128K-4 162µs ± 3% 149µs ± 0% -7.72% (p=0.029 n=4+4) name old speed new speed delta Size64-4 192MB/s ± 1% 263MB/s ± 0% +37.24% (p=0.029 n=4+4) Size128-4 431MB/s ± 0% 526MB/s ± 0% +22.04% (p=0.029 n=4+4) Size1K-4 713MB/s ± 0% 800MB/s ± 0% +12.17% (p=0.029 n=4+4) Size8K-4 815MB/s ± 0% 869MB/s ± 0% +6.64% (p=0.029 n=4+4) Size32K-4 823MB/s ± 1% 878MB/s ± 0% +6.72% (p=0.029 n=4+4) Size128K-4 810MB/s ± 3% 877MB/s ± 0% +8.23% (p=0.029 n=4+4) ``` See: https://go-review.googlesource.com/#/c/34319/ --- cmd/erasure-utils.go | 14 +- vendor/github.com/minio/blake2b-simd/LICENSE | 202 --- .../github.com/minio/blake2b-simd/README.md | 144 -- .../github.com/minio/blake2b-simd/blake2b.go | 301 ---- .../minio/blake2b-simd/compressAvx2_amd64.go | 47 - .../minio/blake2b-simd/compressAvx2_amd64.s | 671 -------- .../minio/blake2b-simd/compressAvx_amd64.go | 41 - .../minio/blake2b-simd/compressAvx_amd64.s | 682 -------- .../minio/blake2b-simd/compressSse_amd64.go | 41 - .../minio/blake2b-simd/compressSse_amd64.s | 770 --------- .../minio/blake2b-simd/compress_amd64.go | 30 - .../minio/blake2b-simd/compress_generic.go | 1419 ----------------- .../minio/blake2b-simd/compress_noasm.go | 23 - vendor/github.com/minio/blake2b-simd/cpuid.go | 60 - .../github.com/minio/blake2b-simd/cpuid_386.s | 33 - .../minio/blake2b-simd/cpuid_amd64.s | 34 - vendor/golang.org/x/crypto/blake2b/blake2b.go | 188 +++ .../x/crypto/blake2b/blake2bAVX2_amd64.go | 41 + .../x/crypto/blake2b/blake2bAVX2_amd64.s | 
502 ++++++ .../x/crypto/blake2b/blake2b_amd64.go | 25 + .../x/crypto/blake2b/blake2b_amd64.s | 290 ++++ .../x/crypto/blake2b/blake2b_generic.go | 179 +++ .../x/crypto/blake2b/blake2b_ref.go | 14 + .../x/crypto/blake2b/blake2b_test.go | 448 ++++++ 24 files changed, 1698 insertions(+), 4501 deletions(-) delete mode 100644 vendor/github.com/minio/blake2b-simd/LICENSE delete mode 100644 vendor/github.com/minio/blake2b-simd/README.md delete mode 100644 vendor/github.com/minio/blake2b-simd/blake2b.go delete mode 100644 vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go delete mode 100644 vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.s delete mode 100644 vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go delete mode 100644 vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s delete mode 100644 vendor/github.com/minio/blake2b-simd/compressSse_amd64.go delete mode 100644 vendor/github.com/minio/blake2b-simd/compressSse_amd64.s delete mode 100644 vendor/github.com/minio/blake2b-simd/compress_amd64.go delete mode 100644 vendor/github.com/minio/blake2b-simd/compress_generic.go delete mode 100644 vendor/github.com/minio/blake2b-simd/compress_noasm.go delete mode 100644 vendor/github.com/minio/blake2b-simd/cpuid.go delete mode 100644 vendor/github.com/minio/blake2b-simd/cpuid_386.s delete mode 100644 vendor/github.com/minio/blake2b-simd/cpuid_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_test.go diff --git a/cmd/erasure-utils.go 
b/cmd/erasure-utils.go index 0b8c6f47a..498997eba 100644 --- a/cmd/erasure-utils.go +++ b/cmd/erasure-utils.go @@ -24,7 +24,7 @@ import ( "sync" "github.com/klauspost/reedsolomon" - "github.com/minio/blake2b-simd" + "golang.org/x/crypto/blake2b" ) // newHashWriters - inititialize a slice of hashes for the disk count. @@ -40,11 +40,19 @@ func newHashWriters(diskCount int, algo string) []hash.Hash { func newHash(algo string) hash.Hash { switch algo { case "blake2b": - return blake2b.New512() + // ignore the error, because New512 without a key never fails + // New512 only returns a non-nil error, if the length of the passed + // key > 64 bytes - but we use blake2b as hash fucntion (no key) + h, _ := blake2b.New512(nil) + return h // Add new hashes here. default: // Default to blake2b. - return blake2b.New512() + // ignore the error, because New512 without a key never fails + // New512 only returns a non-nil error, if the length of the passed + // key > 64 bytes - but we use blake2b as hash fucntion (no key) + h, _ := blake2b.New512(nil) + return h } } diff --git a/vendor/github.com/minio/blake2b-simd/LICENSE b/vendor/github.com/minio/blake2b-simd/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/minio/blake2b-simd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/minio/blake2b-simd/README.md b/vendor/github.com/minio/blake2b-simd/README.md deleted file mode 100644 index 31fcbf749..000000000 --- a/vendor/github.com/minio/blake2b-simd/README.md +++ /dev/null @@ -1,144 +0,0 @@ -BLAKE2b-SIMD -============ - -Pure Go implementation of BLAKE2b using SIMD optimizations. - -Introduction ------------- - -This package was initially based on the pure go [BLAKE2b](https://github.com/dchest/blake2b) implementation of Dmitry Chestnykh and merged with the (`cgo` dependent) AVX optimized [BLAKE2](https://github.com/codahale/blake2) implementation (which in turn is based on the [official implementation](https://github.com/BLAKE2/BLAKE2). It does so by using [Go's Assembler](https://golang.org/doc/asm) for amd64 architectures with a golang only fallback for other architectures. - -In addition to AVX there is also support for AVX2 as well as SSE. Best performance is obtained with AVX2 which gives roughly a **4X** performance increase approaching hashing speeds of **1GB/sec** on a single core. - -Benchmarks ----------- - -This is a summary of the performance improvements. Full details are shown below. 
- -| Technology | 128K | -| ---------- |:-----:| -| AVX2 | 3.94x | -| AVX | 3.28x | -| SSE | 2.85x | - -asm2plan9s ----------- - -In order to be able to work more easily with AVX2/AVX instructions, a separate tool was developed to convert AVX2/AVX instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information. - -bt2sum ------- - -[bt2sum](https://github.com/s3git/bt2sum) is a utility that takes advantages of the BLAKE2b SIMD optimizations to compute check sums using the BLAKE2 Tree hashing mode in so called 'unlimited fanout' mode. - -Technical details ------------------ - -BLAKE2b is a hashing algorithm that operates on 64-bit integer values. The AVX2 version uses the 256-bit wide YMM registers in order to essentially process four operations in parallel. AVX and SSE operate on 128-bit values simultaneously (two operations in parallel). Below are excerpts from `compressAvx2_amd64.s`, `compressAvx_amd64.s`, and `compress_generic.go` respectively. - -``` - VPADDQ YMM0,YMM0,YMM1 /* v0 += v4, v1 += v5, v2 += v6, v3 += v7 */ -``` - -``` - VPADDQ XMM0,XMM0,XMM2 /* v0 += v4, v1 += v5 */ - VPADDQ XMM1,XMM1,XMM3 /* v2 += v6, v3 += v7 */ -``` - -``` - v0 += v4 - v1 += v5 - v2 += v6 - v3 += v7 -``` - -Detailed benchmarks -------------------- - -Example performance metrics were generated on Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz - 6 physical cores, 12 logical cores running Ubuntu GNU/Linux with kernel version 4.4.0-24-generic (vanilla with no optimizations). 
- -### AVX2 - -``` -$ benchcmp go.txt avx2.txt -benchmark old ns/op new ns/op delta -BenchmarkHash64-12 1481 849 -42.67% -BenchmarkHash128-12 1428 746 -47.76% -BenchmarkHash1K-12 6379 2227 -65.09% -BenchmarkHash8K-12 37219 11714 -68.53% -BenchmarkHash32K-12 140716 35935 -74.46% -BenchmarkHash128K-12 561656 142634 -74.60% - -benchmark old MB/s new MB/s speedup -BenchmarkHash64-12 43.20 75.37 1.74x -BenchmarkHash128-12 89.64 171.35 1.91x -BenchmarkHash1K-12 160.52 459.69 2.86x -BenchmarkHash8K-12 220.10 699.32 3.18x -BenchmarkHash32K-12 232.87 911.85 3.92x -BenchmarkHash128K-12 233.37 918.93 3.94x -``` - -### AVX2: Comparison to other hashing techniques - -``` -$ go test -bench=Comparison -BenchmarkComparisonMD5-12 1000 1726121 ns/op 607.48 MB/s -BenchmarkComparisonSHA1-12 500 2005164 ns/op 522.94 MB/s -BenchmarkComparisonSHA256-12 300 5531036 ns/op 189.58 MB/s -BenchmarkComparisonSHA512-12 500 3423030 ns/op 306.33 MB/s -BenchmarkComparisonBlake2B-12 1000 1232690 ns/op 850.64 MB/s -``` - -Benchmarks below were generated on a MacBook Pro with a 2.7 GHz Intel Core i7. 
- -### AVX - -``` -$ benchcmp go.txt avx.txt -benchmark old ns/op new ns/op delta -BenchmarkHash64-8 813 458 -43.67% -BenchmarkHash128-8 766 401 -47.65% -BenchmarkHash1K-8 4881 1763 -63.88% -BenchmarkHash8K-8 36127 12273 -66.03% -BenchmarkHash32K-8 140582 43155 -69.30% -BenchmarkHash128K-8 567850 173246 -69.49% - -benchmark old MB/s new MB/s speedup -BenchmarkHash64-8 78.63 139.57 1.78x -BenchmarkHash128-8 166.98 318.73 1.91x -BenchmarkHash1K-8 209.76 580.68 2.77x -BenchmarkHash8K-8 226.76 667.46 2.94x -BenchmarkHash32K-8 233.09 759.29 3.26x -BenchmarkHash128K-8 230.82 756.56 3.28x -``` - -### SSE - -``` -$ benchcmp go.txt sse.txt -benchmark old ns/op new ns/op delta -BenchmarkHash64-8 813 478 -41.21% -BenchmarkHash128-8 766 411 -46.34% -BenchmarkHash1K-8 4881 1870 -61.69% -BenchmarkHash8K-8 36127 12427 -65.60% -BenchmarkHash32K-8 140582 49512 -64.78% -BenchmarkHash128K-8 567850 199040 -64.95% - -benchmark old MB/s new MB/s speedup -BenchmarkHash64-8 78.63 133.78 1.70x -BenchmarkHash128-8 166.98 311.23 1.86x -BenchmarkHash1K-8 209.76 547.37 2.61x -BenchmarkHash8K-8 226.76 659.20 2.91x -BenchmarkHash32K-8 233.09 661.81 2.84x -BenchmarkHash128K-8 230.82 658.52 2.85x -``` - -License -------- - -Released under the Apache License v2.0. You can find the complete text in the file LICENSE. - -Contributing ------------- - -Contributions are welcome, please send PRs for any enhancements. diff --git a/vendor/github.com/minio/blake2b-simd/blake2b.go b/vendor/github.com/minio/blake2b-simd/blake2b.go deleted file mode 100644 index 538466a1a..000000000 --- a/vendor/github.com/minio/blake2b-simd/blake2b.go +++ /dev/null @@ -1,301 +0,0 @@ -// Written in 2012 by Dmitry Chestnykh. -// -// To the extent possible under law, the author have dedicated all copyright -// and related and neighboring rights to this software to the public domain -// worldwide. This software is distributed without any warranty. 
-// http://creativecommons.org/publicdomain/zero/1.0/ - -// Package blake2b implements BLAKE2b cryptographic hash function. -package blake2b - -import ( - "encoding/binary" - "errors" - "hash" -) - -const ( - BlockSize = 128 // block size of algorithm - Size = 64 // maximum digest size - SaltSize = 16 // maximum salt size - PersonSize = 16 // maximum personalization string size - KeySize = 64 // maximum size of key -) - -type digest struct { - h [8]uint64 // current chain value - t [2]uint64 // message bytes counter - f [2]uint64 // finalization flags - x [BlockSize]byte // buffer for data not yet compressed - nx int // number of bytes in buffer - - ih [8]uint64 // initial chain value (after config) - paddedKey [BlockSize]byte // copy of key, padded with zeros - isKeyed bool // indicates whether hash was keyed - size uint8 // digest size in bytes - isLastNode bool // indicates processing of the last node in tree hashing -} - -// Initialization values. -var iv = [8]uint64{ - 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, - 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, - 0x510e527fade682d1, 0x9b05688c2b3e6c1f, - 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, -} - -// Config is used to configure hash function parameters and keying. -// All parameters are optional. -type Config struct { - Size uint8 // digest size (if zero, default size of 64 bytes is used) - Key []byte // key for prefix-MAC - Salt []byte // salt (if < 16 bytes, padded with zeros) - Person []byte // personalization (if < 16 bytes, padded with zeros) - Tree *Tree // parameters for tree hashing -} - -// Tree represents parameters for tree hashing. 
-type Tree struct { - Fanout uint8 // fanout - MaxDepth uint8 // maximal depth - LeafSize uint32 // leaf maximal byte length (0 for unlimited) - NodeOffset uint64 // node offset (0 for first, leftmost or leaf) - NodeDepth uint8 // node depth (0 for leaves) - InnerHashSize uint8 // inner hash byte length - IsLastNode bool // indicates processing of the last node of layer -} - -var ( - defaultConfig = &Config{Size: Size} - config256 = &Config{Size: 32} -) - -func verifyConfig(c *Config) error { - if c.Size > Size { - return errors.New("digest size is too large") - } - if len(c.Key) > KeySize { - return errors.New("key is too large") - } - if len(c.Salt) > SaltSize { - // Smaller salt is okay: it will be padded with zeros. - return errors.New("salt is too large") - } - if len(c.Person) > PersonSize { - // Smaller personalization is okay: it will be padded with zeros. - return errors.New("personalization is too large") - } - if c.Tree != nil { - if c.Tree.Fanout == 1 { - return errors.New("fanout of 1 is not allowed in tree mode") - } - if c.Tree.MaxDepth < 2 { - return errors.New("incorrect tree depth") - } - if c.Tree.InnerHashSize < 1 || c.Tree.InnerHashSize > Size { - return errors.New("incorrect tree inner hash size") - } - } - return nil -} - -// New returns a new hash.Hash configured with the given Config. -// Config can be nil, in which case the default one is used, calculating 64-byte digest. -// Returns non-nil error if Config contains invalid parameters. -func New(c *Config) (hash.Hash, error) { - if c == nil { - c = defaultConfig - } else { - if c.Size == 0 { - // Set default size if it's zero. - c.Size = Size - } - if err := verifyConfig(c); err != nil { - return nil, err - } - } - d := new(digest) - d.initialize(c) - return d, nil -} - -// initialize initializes digest with the given -// config, which must be non-nil and verified. -func (d *digest) initialize(c *Config) { - // Create parameter block. 
- var p [BlockSize]byte - p[0] = c.Size - p[1] = uint8(len(c.Key)) - if c.Salt != nil { - copy(p[32:], c.Salt) - } - if c.Person != nil { - copy(p[48:], c.Person) - } - if c.Tree != nil { - p[2] = c.Tree.Fanout - p[3] = c.Tree.MaxDepth - binary.LittleEndian.PutUint32(p[4:], c.Tree.LeafSize) - binary.LittleEndian.PutUint64(p[8:], c.Tree.NodeOffset) - p[16] = c.Tree.NodeDepth - p[17] = c.Tree.InnerHashSize - } else { - p[2] = 1 - p[3] = 1 - } - - // Initialize. - d.size = c.Size - for i := 0; i < 8; i++ { - d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(p[i*8:]) - } - if c.Tree != nil && c.Tree.IsLastNode { - d.isLastNode = true - } - - // Process key. - if c.Key != nil { - copy(d.paddedKey[:], c.Key) - d.Write(d.paddedKey[:]) - d.isKeyed = true - } - // Save a copy of initialized state. - copy(d.ih[:], d.h[:]) -} - -// New512 returns a new hash.Hash computing the BLAKE2b 64-byte checksum. -func New512() hash.Hash { - d := new(digest) - d.initialize(defaultConfig) - return d -} - -// New256 returns a new hash.Hash computing the BLAKE2b 32-byte checksum. -func New256() hash.Hash { - d := new(digest) - d.initialize(config256) - return d -} - -// NewMAC returns a new hash.Hash computing BLAKE2b prefix- -// Message Authentication Code of the given size in bytes -// (up to 64) with the given key (up to 64 bytes in length). -func NewMAC(outBytes uint8, key []byte) hash.Hash { - d, err := New(&Config{Size: outBytes, Key: key}) - if err != nil { - panic(err.Error()) - } - return d -} - -// Reset resets the state of digest to the initial state -// after configuration and keying. -func (d *digest) Reset() { - copy(d.h[:], d.ih[:]) - d.t[0] = 0 - d.t[1] = 0 - d.f[0] = 0 - d.f[1] = 0 - d.nx = 0 - if d.isKeyed { - d.Write(d.paddedKey[:]) - } -} - -// Size returns the digest size in bytes. -func (d *digest) Size() int { return int(d.size) } - -// BlockSize returns the algorithm block size in bytes. 
-func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - left := BlockSize - d.nx - if len(p) > left { - // Process buffer. - copy(d.x[d.nx:], p[:left]) - p = p[left:] - compress(d, d.x[:]) - d.nx = 0 - } - // Process full blocks except for the last one. - if len(p) > BlockSize { - n := len(p) &^ (BlockSize - 1) - if n == len(p) { - n -= BlockSize - } - compress(d, p[:n]) - p = p[n:] - } - // Fill buffer. - d.nx += copy(d.x[d.nx:], p) - return -} - -// Sum returns the calculated checksum. -func (d *digest) Sum(in []byte) []byte { - // Make a copy of d so that caller can keep writing and summing. - d0 := *d - hash := d0.checkSum() - return append(in, hash[:d0.size]...) -} - -func (d *digest) checkSum() [Size]byte { - // Do not create unnecessary copies of the key. - if d.isKeyed { - for i := 0; i < len(d.paddedKey); i++ { - d.paddedKey[i] = 0 - } - } - - dec := BlockSize - uint64(d.nx) - if d.t[0] < dec { - d.t[1]-- - } - d.t[0] -= dec - - // Pad buffer with zeros. - for i := d.nx; i < len(d.x); i++ { - d.x[i] = 0 - } - // Set last block flag. - d.f[0] = 0xffffffffffffffff - if d.isLastNode { - d.f[1] = 0xffffffffffffffff - } - // Compress last block. - compress(d, d.x[:]) - - var out [Size]byte - j := 0 - for _, s := range d.h[:(d.size-1)/8+1] { - out[j+0] = byte(s >> 0) - out[j+1] = byte(s >> 8) - out[j+2] = byte(s >> 16) - out[j+3] = byte(s >> 24) - out[j+4] = byte(s >> 32) - out[j+5] = byte(s >> 40) - out[j+6] = byte(s >> 48) - out[j+7] = byte(s >> 56) - j += 8 - } - return out -} - -// Sum512 returns a 64-byte BLAKE2b hash of data. -func Sum512(data []byte) [64]byte { - var d digest - d.initialize(defaultConfig) - d.Write(data) - return d.checkSum() -} - -// Sum256 returns a 32-byte BLAKE2b hash of data. 
-func Sum256(data []byte) (out [32]byte) { - var d digest - d.initialize(config256) - d.Write(data) - sum := d.checkSum() - copy(out[:], sum[:32]) - return -} diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go b/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go deleted file mode 100644 index ec53599f8..000000000 --- a/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go +++ /dev/null @@ -1,47 +0,0 @@ -//+build !noasm -//+build !appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package blake2b - -//go:noescape -func compressAVX2Loop(p []uint8, in, iv, t, f, shffle, out []uint64) - -func compressAVX2(d *digest, p []uint8) { - var ( - in [8]uint64 - out [8]uint64 - shffle [8]uint64 - ) - - // vector for PSHUFB instruction - shffle[0] = 0x0201000706050403 - shffle[1] = 0x0a09080f0e0d0c0b - shffle[2] = 0x0201000706050403 - shffle[3] = 0x0a09080f0e0d0c0b - shffle[4] = 0x0100070605040302 - shffle[5] = 0x09080f0e0d0c0b0a - shffle[6] = 0x0100070605040302 - shffle[7] = 0x09080f0e0d0c0b0a - - in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7] = d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] - - compressAVX2Loop(p, in[:], iv[:], d.t[:], d.f[:], shffle[:], out[:]) - - d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7] -} diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.s b/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.s deleted file mode 100644 index 24df234b5..000000000 --- a/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.s +++ /dev/null @@ -1,671 +0,0 @@ -//+build !noasm !appengine - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -// -// Based on AVX2 implementation from https://github.com/sneves/blake2-avx2/blob/master/blake2b-common.h -// -// Use github.com/fwessels/asm2plan9s on this file to assemble instructions to their Plan9 equivalent -// -// Assembly code below essentially follows the ROUND macro (see blake2b-round.h) which is defined as: -// #define ROUND(r) \ -// LOAD_MSG_ ##r ##_1(b0, b1); \ -// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// LOAD_MSG_ ##r ##_2(b0, b1); \ -// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ -// LOAD_MSG_ ##r ##_3(b0, b1); \ -// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// LOAD_MSG_ ##r ##_4(b0, b1); \ -// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); -// -// as well as the go equivalent in https://github.com/dchest/blake2b/blob/master/block.go -// -// As in the macro, G1/G2 in the 1st and 2nd half are identical (so literal copy of assembly) -// -// Rounds are also the same, except for the loading of the message (and rounds 1 & 11 and -// rounds 2 & 12 are identical) -// - -#define G1 \ - \ // G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); - BYTE $0xc5; BYTE $0xfd; BYTE $0xd4; BYTE $0xc4 \ // VPADDQ YMM0,YMM0,YMM4 /* v0 += m[0], v1 += m[2], v2 += m[4], v3 += m[6] */ - BYTE $0xc5; BYTE $0xfd; BYTE $0xd4; BYTE $0xc1 \ // VPADDQ YMM0,YMM0,YMM1 /* v0 += v4, v1 += v5, v2 += v6, v3 += v7 */ - BYTE $0xc5; BYTE $0xe5; BYTE $0xef; BYTE $0xd8 \ // VPXOR YMM3,YMM3,YMM0 /* v12 ^= v0, v13 ^= v1, v14 ^= v2, v15 ^= v3 */ - BYTE $0xc5; BYTE $0xfd; BYTE $0x70; BYTE $0xdb; BYTE $0xb1 \ // VPSHUFD YMM3,YMM3,0xb1 /* v12 = v12<<(64-32) | v12>>32, v13 = */ - BYTE $0xc5; BYTE $0xed; BYTE $0xd4; BYTE $0xd3 \ // VPADDQ YMM2,YMM2,YMM3 /* v8 += v12, v9 += v13, v10 += v14, v11 += v15 */ - BYTE $0xc5; BYTE $0xf5; BYTE $0xef; BYTE $0xca \ // VPXOR YMM1,YMM1,YMM2 
/* v4 ^= v8, v5 ^= v9, v6 ^= v10, v7 ^= v11 */ - BYTE $0xc4; BYTE $0xe2; BYTE $0x75; BYTE $0x00; BYTE $0xce // VPSHUFB YMM1,YMM1,YMM6 /* v4 = v4<<(64-24) | v4>>24, ..., ..., v7 = v7<<(64-24) | v7>>24 */ - -#define G2 \ - BYTE $0xc5; BYTE $0xfd; BYTE $0xd4; BYTE $0xc5 \ // VPADDQ YMM0,YMM0,YMM5 /* v0 += m[1], v1 += m[3], v2 += m[5], v3 += m[7] */ - BYTE $0xc5; BYTE $0xfd; BYTE $0xd4; BYTE $0xc1 \ // VPADDQ YMM0,YMM0,YMM1 /* v0 += v4, v1 += v5, v2 += v6, v3 += v7 */ - BYTE $0xc5; BYTE $0xe5; BYTE $0xef; BYTE $0xd8 \ // VPXOR YMM3,YMM3,YMM0 /* v12 ^= v0, v13 ^= v1, v14 ^= v2, v15 ^= v3 */ - BYTE $0xc4; BYTE $0xe2; BYTE $0x65; BYTE $0x00; BYTE $0xdf \ // VPSHUFB YMM3,YMM3,YMM7 /* v12 = v12<<(64-16) | v12>>16, ..., ..., v15 = v15<<(64-16) | v15>>16 */ - BYTE $0xc5; BYTE $0xed; BYTE $0xd4; BYTE $0xd3 \ // VPADDQ YMM2,YMM2,YMM3 /* v8 += v12, v9 += v13, v10 += v14, v11 += v15 */ - BYTE $0xc5; BYTE $0xf5; BYTE $0xef; BYTE $0xca \ // VPXOR YMM1,YMM1,YMM2 /* v4 ^= v8, v5 ^= v9, v6 ^= v10, v7 ^= v11 */ - BYTE $0xc5; BYTE $0x75; BYTE $0xd4; BYTE $0xf9 \ // VPADDQ YMM15,YMM1,YMM1 /* temp reg = reg*2 */ - BYTE $0xc5; BYTE $0xf5; BYTE $0x73; BYTE $0xd1; BYTE $0x3f \ // VPSRLQ YMM1,YMM1,0x3f /* reg = reg>>63 */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x75; BYTE $0xef; BYTE $0xcf // VPXOR YMM1,YMM1,YMM15 /* ORed together: v4 = v4<<(64-63) | v4>>63, v5 = v5<<(64-63) | v5>>63 */ - -#define DIAGONALIZE \ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb \ // VPERMQ YMM3, YMM3, 0x93 - BYTE $0x93 \ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2 \ // VPERMQ YMM2, YMM2, 0x4e - BYTE $0x4e \ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 \ // VPERMQ YMM1, YMM1, 0x39 - BYTE $0x39 \ - // DO NOT DELETE -- macro delimiter (previous line extended) - -#define UNDIAGONALIZE \ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb \ // VPERMQ YMM3, YMM3, 0x39 - BYTE $0x39 \ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2 \ // VPERMQ YMM2, YMM2, 0x4e 
- BYTE $0x4e \ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 \ // VPERMQ YMM1, YMM1, 0x93 - BYTE $0x93 \ - // DO NOT DELETE -- macro delimiter (previous line extended) - -#define LOAD_SHUFFLE \ - MOVQ shffle+120(FP), SI \ // SI: &shuffle - BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x36 \ // VMOVDQU YMM6, [rsi] - BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x7e; BYTE $0x20 // VMOVDQU YMM7, 32[rsi] - -// func compressAVX2Loop(compressSSE(p []uint8, in, iv, t, f, shffle, out []uint64) -TEXT ·compressAVX2Loop(SB), 7, $0 - - // REGISTER USE - // Y0 - Y3: v0 - v15 - // Y4 - Y5: m[0] - m[7] - // Y6 - Y7: shuffle value - // Y8 - Y9: temp registers - // Y10 -Y13: copy of full message - // Y15: temp register - - // Load digest - MOVQ in+24(FP), SI // SI: &in - BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x06 // VMOVDQU YMM0, [rsi] - BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x4e; BYTE $0x20 // VMOVDQU YMM1, 32[rsi] - - // Already store digest into &out (so we can reload it later generically) - MOVQ out+144(FP), SI // SI: &out - BYTE $0xc5; BYTE $0xfe; BYTE $0x7f; BYTE $0x06 // VMOVDQU [rsi], YMM0 - BYTE $0xc5; BYTE $0xfe; BYTE $0x7f; BYTE $0x4e; BYTE $0x20 // VMOVDQU 32[rsi], YMM1 - - // Initialize message pointer and loop counter - MOVQ message+0(FP), DX // DX: &p (message) - MOVQ message_len+8(FP), R8 // R8: len(message) - SHRQ $7, R8 // len(message) / 128 - CMPQ R8, $0 - JEQ complete - -loop: - // Increment counter - MOVQ t+72(FP), SI // SI: &t - MOVQ 0(SI), R9 // - ADDQ $128, R9 // /* d.t[0] += BlockSize */ - MOVQ R9, 0(SI) // - CMPQ R9, $128 // /* if d.t[0] < BlockSize { */ - JGE noincr // - MOVQ 8(SI), R9 // - ADDQ $1, R9 // /* d.t[1]++ */ - MOVQ R9, 8(SI) // -noincr: // /* } */ - - // Load initialization vector - MOVQ iv+48(FP), SI // SI: &iv - BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x16 // VMOVDQU YMM2, [rsi] - BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x5e; BYTE $0x20 // VMOVDQU YMM3, 32[rsi] - MOVQ t+72(FP), SI // SI: &t - BYTE $0xc4; BYTE $0x63; BYTE 
$0x3d; BYTE $0x38; BYTE $0x06 // VINSERTI128 YMM8, YMM8, [rsi], 0 /* Y8 = t[0]+t[1] */ - BYTE $0x00 - MOVQ t+96(FP), SI // SI: &f - BYTE $0xc4; BYTE $0x63; BYTE $0x3d; BYTE $0x38; BYTE $0x06 // VINSERTI128 YMM8, YMM8, [rsi], 1 /* Y8 = t[0]+t[1]+f[0]+f[1] */ - BYTE $0x01 - BYTE $0xc4; BYTE $0xc1; BYTE $0x65; BYTE $0xef; BYTE $0xd8 // VPXOR YMM3,YMM3,YMM8 /* Y3 = Y3 ^ Y8 */ - - BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x12 // VMOVDQU YMM10, [rdx] /* Y10 = m[0]+ m[1]+ m[2]+ m[3] */ - BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x5a; BYTE $0x20 // VMOVDQU YMM11, 32[rdx] /* Y11 = m[4]+ m[5]+ m[6]+ m[7] */ - BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x62; BYTE $0x40 // VMOVDQU YMM12, 64[rdx] /* Y12 = m[8]+ m[9]+m[10]+m[11] */ - BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x6a; BYTE $0x60 // VMOVDQU YMM13, 96[rdx] /* Y13 = m[12]+m[13]+m[14]+m[15] */ - - LOAD_SHUFFLE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0xc1; BYTE $0x2d; BYTE $0x6c; BYTE $0xe3 // VPUNPCKLQDQ YMM4, YMM10, YMM11 /* m[0], m[4], m[2], m[6] */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x2d; BYTE $0x6d; BYTE $0xeb // VPUNPCKHQDQ YMM5, YMM10, YMM11 /* m[1], m[5], m[3], m[7] */ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xe4 // VPERMQ YMM4, YMM4, 0xd8 /* 0x1101 1000 = 0xd8 */ - BYTE $0xd8 - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xed // VPERMQ YMM5, YMM5, 0xd8 /* 0x1101 1000 = 0xd8 */ - BYTE $0xd8 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0xc1; BYTE $0x1d; BYTE $0x6c; BYTE $0xe5 // VPUNPCKLQDQ YMM4, YMM12, YMM13 /* m[8], m[12], m[10], m[14] */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x1d; BYTE $0x6d; BYTE $0xed // VPUNPCKHQDQ YMM5, YMM12, YMM13 /* m[9], m[13], m[11], m[15] */ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xe4 // VPERMQ YMM4, YMM4, 0xd8 /* 0x1101 1000 = 0xd8 */ - BYTE $0xd8 - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE 
$0x00; BYTE $0xed // VPERMQ YMM5, YMM5, 0xd8 /* 0x1101 1000 = 0xd8 */ - BYTE $0xd8 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 2 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ YMM8, YMM11, YMM13 /* m[4], ____, ____, m[14] */ - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x03 /* m[14], m[4], ____, ____ */ /* xxxx 0011 = 0x03 */ - BYTE $0x03 - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xcd // VPUNPCKHQDQ YMM9, YMM12, YMM13 /* m[9], m[13], ____, ____ */ - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 /* m[9], m[13], ____, ____ */ /* 0010 0000 = 0x20 */ - BYTE $0x20 - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc4 // VPERMQ YMM8, YMM12, 0x02 /* m[10], m[8], ____, ____ */ /* xxxx 0010 = 0x02 */ - BYTE $0x02 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0x30 /* ____, ____, m[15], ____ */ /* xx11 xxxx = 0x30 */ - BYTE $0x30 - BYTE $0xc4; BYTE $0x41; BYTE $0x35; BYTE $0x6c; BYTE $0xcb // VPUNPCKLQDQ YMM9, YMM9, YMM11 /* ____, ____, m[15], m[6] */ - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 /* m[9], m[13], m[15], m[6] */ /* 0011 0000 = 0x30 */ - BYTE $0x30 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc2 // VPERMQ YMM8, YMM10, 0x01 /* m[1], m[0], ____, ____ */ /* xxxx 0001 = 0x01 */ - BYTE $0x01 - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ YMM9, YMM11, YMM12 /* m[5], ____, ____, m[11] */ - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x03 /* m[11], m[5], ____, ____ */ /* xxxx 0011 = 0x03 */ - BYTE $0x03 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 
YMM4, YMM8, YMM9, 0x20 /* m[1], m[0], m[11], m[5] */ /* 0010 0000 = 0x20 */ - BYTE $0x20 - - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ YMM8, YMM10, YMM13 /* ___, m[12], m[2], ____ */ - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x09 /* m[12], m[2], ____, ____ */ /* xxxx 1001 = 0x09 */ - BYTE $0x09 - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xca // VPUNPCKHQDQ YMM9, YMM11, YMM10 /* ____, ____, m[7], m[3] */ - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 /* m[9], m[13], m[15], m[6] */ /* 0011 0000 = 0x30 */ - BYTE $0x30 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 3 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc5 // VPERMQ YMM8, YMM13, 0x00 - BYTE $0x00 - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc0 // VPUNPCKHQDQ YMM8, YMM12, YMM8 - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xcd // VPUNPCKHQDQ YMM9, YMM11, YMM13 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x0c - BYTE $0x0c - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x21 - BYTE $0x21 - - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6c; BYTE $0xc2 // VPUNPCKLQDQ YMM8, YMM12, YMM10 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0x55 - BYTE $0x55 - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM10, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 - BYTE $0x30 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc2 // VPERMQ YMM8, YMM10, 0xff - BYTE $0xff - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM12, YMM8 - 
BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ YMM9, YMM11, YMM12 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x60 - BYTE $0x60 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x31 - BYTE $0x31 - - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc3 // VPUNPCKLQDQ YMM8, YMM13, YMM11 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcb // VPERMQ YMM9, YMM11, 0x00 - BYTE $0x00 - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xc9 // VPUNPCKHQDQ YMM9, YMM10, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x21 - BYTE $0x21 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 4 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xc2 // VPUNPCKHQDQ YMM8, YMM11, YMM10 - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ YMM9, YMM13, YMM12 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x0c - BYTE $0x0c - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x21 - BYTE $0x21 - - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc2 // VPUNPCKHQDQ YMM8, YMM12, YMM10 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0x08 - BYTE $0x08 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x20 - BYTE $0x20 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc3 // VPERMQ YMM8, YMM11, 0x55 - BYTE $0x55 - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM10, YMM8 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0xff - BYTE $0xff - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE 
$0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM11, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x21 - BYTE $0x21 - - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc4 // VPUNPCKLQDQ YMM8, YMM11, YMM12 - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xcc // VPUNPCKLQDQ YMM9, YMM10, YMM12 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x21 - BYTE $0x21 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 5 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc3 // VPUNPCKHQDQ YMM8, YMM12, YMM11 - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xcc // VPUNPCKLQDQ YMM9, YMM10, YMM12 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x30 - BYTE $0x30 - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc3 // VPERMQ YMM8, YMM11, 0xff - BYTE $0xff - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM10, YMM8 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0xff - BYTE $0xff - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM11, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x20 - BYTE $0x20 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc4 // VPERMQ YMM8, YMM12, 0xff - BYTE $0xff - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM13, YMM8 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xca // VPERMQ YMM9, YMM10, 0xff - BYTE $0xff - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM11, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x31 - 
BYTE $0x31 - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc5 // VPERMQ YMM8, YMM13, 0x00 - BYTE $0x00 - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xc0 // VPUNPCKHQDQ YMM8, YMM10, YMM8 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0x55 - BYTE $0x55 - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM12, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x20 - BYTE $0x20 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 6 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc3 // VPUNPCKLQDQ YMM8, YMM10, YMM11 - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xcc // VPUNPCKLQDQ YMM9, YMM10, YMM12 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x21 - BYTE $0x21 - - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc4 // VPUNPCKLQDQ YMM8, YMM13, YMM12 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x0c - BYTE $0x0c - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xca // VPUNPCKHQDQ YMM9, YMM12, YMM10 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 - BYTE $0x30 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc3 // VPERMQ YMM8, YMM11, 0x0c - BYTE $0x0c - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xca // VPUNPCKHQDQ YMM9, YMM13, YMM10 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x60 - BYTE $0x60 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x30 - BYTE $0x30 - - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xc3 // VPUNPCKHQDQ YMM8, YMM13, YMM11 - BYTE $0xc4; BYTE $0x43; BYTE 
$0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0x55 - BYTE $0x55 - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM13, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 - BYTE $0x30 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 7 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc2 // VPERMQ YMM8, YMM10, 0x55 - BYTE $0x55 - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM13, YMM8 - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xcb // VPUNPCKLQDQ YMM9, YMM13, YMM11 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x60 - BYTE $0x60 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x30 - BYTE $0x30 - - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xc5 // VPUNPCKHQDQ YMM8, YMM11, YMM13 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x0c - BYTE $0x0c - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0xaa - BYTE $0xaa - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xc9 // VPUNPCKHQDQ YMM9, YMM13, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x20 - BYTE $0x20 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc3 // VPUNPCKLQDQ YMM8, YMM10, YMM11 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x0c - BYTE $0x0c - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0x01 - BYTE $0x01 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 - BYTE $0x20 - - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xc2 // 
VPUNPCKHQDQ YMM8, YMM11, YMM10 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0xff - BYTE $0xff - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM10, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x31 - BYTE $0x31 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 8 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xc3 // VPUNPCKHQDQ YMM8, YMM13, YMM11 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x0c - BYTE $0x0c - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xca // VPERMQ YMM9, YMM10, 0xff - BYTE $0xff - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM13, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 - BYTE $0x20 - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc5 // VPERMQ YMM8, YMM13, 0xaa - BYTE $0xaa - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc0 // VPUNPCKHQDQ YMM8, YMM12, YMM8 - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ YMM9, YMM10, YMM12 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x21 - BYTE $0x21 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xc5 // VPUNPCKHQDQ YMM8, YMM11, YMM13 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x0c - BYTE $0x0c - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6c; BYTE $0xca // VPUNPCKLQDQ YMM9, YMM12, YMM10 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x0c - BYTE $0x0c - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 - BYTE $0x20 - - BYTE 
$0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc3 // VPUNPCKLQDQ YMM8, YMM10, YMM11 - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xcc // VPUNPCKLQDQ YMM9, YMM11, YMM12 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 - BYTE $0x30 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 9 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ YMM8, YMM11, YMM13 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xca // VPERMQ YMM9, YMM10, 0x00 - BYTE $0x00 - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc9 // VPUNPCKHQDQ YMM9, YMM12, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x31 - BYTE $0x31 - - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xc4 // VPUNPCKHQDQ YMM8, YMM13, YMM12 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x60 - BYTE $0x60 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0x00 - BYTE $0x00 - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xc9 // VPUNPCKHQDQ YMM9, YMM10, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x31 - BYTE $0x31 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0xaa - BYTE $0xaa - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xc9 // VPUNPCKHQDQ YMM9, YMM10, YMM9 - BYTE $0xc4; BYTE $0xc3; BYTE $0x15; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM13, YMM9, 0x20 - BYTE $0x20 - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc3 // VPERMQ YMM8, YMM11, 0xff - BYTE $0xff - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM10, YMM8 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE 
$0xcb // VPERMQ YMM9, YMM11, 0x04 - BYTE $0x04 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x21 - BYTE $0x21 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 10 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc4 // VPERMQ YMM8, YMM12, 0x20 - BYTE $0x20 - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xca // VPUNPCKHQDQ YMM9, YMM11, YMM10 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x60 - BYTE $0x60 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x31 - BYTE $0x31 - - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc3 // VPUNPCKLQDQ YMM8, YMM10, YMM11 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x60 - BYTE $0x60 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcb // VPERMQ YMM9, YMM11, 0x60 - BYTE $0x60 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x31 - BYTE $0x31 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xc4 // VPUNPCKHQDQ YMM8, YMM13, YMM12 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x60 - BYTE $0x60 - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xcd // VPUNPCKHQDQ YMM9, YMM10, YMM13 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x60 - BYTE $0x60 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x31 - BYTE $0x31 - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc5 // VPERMQ YMM8, YMM13, 0xaa - BYTE $0xaa - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc0 // VPUNPCKHQDQ YMM8, YMM12, YMM8 - BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xca // VPUNPCKLQDQ YMM9, 
YMM13, YMM10 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x21 - BYTE $0x21 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 1 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0xc1; BYTE $0x2d; BYTE $0x6c; BYTE $0xe3 // VPUNPCKLQDQ YMM4, YMM10, YMM11 /* m[0], m[4], m[2], m[6] */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x2d; BYTE $0x6d; BYTE $0xeb // VPUNPCKHQDQ YMM5, YMM10, YMM11 /* m[1], m[5], m[3], m[7] */ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xe4 // VPERMQ YMM4, YMM4, 0xd8 /* 0x1101 1000 = 0xd8 */ - BYTE $0xd8 - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xed // VPERMQ YMM5, YMM5, 0xd8 /* 0x1101 1000 = 0xd8 */ - BYTE $0xd8 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0xc1; BYTE $0x1d; BYTE $0x6c; BYTE $0xe5 // VPUNPCKLQDQ YMM4, YMM12, YMM13 /* m[8], m[12], m[10], m[14] */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x1d; BYTE $0x6d; BYTE $0xed // VPUNPCKHQDQ YMM5, YMM12, YMM13 /* m[9], m[13], m[11], m[15] */ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xe4 // VPERMQ YMM4, YMM4, 0xd8 /* 0x1101 1000 = 0xd8 */ - BYTE $0xd8 - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xed // VPERMQ YMM5, YMM5, 0xd8 /* 0x1101 1000 = 0xd8 */ - BYTE $0xd8 - - G1 - G2 - - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 2 - /////////////////////////////////////////////////////////////////////////// - - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ YMM8, YMM11, YMM13 /* m[4], ____, ____, m[14] */ - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x03 /* m[14], m[4], ____, ____ */ /* xxxx 0011 = 0x03 */ - BYTE $0x03 - BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xcd // VPUNPCKHQDQ YMM9, YMM12, YMM13 /* m[9], m[13], ____, ____ */ - BYTE $0xc4; 
BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 /* m[9], m[13], ____, ____ */ /* 0010 0000 = 0x20 */ - BYTE $0x20 - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc4 // VPERMQ YMM8, YMM12, 0x02 /* m[10], m[8], ____, ____ */ /* xxxx 0010 = 0x02 */ - BYTE $0x02 - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0x30 /* ____, ____, m[15], ____ */ /* xx11 xxxx = 0x30 */ - BYTE $0x30 - BYTE $0xc4; BYTE $0x41; BYTE $0x35; BYTE $0x6c; BYTE $0xcb // VPUNPCKLQDQ YMM9, YMM9, YMM11 /* ____, ____, m[15], m[6] */ - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 /* m[9], m[13], m[15], m[6] */ /* 0011 0000 = 0x30 */ - BYTE $0x30 - - G1 - G2 - - DIAGONALIZE - - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc2 // VPERMQ YMM8, YMM10, 0x01 /* m[1], m[0], ____, ____ */ /* xxxx 0001 = 0x01 */ - BYTE $0x01 - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ YMM9, YMM11, YMM12 /* m[5], ____, ____, m[11] */ - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x03 /* m[11], m[5], ____, ____ */ /* xxxx 0011 = 0x03 */ - BYTE $0x03 - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 /* m[1], m[0], m[11], m[5] */ /* 0010 0000 = 0x20 */ - BYTE $0x20 - - BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ YMM8, YMM10, YMM13 /* ___, m[12], m[2], ____ */ - BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x09 /* m[12], m[2], ____, ____ */ /* xxxx 1001 = 0x09 */ - BYTE $0x09 - BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xca // VPUNPCKHQDQ YMM9, YMM11, YMM10 /* ____, ____, m[7], m[3] */ - BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 /* m[9], m[13], m[15], m[6] */ /* 0011 0000 = 0x30 */ - BYTE $0x30 - - G1 - G2 - - UNDIAGONALIZE - - // Reload digest 
(most current value store in &out) - MOVQ out+144(FP), SI // SI: &in - BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x26 // VMOVDQU YMM12, [rsi] - BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x6e; BYTE $0x20 // VMOVDQU YMM13, 32[rsi] - - BYTE $0xc5; BYTE $0xfd; BYTE $0xef; BYTE $0xc2 // VPXOR YMM0,YMM0,YMM2 /* X0 = X0 ^ X4, X1 = X1 ^ X5 */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x7d; BYTE $0xef; BYTE $0xc4 // VPXOR YMM0,YMM0,YMM12 /* X0 = X0 ^ X12, X1 = X1 ^ X13 */ - BYTE $0xc5; BYTE $0xf5; BYTE $0xef; BYTE $0xcb // VPXOR YMM1,YMM1,YMM3 /* X2 = X2 ^ X6, X3 = X3 ^ X7 */ - BYTE $0xc4; BYTE $0xc1; BYTE $0x75; BYTE $0xef; BYTE $0xcd // VPXOR YMM1,YMM1,YMM13 /* X2 = X2 ^ X14, X3 = X3 ^ X15 */ - - // Store digest into &out - MOVQ out+144(FP), SI // SI: &out - BYTE $0xc5; BYTE $0xfe; BYTE $0x7f; BYTE $0x06 // VMOVDQU [rsi], YMM0 - BYTE $0xc5; BYTE $0xfe; BYTE $0x7f; BYTE $0x4e; BYTE $0x20 // VMOVDQU 32[rsi], YMM1 - - // Increment message pointer and check if there's more to do - ADDQ $128, DX // message += 128 - SUBQ $1, R8 - JNZ loop - -complete: - BYTE $0xc5; BYTE $0xf8; BYTE $0x77 // VZEROUPPER /* Prevent further context switches */ - RET - diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go b/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go deleted file mode 100644 index cfa12c04f..000000000 --- a/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go +++ /dev/null @@ -1,41 +0,0 @@ -//+build !noasm -//+build !appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package blake2b - -//go:noescape -func blockAVXLoop(p []uint8, in, iv, t, f, shffle, out []uint64) - -func compressAVX(d *digest, p []uint8) { - var ( - in [8]uint64 - out [8]uint64 - shffle [2]uint64 - ) - - // vector for PSHUFB instruction - shffle[0] = 0x0201000706050403 - shffle[1] = 0x0a09080f0e0d0c0b - - in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7] = d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] - - blockAVXLoop(p, in[:], iv[:], d.t[:], d.f[:], shffle[:], out[:]) - - d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7] -} diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s b/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s deleted file mode 100644 index f68e17392..000000000 --- a/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s +++ /dev/null @@ -1,682 +0,0 @@ -//+build !noasm !appengine - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -// -// Based on SSE implementation from https://github.com/BLAKE2/BLAKE2/blob/master/sse/blake2b.c -// -// Use github.com/fwessels/asm2plan9s on this file to assemble instructions to their Plan9 equivalent -// -// Assembly code below essentially follows the ROUND macro (see blake2b-round.h) which is defined as: -// #define ROUND(r) \ -// LOAD_MSG_ ##r ##_1(b0, b1); \ -// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// LOAD_MSG_ ##r ##_2(b0, b1); \ -// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ -// LOAD_MSG_ ##r ##_3(b0, b1); \ -// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// LOAD_MSG_ ##r ##_4(b0, b1); \ -// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); -// -// as well as the go equivalent in https://github.com/dchest/blake2b/blob/master/block.go -// -// As in the macro, G1/G2 in the 1st and 2nd half are identical (so literal copy of assembly) -// -// Rounds are also the same, except for the loading of the message (and rounds 1 & 11 and -// rounds 2 & 12 are identical) -// - -#define G1 \ - \ // G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); - LONG $0xd479c1c4; BYTE $0xc0 \ // VPADDQ XMM0,XMM0,XMM8 /* v0 += m[0], v1 += m[2] */ - LONG $0xd471c1c4; BYTE $0xc9 \ // VPADDQ XMM1,XMM1,XMM9 /* v2 += m[4], v3 += m[6] */ - LONG $0xc2d4f9c5 \ // VPADDQ XMM0,XMM0,XMM2 /* v0 += v4, v1 += v5 */ - LONG $0xcbd4f1c5 \ // VPADDQ XMM1,XMM1,XMM3 /* v2 += v6, v3 += v7 */ - LONG $0xf0efc9c5 \ // VPXOR XMM6,XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ - LONG $0xf9efc1c5 \ // VPXOR XMM7,XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ - LONG $0xf670f9c5; BYTE $0xb1 \ // VPSHUFD XMM6,XMM6,0xb1 /* v12 = v12<<(64-32) | v12>>32, v13 = v13<<(64-32) | v13>>32 */ - LONG $0xff70f9c5; BYTE $0xb1 \ // VPSHUFD XMM7,XMM7,0xb1 /* v14 = v14<<(64-32) | v14>>32, v15 = v15<<(64-32) | v15>>32 */ - LONG 
$0xe6d4d9c5 \ // VPADDQ XMM4,XMM4,XMM6 /* v8 += v12, v9 += v13 */ - LONG $0xefd4d1c5 \ // VPADDQ XMM5,XMM5,XMM7 /* v10 += v14, v11 += v15 */ - LONG $0xd4efe9c5 \ // VPXOR XMM2,XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ - LONG $0xddefe1c5 \ // VPXOR XMM3,XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ - LONG $0x0069c2c4; BYTE $0xd4 \ // VPSHUFB XMM2,XMM2,XMM12 /* v4 = v4<<(64-24) | v4>>24, v5 = v5<<(64-24) | v5>>24 */ - LONG $0x0061c2c4; BYTE $0xdc // VPSHUFB XMM3,XMM3,XMM12 /* v6 = v6<<(64-24) | v6>>24, v7 = v7<<(64-24) | v7>>24 */ - -#define G2 \ - \ // G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); - LONG $0xd479c1c4; BYTE $0xc2 \ // VPADDQ XMM0,XMM0,XMM10 /* v0 += m[1], v1 += m[3] */ - LONG $0xd471c1c4; BYTE $0xcb \ // VPADDQ XMM1,XMM1,XMM11 /* v2 += m[5], v3 += m[7] */ - LONG $0xc2d4f9c5 \ // VPADDQ XMM0,XMM0,XMM2 /* v0 += v4, v1 += v5 */ - LONG $0xcbd4f1c5 \ // VPADDQ XMM1,XMM1,XMM3 /* v2 += v6, v3 += v7 */ - LONG $0xf0efc9c5 \ // VPXOR XMM6,XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ - LONG $0xf9efc1c5 \ // VPXOR XMM7,XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ - LONG $0xf670fbc5; BYTE $0x39 \ // VPSHUFLW XMM6,XMM6,0x39 /* combined with next ... */ - LONG $0xf670fac5; BYTE $0x39 \ // VPSHUFHW XMM6,XMM6,0x39 /* v12 = v12<<(64-16) | v12>>16, v13 = v13<<(64-16) | v13>>16 */ - LONG $0xff70fbc5; BYTE $0x39 \ // VPSHUFLW XMM7,XMM7,0x39 /* combined with next ... 
*/ - LONG $0xff70fac5; BYTE $0x39 \ // VPSHUFHW XMM7,XMM7,0x39 /* v14 = v14<<(64-16) | v14>>16, v15 = v15<<(64-16) | v15>>16 */ - LONG $0xe6d4d9c5 \ // VPADDQ XMM4,XMM4,XMM6 /* v8 += v12, v9 += v13 */ - LONG $0xefd4d1c5 \ // VPADDQ XMM5,XMM5,XMM7 /* v10 += v14, v11 += v15 */ - LONG $0xd4efe9c5 \ // VPXOR XMM2,XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ - LONG $0xddefe1c5 \ // VPXOR XMM3,XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ - LONG $0xfad469c5 \ // VPADDQ XMM15,XMM2,XMM2 /* temp reg = reg*2 */ - LONG $0xd273e9c5; BYTE $0x3f \ // VPSRLQ XMM2,XMM2,0x3f /* reg = reg>>63 */ - LONG $0xef69c1c4; BYTE $0xd7 \ // VPXOR XMM2,XMM2,XMM15 /* ORed together: v4 = v4<<(64-63) | v4>>63, v5 = v5<<(64-63) | v5>>63 */ - LONG $0xfbd461c5 \ // VPADDQ XMM15,XMM3,XMM3 /* temp reg = reg*2 */ - LONG $0xd373e1c5; BYTE $0x3f \ // VPSRLQ XMM3,XMM3,0x3f /* reg = reg>>63 */ - LONG $0xef61c1c4; BYTE $0xdf // VPXOR XMM3,XMM3,XMM15 /* ORed together: v6 = v6<<(64-63) | v6>>63, v7 = v7<<(64-63) | v7>>63 */ - -#define DIAGONALIZE \ - \ // DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); - MOVOU X6, X13 \ /* t0 = row4l;\ */ - MOVOU X2, X14 \ /* t1 = row2l;\ */ - MOVOU X4, X6 \ /* row4l = row3l;\ */ - MOVOU X5, X4 \ /* row3l = row3h;\ */ - MOVOU X6, X5 \ /* row3h = row4l;\ */ - LONG $0x6c1141c4; BYTE $0xfd \ // VPUNPCKLQDQ XMM15, XMM13, XMM13 /* _mm_unpacklo_epi64(t0, t0) */ - LONG $0x6d41c1c4; BYTE $0xf7 \ // VPUNPCKHQDQ XMM6, XMM7, XMM15 /* row4l = _mm_unpackhi_epi64(row4h, ); \ */ - LONG $0xff6c41c5 \ // VPUNPCKLQDQ XMM15, XMM7, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ - LONG $0x6d11c1c4; BYTE $0xff \ // VPUNPCKHQDQ XMM7, XMM13, XMM15 /* row4h = _mm_unpackhi_epi64(t0, ); \ */ - LONG $0xfb6c61c5 \ // VPUNPCKLQDQ XMM15, XMM3, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ - LONG $0x6d69c1c4; BYTE $0xd7 \ // VPUNPCKHQDQ XMM2, XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2l, ); \ */ - LONG $0x6c0941c4; BYTE $0xfe \ // VPUNPCKLQDQ XMM15, XMM14, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ - LONG $0x6d61c1c4; 
BYTE $0xdf // VPUNPCKHQDQ XMM3, XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(row2h, ) */ - -#define UNDIAGONALIZE \ - \ // UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); - MOVOU X4, X13 \ /* t0 = row3l;\ */ - MOVOU X5, X4 \ /* row3l = row3h;\ */ - MOVOU X13, X5 \ /* row3h = t0;\ */ - MOVOU X2, X13 \ /* t0 = row2l;\ */ - MOVOU X6, X14 \ /* t1 = row4l;\ */ - LONG $0xfa6c69c5 \ // VPUNPCKLQDQ XMM15, XMM2, XMM2 /* _mm_unpacklo_epi64(row2l, row2l) */ - LONG $0x6d61c1c4; BYTE $0xd7 \ // VPUNPCKHQDQ XMM2, XMM3, XMM15 /* row2l = _mm_unpackhi_epi64(row2h, ); \ */ - LONG $0xfb6c61c5 \ // VPUNPCKLQDQ XMM15, XMM3, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ - LONG $0x6d11c1c4; BYTE $0xdf \ // VPUNPCKHQDQ XMM3, XMM13, XMM15 /* row2h = _mm_unpackhi_epi64(t0, ); \ */ - LONG $0xff6c41c5 \ // VPUNPCKLQDQ XMM15, XMM7, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ - LONG $0x6d49c1c4; BYTE $0xf7 \ // VPUNPCKHQDQ XMM6, XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4l, ); \ */ - LONG $0x6c0941c4; BYTE $0xfe \ // VPUNPCKLQDQ XMM15, XMM14, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ - LONG $0x6d41c1c4; BYTE $0xff // VPUNPCKHQDQ XMM7, XMM7, XMM15 /* row4h = _mm_unpackhi_epi64(row4h, ) */ - -#define LOAD_SHUFFLE \ - \ // Load shuffle value - MOVQ shffle+120(FP), SI \ // SI: &shuffle - MOVOU 0(SI), X12 // X12 = 03040506 07000102 0b0c0d0e 0f08090a - -// func blockAVXLoop(p []uint8, in, iv, t, f, shffle, out []uint64) -TEXT ·blockAVXLoop(SB), 7, $0 - // REGISTER USE - // R8: loop counter - // DX: message pointer - // SI: temp pointer for loading - // X0 - X7: v0 - v15 - // X8 - X11: m[0] - m[7] - // X12: shuffle value - // X13 - X15: temp registers - - // Load digest - MOVQ in+24(FP), SI // SI: &in - MOVOU 0(SI), X0 // X0 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ - MOVOU 16(SI), X1 // X1 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ - MOVOU 32(SI), X2 // X2 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ - MOVOU 48(SI), X3 // X3 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ - - // 
Already store digest into &out (so we can reload it later generically) - MOVQ out+144(FP), SI // SI: &out - MOVOU X0, 0(SI) // out[0]+out[1] = X0 - MOVOU X1, 16(SI) // out[2]+out[3] = X1 - MOVOU X2, 32(SI) // out[4]+out[5] = X2 - MOVOU X3, 48(SI) // out[6]+out[7] = X3 - - // Initialize message pointer and loop counter - MOVQ message+0(FP), DX // DX: &p (message) - MOVQ message_len+8(FP), R8 // R8: len(message) - SHRQ $7, R8 // len(message) / 128 - CMPQ R8, $0 - JEQ complete - -loop: - // Increment counter - MOVQ t+72(FP), SI // SI: &t - MOVQ 0(SI), R9 - ADDQ $128, R9 // /* d.t[0] += BlockSize */ - MOVQ R9, 0(SI) - CMPQ R9, $128 // /* if d.t[0] < BlockSize { */ - JGE noincr - MOVQ 8(SI), R9 - ADDQ $1, R9 // /* d.t[1]++ */ - MOVQ R9, 8(SI) -noincr: // /* } */ - - // Load initialization vector - MOVQ iv+48(FP), SI // SI: &iv - MOVOU 0(SI), X4 // X4 = iv[0]+iv[1] /* row3l = LOAD( &blake2b_IV[0] ); */ - MOVOU 16(SI), X5 // X5 = iv[2]+iv[3] /* row3h = LOAD( &blake2b_IV[2] ); */ - MOVOU 32(SI), X6 // X6 = iv[4]+iv[5] /* LOAD( &blake2b_IV[4] ) */ - MOVOU 48(SI), X7 // X7 = iv[6]+iv[7] /* LOAD( &blake2b_IV[6] ) */ - MOVQ t+72(FP), SI // SI: &t - MOVOU 0(SI), X8 // X8 = t[0]+t[1] /* LOAD( &S->t[0] ) */ - PXOR X8, X6 // X6 = X6 ^ X8 /* row4l = _mm_xor_si128( , ); */ - MOVQ t+96(FP), SI // SI: &f - MOVOU 0(SI), X8 // X8 = f[0]+f[1] /* LOAD( &S->f[0] ) */ - PXOR X8, X7 // X7 = X7 ^ X8 /* row4h = _mm_xor_si128( , ); */ - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+m[1] - MOVOU 16(DX), X13 // X13 = m[2]+m[3] - MOVOU 32(DX), X14 // X14 = m[4]+m[5] - MOVOU 48(DX), X15 // X15 = m[6]+m[7] - LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[2] */ - LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[4], m[6] */ - LONG 
$0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[1], m[3] */ - LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[5], m[7] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[8],m[10] */ - LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[12],m[14] */ - LONG $0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[9],m[11] */ - LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[13],m[15] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 2 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 112(DX), X12 // X12 = m[14]+m[15] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[14], m[4] */ - LONG $0x6d0941c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM14, XMM15 /* m[9], m[13] */ - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 48(DX), X15 // X15 = m[6]+ m[7] - LONG $0x6c1141c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[10], m[8] */ - LONG $0x0f0143c4; WORD $0x08dc // VPALIGNR XMM11, XMM15, XMM12, 0x8 /* m[15], m[6] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - LONG $0x0f1943c4; WORD $0x08c4 // VPALIGNR XMM8, XMM12, XMM12, 0x8 /* m[1], m[0] */ - LONG $0x6d0941c4; BYTE $0xcd // VPUNPCKHQDQ XMM9, XMM14, 
XMM13 /* m[11], m[5] */ - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - LONG $0x6c0941c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[2] */ - LONG $0x6d1141c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM13, XMM12 /* m[7], m[3] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 3 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 32(DX), X12 // X12 = m[4]+ m[5] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x0f0943c4; WORD $0x08c5 // VPALIGNR XMM8, XMM14, XMM13, 0x8 /* m[11], m[12] */ - LONG $0x6d1941c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM12, XMM15 /* m[5], m[15] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 64(DX), X15 // X15 = m[8]+ m[9] - LONG $0x6c0141c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM15, XMM12 /* m[8], m[0] */ - LONG $0x6d0941c4; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[13] */ - LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[2], ___ */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - LONG $0x6d1941c4; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM12, XMM12 /* ___, m[3] */ - LONG $0x6c0141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[10], ___ */ - LONG $0x6d1141c4; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM13, XMM14 /* m[7], m[9] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X14 // X14 = m[4]+ m[5] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6c0141c4; BYTE $0xd5 // VPUNPCKLQDQ XMM10, XMM15, XMM13 /* 
m[14], m[6] */ - LONG $0x0f0943c4; WORD $0x08dc // VPALIGNR XMM11, XMM14, XMM12, 0x8 /* m[1], m[4] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 4 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - LONG $0x6d1141c4; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM13, XMM12 /* m[7], m[3] */ - LONG $0x6d0141c4; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM15, XMM14 /* m[13], m[11] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 112(DX), X14 // X14 = m[14]+m[15] - LONG $0x6d1141c4; BYTE $0xd4 // VPUNPCKHQDQ XMM10, XMM13, XMM12 /* m[9], m[1] */ - LONG $0x6c0141c4; BYTE $0xde // VPUNPCKLQDQ XMM11, XMM15, XMM14 /* m[12], m[14] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6d1141c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM13, XMM13 /* ___, m[5] */ - LONG $0x6c1941c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM12, XMM8 /* m[2], ____ */ - LONG $0x6d0141c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM15, XMM15 /* ___, m[15] */ - LONG $0x6c1141c4; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM13, XMM9 /* m[4], ____ */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X15 // X15 = m[8]+ m[9] - LONG $0x6c1141c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[6], m[10] */ - LONG $0x6c1941c4; BYTE $0xdf // VPUNPCKLQDQ XMM11, XMM12, XMM15 /* m[0], m[8] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - 
/////////////////////////////////////////////////////////////////////////// - // R O U N D 5 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - LONG $0x6d0941c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM14, XMM13 /* m[9], m[5] */ - LONG $0x6c1941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM12, XMM15 /* m[2], m[10] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6d0941c4; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM14, XMM14 /* ___, m[7] */ - LONG $0x6c1941c4; BYTE $0xd2 // VPUNPCKLQDQ XMM10, XMM12, XMM10 /* m[0], ____ */ - LONG $0x6d0141c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM15, XMM15 /* ___, m[15] */ - LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[4], ____ */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6d0941c4; BYTE $0xc6 // VPUNPCKHQDQ XMM8, XMM14, XMM14 /* ___, m[11] */ - LONG $0x6c0141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[14], ____ */ - LONG $0x6d1941c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM12, XMM12 /* ___, m[3] */ - LONG $0x6c1141c4; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM13, XMM9 /* m[6], ____ */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - LONG $0x0f0943c4; WORD $0x08d4 // VPALIGNR XMM10, XMM14, XMM12, 0x8 /* m[1], m[12] */ - LONG $0x6d0941c4; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[13] */ - LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[8], ____ */ - - LOAD_SHUFFLE 
- G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 6 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 64(DX), X15 // X15 = m[8]+ m[9] - LONG $0x6c1141c4; BYTE $0xc6 // VPUNPCKLQDQ XMM8, XMM13, XMM14 /* m[2], m[6] */ - LONG $0x6c1941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM12, XMM15 /* m[0], m[8] */ - MOVOU 80(DX), X12 // X12 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - LONG $0x6c0941c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[10] */ - LONG $0x6d1941c4; BYTE $0xdd // VPUNPCKHQDQ XMM11, XMM12, XMM13 /* m[11], m[3] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6d0941c4; BYTE $0xc6 // VPUNPCKHQDQ XMM8, XMM14, XMM14 /* ___, m[7] */ - LONG $0x6c1141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM13, XMM8 /* m[4], ____ */ - LONG $0x6d0141c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM15, XMM12 /* m[15], m[1] */ - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - LONG $0x6d0941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM14, XMM13 /* m[13], m[5] */ - LONG $0x6d1941c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM12, XMM12 /* ___, m[9] */ - LONG $0x6c0141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM15, XMM11 /* m[14], ____ */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 7 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ 
m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6d1941c4; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM12, XMM12 /* ___, m[1] */ - LONG $0x6c0941c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM14, XMM8 /* m[12], ____ */ - LONG $0x6c0141c4; BYTE $0xcd // VPUNPCKLQDQ XMM9, XMM15, XMM13 /* m[14], m[4] */ - MOVOU 80(DX), X12 // X12 = m[10]+m[11] - LONG $0x6d1141c4; BYTE $0xd7 // VPUNPCKHQDQ XMM10, XMM13, XMM15 /* m[5], m[15] */ - LONG $0x0f1943c4; WORD $0x08de // VPALIGNR XMM11, XMM12, XMM14, 0x8 /* m[13], m[10] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[6] */ - LONG $0x0f0943c4; WORD $0x08ce // VPALIGNR XMM9, XMM14, XMM14, 0x8 /* m[9], m[8] */ - MOVOU 16(DX), X14 // X14 = m[2]+ m[3] - LONG $0x6d1141c4; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM13, XMM14 /* m[7], m[3] */ - LONG $0x6d0141c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM15, XMM15 /* ___, m[11] */ - LONG $0x6c0941c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM14, XMM11 /* m[2], ____ */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 8 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6d0941c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM14, XMM13 /* m[13], m[7] */ - LONG $0x6d1941c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM12, XMM12 /* ___, m[3] */ - LONG $0x6c0941c4; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM14, XMM9 /* m[12], ____ 
*/ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - LONG $0x0f0143c4; WORD $0x08d6 // VPALIGNR XMM10, XMM15, XMM14, 0x8 /* m[11], m[14] */ - LONG $0x6d1941c4; BYTE $0xdd // VPUNPCKHQDQ XMM11, XMM12, XMM13 /* m[1], m[9] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6d1141c4; BYTE $0xc7 // VPUNPCKHQDQ XMM8, XMM13, XMM15 /* m[5], m[15] */ - LONG $0x6c0941c4; BYTE $0xcc // VPUNPCKLQDQ XMM9, XMM14, XMM12 /* m[8], m[2] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - LONG $0x6c1941c4; BYTE $0xd5 // VPUNPCKLQDQ XMM10, XMM12, XMM13 /* m[0], m[4] */ - LONG $0x6c0941c4; BYTE $0xdf // VPUNPCKLQDQ XMM11, XMM14, XMM15 /* m[6], m[10] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 9 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6c1141c4; BYTE $0xc7 // VPUNPCKLQDQ XMM8, XMM13, XMM15 /* m[6], m[14] */ - LONG $0x0f1943c4; WORD $0x08ce // VPALIGNR XMM9, XMM12, XMM14, 0x8 /* m[11], m[0] */ - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - LONG $0x6d0141c4; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM15, XMM14 /* m[15], m[9] */ - LONG $0x0f0943c4; WORD $0x08dd // VPALIGNR XMM11, XMM14, XMM13, 0x8 /* m[3], m[8] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 
0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - LONG $0x6d0141c4; BYTE $0xc7 // VPUNPCKHQDQ XMM8, XMM15, XMM15 /* ___, m[13] */ - LONG $0x6c0141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[12], ____ */ - LONG $0x0f0943c4; WORD $0x08cc // VPALIGNR XMM9, XMM14, XMM12, 0x8 /* m[1], m[10] */ - MOVOU 32(DX), X12 // X12 = m[4]+ m[5] - MOVOU 48(DX), X15 // X15 = m[6]+ m[7] - LONG $0x6d0141c4; BYTE $0xd7 // VPUNPCKHQDQ XMM10, XMM15, XMM15 /* ___, m[7] */ - LONG $0x6c1141c4; BYTE $0xd2 // VPUNPCKLQDQ XMM10, XMM13, XMM10 /* m[2], ____ */ - LONG $0x6d1941c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM12, XMM12 /* ___, m[5] */ - LONG $0x6c1941c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM12, XMM11 /* m[4], ____ */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 0 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - LONG $0x6c0141c4; BYTE $0xc6 // VPUNPCKLQDQ XMM8, XMM15, XMM14 /* m[10], m[8] */ - LONG $0x6d1141c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM13, XMM12 /* m[7], m[1] */ - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X14 // X14 = m[4]+ m[5] - LONG $0x6c1941c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM12, XMM14 /* m[2], m[4] */ - LONG $0x6d0941c4; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[5] */ - LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[6], ____ */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 
// X15 = m[14]+m[15] - LONG $0x6d0141c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM15, XMM13 /* m[15], m[9] */ - LONG $0x6d1941c4; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM12, XMM14 /* m[3], m[13] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - LONG $0x0f0143c4; WORD $0x08d5 // VPALIGNR XMM10, XMM15, XMM13, 0x8 /* m[11], m[14] */ - LONG $0x6c0941c4; BYTE $0xdc // VPUNPCKLQDQ XMM11, XMM14, XMM12 /* m[12], m[0] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 1 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+m[1] - MOVOU 16(DX), X13 // X13 = m[2]+m[3] - MOVOU 32(DX), X14 // X14 = m[4]+m[5] - MOVOU 48(DX), X15 // X15 = m[6]+m[7] - LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[2] */ - LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[4], m[6] */ - LONG $0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[1], m[3] */ - LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[5], m[7] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[8],m[10] */ - LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[12],m[14] */ - LONG $0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[9],m[11] */ - LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[13],m[15] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 2 - 
/////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 112(DX), X12 // X12 = m[14]+m[15] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[14], m[4] */ - LONG $0x6d0941c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM14, XMM15 /* m[9], m[13] */ - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 48(DX), X15 // X15 = m[6]+ m[7] - LONG $0x6c1141c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[10], m[8] */ - LONG $0x0f0143c4; WORD $0x08dc // VPALIGNR XMM11, XMM15, XMM12, 0x8 /* m[15], m[6] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - LONG $0x0f1943c4; WORD $0x08c4 // VPALIGNR XMM8, XMM12, XMM12, 0x8 /* m[1], m[0] */ - LONG $0x6d0941c4; BYTE $0xcd // VPUNPCKHQDQ XMM9, XMM14, XMM13 /* m[11], m[5] */ - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - LONG $0x6c0941c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[2] */ - LONG $0x6d1141c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM13, XMM12 /* m[7], m[3] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - // Reload digest (most current value store in &out) - MOVQ out+144(FP), SI // SI: &in - MOVOU 0(SI), X12 // X12 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ - MOVOU 16(SI), X13 // X13 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ - MOVOU 32(SI), X14 // X14 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ - MOVOU 48(SI), X15 // X15 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ - - // Final computations and prepare for storing - PXOR X4, X0 // X0 = X0 ^ X4 /* row1l = _mm_xor_si128( row3l, row1l ); */ - PXOR X5, X1 // X1 = X1 ^ X5 /* row1h = 
_mm_xor_si128( row3h, row1h ); */ - PXOR X12, X0 // X0 = X0 ^ X12 /* STORE( &S->h[0], _mm_xor_si128( LOAD( &S->h[0] ), row1l ) ); */ - PXOR X13, X1 // X1 = X1 ^ X13 /* STORE( &S->h[2], _mm_xor_si128( LOAD( &S->h[2] ), row1h ) ); */ - PXOR X6, X2 // X2 = X2 ^ X6 /* row2l = _mm_xor_si128( row4l, row2l ); */ - PXOR X7, X3 // X3 = X3 ^ X7 /* row2h = _mm_xor_si128( row4h, row2h ); */ - PXOR X14, X2 // X2 = X2 ^ X14 /* STORE( &S->h[4], _mm_xor_si128( LOAD( &S->h[4] ), row2l ) ); */ - PXOR X15, X3 // X3 = X3 ^ X15 /* STORE( &S->h[6], _mm_xor_si128( LOAD( &S->h[6] ), row2h ) ); */ - - // Store digest into &out - MOVQ out+144(FP), SI // SI: &out - MOVOU X0, 0(SI) // out[0]+out[1] = X0 - MOVOU X1, 16(SI) // out[2]+out[3] = X1 - MOVOU X2, 32(SI) // out[4]+out[5] = X2 - MOVOU X3, 48(SI) // out[6]+out[7] = X3 - - // Increment message pointer and check if there's more to do - ADDQ $128, DX // message += 128 - SUBQ $1, R8 - JNZ loop - -complete: - RET diff --git a/vendor/github.com/minio/blake2b-simd/compressSse_amd64.go b/vendor/github.com/minio/blake2b-simd/compressSse_amd64.go deleted file mode 100644 index d539a7ade..000000000 --- a/vendor/github.com/minio/blake2b-simd/compressSse_amd64.go +++ /dev/null @@ -1,41 +0,0 @@ -//+build !noasm -//+build !appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package blake2b - -//go:noescape -func blockSSELoop(p []uint8, in, iv, t, f, shffle, out []uint64) - -func compressSSE(d *digest, p []uint8) { - var ( - in [8]uint64 - out [8]uint64 - shffle [2]uint64 - ) - - // vector for PSHUFB instruction - shffle[0] = 0x0201000706050403 - shffle[1] = 0x0a09080f0e0d0c0b - - in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7] = d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] - - blockSSELoop(p, in[:], iv[:], d.t[:], d.f[:], shffle[:], out[:]) - - d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7] -} diff --git a/vendor/github.com/minio/blake2b-simd/compressSse_amd64.s b/vendor/github.com/minio/blake2b-simd/compressSse_amd64.s deleted file mode 100644 index 6f31c949e..000000000 --- a/vendor/github.com/minio/blake2b-simd/compressSse_amd64.s +++ /dev/null @@ -1,770 +0,0 @@ -//+build !noasm !appengine - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -// -// Based on SSE implementation from https://github.com/BLAKE2/BLAKE2/blob/master/sse/blake2b.c -// -// Use github.com/fwessels/asm2plan9s on this file to assemble instructions to their Plan9 equivalent -// -// Assembly code below essentially follows the ROUND macro (see blake2b-round.h) which is defined as: -// #define ROUND(r) \ -// LOAD_MSG_ ##r ##_1(b0, b1); \ -// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// LOAD_MSG_ ##r ##_2(b0, b1); \ -// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ -// LOAD_MSG_ ##r ##_3(b0, b1); \ -// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// LOAD_MSG_ ##r ##_4(b0, b1); \ -// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ -// UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); -// -// as well as the go equivalent in https://github.com/dchest/blake2b/blob/master/block.go -// -// As in the macro, G1/G2 in the 1st and 2nd half are identical (so literal copy of assembly) -// -// Rounds are also the same, except for the loading of the message (and rounds 1 & 11 and -// rounds 2 & 12 are identical) -// - -#define G1 \ - \ // G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); - LONG $0xd40f4166; BYTE $0xc0 \ // PADDQ XMM0,XMM8 /* v0 += m[0], v1 += m[2] */ - LONG $0xd40f4166; BYTE $0xc9 \ // PADDQ XMM1,XMM9 /* v2 += m[4], v3 += m[6] */ - LONG $0xc2d40f66 \ // PADDQ XMM0,XMM2 /* v0 += v4, v1 += v5 */ - LONG $0xcbd40f66 \ // PADDQ XMM1,XMM3 /* v2 += v6, v3 += v7 */ - LONG $0xf0ef0f66 \ // PXOR XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ - LONG $0xf9ef0f66 \ // PXOR XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ - LONG $0xf6700f66; BYTE $0xb1 \ // PSHUFD XMM6,XMM6,0xb1 /* v12 = v12<<(64-32) | v12>>32, v13 = v13<<(64-32) | v13>>32 */ - LONG $0xff700f66; BYTE $0xb1 \ // PSHUFD XMM7,XMM7,0xb1 /* v14 = v14<<(64-32) | v14>>32, v15 = v15<<(64-32) | v15>>32 */ - LONG $0xe6d40f66 \ // PADDQ XMM4,XMM6 /* v8 
+= v12, v9 += v13 */ - LONG $0xefd40f66 \ // PADDQ XMM5,XMM7 /* v10 += v14, v11 += v15 */ - LONG $0xd4ef0f66 \ // PXOR XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ - LONG $0xddef0f66 \ // PXOR XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ - LONG $0x380f4166; WORD $0xd400 \ // PSHUFB XMM2,XMM12 /* v4 = v4<<(64-24) | v4>>24, v5 = v5<<(64-24) | v5>>24 */ - LONG $0x380f4166; WORD $0xdc00 // PSHUFB XMM3,XMM12 /* v6 = v6<<(64-24) | v6>>24, v7 = v7<<(64-24) | v7>>24 */ - -#define G2 \ - \ // G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); - LONG $0xd40f4166; BYTE $0xc2 \ // PADDQ XMM0,XMM10 /* v0 += m[1], v1 += m[3] */ - LONG $0xd40f4166; BYTE $0xcb \ // PADDQ XMM1,XMM11 /* v2 += m[5], v3 += m[7] */ - LONG $0xc2d40f66 \ // PADDQ XMM0,XMM2 /* v0 += v4, v1 += v5 */ - LONG $0xcbd40f66 \ // PADDQ XMM1,XMM3 /* v2 += v6, v3 += v7 */ - LONG $0xf0ef0f66 \ // PXOR XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ - LONG $0xf9ef0f66 \ // PXOR XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ - LONG $0xf6700ff2; BYTE $0x39 \ // PSHUFLW XMM6,XMM6,0x39 /* combined with next ... */ - LONG $0xf6700ff3; BYTE $0x39 \ // PSHUFHW XMM6,XMM6,0x39 /* v12 = v12<<(64-16) | v12>>16, v13 = v13<<(64-16) | v13>>16 */ - LONG $0xff700ff2; BYTE $0x39 \ // PSHUFLW XMM7,XMM7,0x39 /* combined with next ... 
*/ - LONG $0xff700ff3; BYTE $0x39 \ // PSHUFHW XMM7,XMM7,0x39 /* v14 = v14<<(64-16) | v14>>16, v15 = v15<<(64-16) | v15>>16 */ - LONG $0xe6d40f66 \ // PADDQ XMM4,XMM6 /* v8 += v12, v9 += v13 */ - LONG $0xefd40f66 \ // PADDQ XMM5,XMM7 /* v10 += v14, v11 += v15 */ - LONG $0xd4ef0f66 \ // PXOR XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ - LONG $0xddef0f66 \ // PXOR XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ - MOVOU X2, X15 \ - LONG $0xd40f4466; BYTE $0xfa \ // PADDQ XMM15,XMM2 /* temp reg = reg*2 */ - LONG $0xd2730f66; BYTE $0x3f \ // PSRLQ XMM2,0x3f /* reg = reg>>63 */ - LONG $0xef0f4166; BYTE $0xd7 \ // PXOR XMM2,XMM15 /* ORed together: v4 = v4<<(64-63) | v4>>63, v5 = v5<<(64-63) | v5>>63 */ - MOVOU X3, X15 \ - LONG $0xd40f4466; BYTE $0xfb \ // PADDQ XMM15,XMM3 /* temp reg = reg*2 */ - LONG $0xd3730f66; BYTE $0x3f \ // PSRLQ XMM3,0x3f /* reg = reg>>63 */ - LONG $0xef0f4166; BYTE $0xdf // PXOR XMM3,XMM15 /* ORed together: v6 = v6<<(64-63) | v6>>63, v7 = v7<<(64-63) | v7>>63 */ - -#define DIAGONALIZE \ - \ // DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); - MOVOU X6, X13 \ /* t0 = row4l;\ */ - MOVOU X2, X14 \ /* t1 = row2l;\ */ - MOVOU X4, X6 \ /* row4l = row3l;\ */ - MOVOU X5, X4 \ /* row3l = row3h;\ */ - MOVOU X6, X5 \ /* row3h = row4l;\ */ - LONG $0x6c0f4566; BYTE $0xfd \ // PUNPCKLQDQ XMM15, XMM13 /* _mm_unpacklo_epi64(t0, t0) */ - MOVOU X7, X6 \ - LONG $0x6d0f4166; BYTE $0xf7 \ // PUNPCKHQDQ XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4h, ); \ */ - LONG $0x6c0f4466; BYTE $0xff \ // PUNPCKLQDQ XMM15, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ - MOVOU X13, X7 \ - LONG $0x6d0f4166; BYTE $0xff \ // PUNPCKHQDQ XMM7, XMM15 /* row4h = _mm_unpackhi_epi64(t0, ); \ */ - LONG $0x6c0f4466; BYTE $0xfb \ // PUNPCKLQDQ XMM15, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ - LONG $0x6d0f4166; BYTE $0xd7 \ // PUNPCKHQDQ XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2l, ); \ */ - LONG $0x6c0f4566; BYTE $0xfe \ // PUNPCKLQDQ XMM15, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ - LONG 
$0x6d0f4166; BYTE $0xdf // PUNPCKHQDQ XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(row2h, ) */ - -#define UNDIAGONALIZE \ - \ // UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); - MOVOU X4, X13 \ /* t0 = row3l;\ */ - MOVOU X5, X4 \ /* row3l = row3h;\ */ - MOVOU X13, X5 \ /* row3h = t0;\ */ - MOVOU X2, X13 \ /* t0 = row2l;\ */ - MOVOU X6, X14 \ /* t1 = row4l;\ */ - LONG $0x6c0f4466; BYTE $0xfa \ // PUNPCKLQDQ XMM15, XMM2 /* _mm_unpacklo_epi64(row2l, row2l) */ - MOVOU X3, X2 \ - LONG $0x6d0f4166; BYTE $0xd7 \ // PUNPCKHQDQ XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2h, ); \ */ - LONG $0x6c0f4466; BYTE $0xfb \ // PUNPCKLQDQ XMM15, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ - MOVOU X13, X3 \ - LONG $0x6d0f4166; BYTE $0xdf \ // PUNPCKHQDQ XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(t0, ); \ */ - LONG $0x6c0f4466; BYTE $0xff \ // PUNPCKLQDQ XMM15, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ - LONG $0x6d0f4166; BYTE $0xf7 \ // PUNPCKHQDQ XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4l, ); \ */ - LONG $0x6c0f4566; BYTE $0xfe \ // PUNPCKLQDQ XMM15, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ - LONG $0x6d0f4166; BYTE $0xff // PUNPCKHQDQ XMM7, XMM15 /* row4h = _mm_unpackhi_epi64(row4h, ) */ - -#define LOAD_SHUFFLE \ - \ // Load shuffle value - MOVQ shffle+120(FP), SI \ // SI: &shuffle - MOVOU 0(SI), X12 // X12 = 03040506 07000102 0b0c0d0e 0f08090a - -// func blockSSELoop(p []uint8, in, iv, t, f, shffle, out []uint64) -TEXT ·blockSSELoop(SB), 7, $0 - // REGISTER USE - // R8: loop counter - // DX: message pointer - // SI: temp pointer for loading - // X0 - X7: v0 - v15 - // X8 - X11: m[0] - m[7] - // X12: shuffle value - // X13 - X15: temp registers - - // Load digest - MOVQ in+24(FP), SI // SI: &in - MOVOU 0(SI), X0 // X0 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ - MOVOU 16(SI), X1 // X1 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ - MOVOU 32(SI), X2 // X2 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ - MOVOU 48(SI), X3 // X3 = in[6]+in[7] /* row2h = LOAD( 
&S->h[6] ); */ - - // Already store digest into &out (so we can reload it later generically) - MOVQ out+144(FP), SI // SI: &out - MOVOU X0, 0(SI) // out[0]+out[1] = X0 - MOVOU X1, 16(SI) // out[2]+out[3] = X1 - MOVOU X2, 32(SI) // out[4]+out[5] = X2 - MOVOU X3, 48(SI) // out[6]+out[7] = X3 - - // Initialize message pointer and loop counter - MOVQ message+0(FP), DX // DX: &p (message) - MOVQ message_len+8(FP), R8 // R8: len(message) - SHRQ $7, R8 // len(message) / 128 - CMPQ R8, $0 - JEQ complete - -loop: - // Increment counter - MOVQ t+72(FP), SI // SI: &t - MOVQ 0(SI), R9 - ADDQ $128, R9 // /* d.t[0] += BlockSize */ - MOVQ R9, 0(SI) - CMPQ R9, $128 // /* if d.t[0] < BlockSize { */ - JGE noincr - MOVQ 8(SI), R9 - ADDQ $1, R9 // /* d.t[1]++ */ - MOVQ R9, 8(SI) - -noincr: // /* } */ - - // Load initialization vector - MOVQ iv+48(FP), SI // SI: &iv - MOVOU 0(SI), X4 // X4 = iv[0]+iv[1] /* row3l = LOAD( &blake2b_IV[0] ); */ - MOVOU 16(SI), X5 // X5 = iv[2]+iv[3] /* row3h = LOAD( &blake2b_IV[2] ); */ - MOVOU 32(SI), X6 // X6 = iv[4]+iv[5] /* LOAD( &blake2b_IV[4] ) */ - MOVOU 48(SI), X7 // X7 = iv[6]+iv[7] /* LOAD( &blake2b_IV[6] ) */ - MOVQ t+72(FP), SI // SI: &t - MOVOU 0(SI), X8 // X8 = t[0]+t[1] /* LOAD( &S->t[0] ) */ - PXOR X8, X6 // X6 = X6 ^ X8 /* row4l = _mm_xor_si128( , ); */ - MOVQ t+96(FP), SI // SI: &f - MOVOU 0(SI), X8 // X8 = f[0]+f[1] /* LOAD( &S->f[0] ) */ - PXOR X8, X7 // X7 = X7 ^ X8 /* row4h = _mm_xor_si128( , ); */ - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+m[1] - MOVOU 16(DX), X13 // X13 = m[2]+m[3] - MOVOU 32(DX), X14 // X14 = m[4]+m[5] - MOVOU 48(DX), X15 // X15 = m[6]+m[7] - MOVOU X12, X8 - LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[2] */ - MOVOU X14, X9 - LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ 
XMM9, XMM15 /* m[4], m[6] */ - MOVOU X12, X10 - LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[1], m[3] */ - MOVOU X14, X11 - LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[5], m[7] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X12, X8 - LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[8],m[10] */ - MOVOU X14, X9 - LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[12],m[14] */ - MOVOU X12, X10 - LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[9],m[11] */ - MOVOU X14, X11 - LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[13],m[15] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 2 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 112(DX), X12 // X12 = m[14]+m[15] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - MOVOU X12, X8 - LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[14], m[4] */ - MOVOU X14, X9 - LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[9], m[13] */ - MOVOU 80(DX), X10 // X10 = m[10]+m[11] - MOVOU 48(DX), X11 // X11 = m[6]+ m[7] - LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[10], m[8] */ - LONG $0x3a0f4566; WORD $0xdc0f; BYTE $0x08 // PALIGNR XMM11, XMM12, 0x8 /* m[15], m[6] */; ; ; ; ; - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU X12, X8 - LONG $0x3a0f4566; 
WORD $0xc40f; BYTE $0x08 // PALIGNR XMM8, XMM12, 0x8 /* m[1], m[0] */ - MOVOU X14, X9 - LONG $0x6d0f4566; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* m[11], m[5] */ - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X11 // X11 = m[6]+ m[7] - MOVOU 96(DX), X10 // X10 = m[12]+m[13] - LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[2] */ - LONG $0x6d0f4566; BYTE $0xdc // PUNPCKHQDQ XMM11, XMM12 /* m[7], m[3] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 3 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 32(DX), X12 // X12 = m[4]+ m[5] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X14, X8 - LONG $0x3a0f4566; WORD $0xc50f; BYTE $0x08 // PALIGNR XMM8, XMM13, 0x8 /* m[11], m[12] */ - MOVOU X12, X9 - LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[5], m[15] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 64(DX), X10 // X10 = m[8]+ m[9] - LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[8], m[0] */ - LONG $0x6d0f4566; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[13] */ - MOVOU X13, X11 - LONG $0x6c0f4566; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[2], ___ */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - MOVOU X12, X9 - LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* ___, m[3] */ - MOVOU X15, X8 - LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[10], ___ */ - MOVOU X13, X9 - LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[7], m[9] */ - MOVOU 0(DX), X12 // X12 
= m[0]+ m[1] - MOVOU 32(DX), X11 // X11 = m[4]+ m[5] - MOVOU 112(DX), X10 // X10 = m[14]+m[15] - LONG $0x6c0f4566; BYTE $0xd5 // PUNPCKLQDQ XMM10, XMM13 /* m[14], m[6] */ - LONG $0x3a0f4566; WORD $0xdc0f; BYTE $0x08 // PALIGNR XMM11, XMM12, 0x8 /* m[1], m[4] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 4 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - MOVOU X13, X8 - LONG $0x6d0f4566; BYTE $0xc4 // PUNPCKHQDQ XMM8, XMM12 /* m[7], m[3] */ - MOVOU X15, X9 - LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[13], m[11] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 64(DX), X10 // X10 = m[8]+ m[9] - MOVOU 112(DX), X14 // X14 = m[14]+m[15] - LONG $0x6d0f4566; BYTE $0xd4 // PUNPCKHQDQ XMM10, XMM12 /* m[9], m[1] */ - MOVOU X15, X11 - LONG $0x6c0f4566; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[12], m[14] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X13, X9 - LONG $0x6d0f4566; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* ___, m[5] */ - MOVOU X12, X8 - LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[2], ____ */ - MOVOU X15, X10 - LONG $0x6d0f4566; BYTE $0xd7 // PUNPCKHQDQ XMM10, XMM15 /* ___, m[15] */ - MOVOU X13, X9 - LONG $0x6c0f4566; BYTE $0xca // PUNPCKLQDQ XMM9, XMM10 /* m[4], ____ */ - MOVOU 0(DX), X11 // X11 = m[0]+ m[1] - MOVOU 48(DX), X10 // X10 = m[6]+ m[7] - MOVOU 64(DX), X15 // X15 = m[8]+ m[9] - LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[6], m[10] 
*/ - LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[0], m[8] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 5 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - MOVOU X14, X8 - LONG $0x6d0f4566; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[9], m[5] */ - MOVOU X12, X9 - LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[2], m[10] */ - MOVOU 0(DX), X10 // X10 = m[0]+ m[1] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6d0f4566; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[7] */ - LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[0], ____ */ - LONG $0x6d0f4566; BYTE $0xff // PUNPCKHQDQ XMM15, XMM15 /* ___, m[15] */ - MOVOU X13, X11 - LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[4], ____ */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - LONG $0x6d0f4566; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[11] */ - MOVOU X15, X8 - LONG $0x6c0f4566; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[14], ____ */ - LONG $0x6d0f4566; BYTE $0xe4 // PUNPCKHQDQ XMM12, XMM12 /* ___, m[3] */ - MOVOU X13, X9 - LONG $0x6c0f4566; BYTE $0xcc // PUNPCKLQDQ XMM9, XMM12 /* m[6], ____ */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 64(DX), X11 // X11 = m[8]+ m[9] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU X14, X10 - LONG $0x3a0f4566; WORD $0xd40f; BYTE $0x08 // PALIGNR XMM10, XMM12, 0x8 /* m[1], m[12] */ - LONG $0x6d0f4566; BYTE $0xf6 
// PUNPCKHQDQ XMM14, XMM14 /* ___, m[13] */ - LONG $0x6c0f4566; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[8], ____ */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 6 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 64(DX), X15 // X15 = m[8]+ m[9] - MOVOU X13, X8 - LONG $0x6c0f4566; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[2], m[6] */ - MOVOU X12, X9 - LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[0], m[8] */ - MOVOU 80(DX), X12 // X12 = m[10]+m[11] - MOVOU 96(DX), X10 // X10 = m[12]+m[13] - LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[10] */ - MOVOU X12, X11 - LONG $0x6d0f4566; BYTE $0xdd // PUNPCKHQDQ XMM11, XMM13 /* m[11], m[3] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 48(DX), X14 // X14 = m[6]+ m[7] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X14, X9 - LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* ___, m[7] */ - MOVOU X13, X8 - LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[4], ____ */ - MOVOU X15, X9 - LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* m[15], m[1] */ - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 96(DX), X10 // X10 = m[12]+m[13] - LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[13], m[5] */ - LONG $0x6d0f4566; BYTE $0xe4 // PUNPCKHQDQ XMM12, XMM12 /* ___, m[9] */ - MOVOU X15, X11 - LONG $0x6c0f4566; BYTE $0xdc // PUNPCKLQDQ XMM11, XMM12 /* m[14], ____ */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 7 - 
/////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X12, X9 - LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* ___, m[1] */ - MOVOU X14, X8 - LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[12], ____ */ - MOVOU X15, X9 - LONG $0x6c0f4566; BYTE $0xcd // PUNPCKLQDQ XMM9, XMM13 /* m[14], m[4] */ - MOVOU 80(DX), X11 // X11 = m[10]+m[11] - MOVOU X13, X10 - LONG $0x6d0f4566; BYTE $0xd7 // PUNPCKHQDQ XMM10, XMM15 /* m[5], m[15] */ - LONG $0x3a0f4566; WORD $0xde0f; BYTE $0x08 // PALIGNR XMM11, XMM14, 0x8 /* m[13], m[10] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - MOVOU X12, X8 - LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[6] */ - MOVOU X14, X9 - LONG $0x3a0f4566; WORD $0xce0f; BYTE $0x08 // PALIGNR XMM9, XMM14, 0x8 /* m[9], m[8] */ - MOVOU 16(DX), X11 // X14 = m[2]+ m[3] - MOVOU X13, X10 - LONG $0x6d0f4566; BYTE $0xd3 // PUNPCKHQDQ XMM10, XMM11 /* m[7], m[3] */ - LONG $0x6d0f4566; BYTE $0xff // PUNPCKHQDQ XMM15, XMM15 /* ___, m[11] */ - LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[2], ____ */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 8 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU 
X14, X8 - LONG $0x6d0f4566; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[13], m[7] */ - MOVOU X12, X10 - LONG $0x6d0f4566; BYTE $0xd4 // PUNPCKHQDQ XMM10, XMM12 /* ___, m[3] */ - MOVOU X14, X9 - LONG $0x6c0f4566; BYTE $0xca // PUNPCKLQDQ XMM9, XMM10 /* m[12], ____ */ - MOVOU 0(DX), X11 // X11 = m[0]+ m[1] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU X15, X10 - LONG $0x3a0f4566; WORD $0xd60f; BYTE $0x08 // PALIGNR XMM10, XMM14, 0x8 /* m[11], m[14] */ - LONG $0x6d0f4566; BYTE $0xdd // PUNPCKHQDQ XMM11, XMM13 /* m[1], m[9] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X13, X8 - LONG $0x6d0f4566; BYTE $0xc7 // PUNPCKHQDQ XMM8, XMM15 /* m[5], m[15] */ - MOVOU X14, X9 - LONG $0x6c0f4566; BYTE $0xcc // PUNPCKLQDQ XMM9, XMM12 /* m[8], m[2] */ - MOVOU 0(DX), X10 // X10 = m[0]+ m[1] - MOVOU 48(DX), X11 // X11 = m[6]+ m[7] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - LONG $0x6c0f4566; BYTE $0xd5 // PUNPCKLQDQ XMM10, XMM13 /* m[0], m[4] */ - LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[6], m[10] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 9 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X13, X8 - LONG $0x6c0f4566; BYTE $0xc7 // PUNPCKLQDQ XMM8, XMM15 /* m[6], m[14] */ - MOVOU X12, X9 - LONG $0x3a0f4566; WORD $0xce0f; BYTE $0x08 // PALIGNR XMM9, XMM14, 0x8 /* m[11], m[0] */ - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 64(DX), X11 // X11 
= m[8]+ m[9] - MOVOU X15, X10 - LONG $0x6d0f4566; BYTE $0xd3 // PUNPCKHQDQ XMM10, XMM11 /* m[15], m[9] */ - LONG $0x3a0f4566; WORD $0xdd0f; BYTE $0x08 // PALIGNR XMM11, XMM13, 0x8 /* m[3], m[8] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 16(DX), X13 // X13 = m[2]+ m[3] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - MOVOU X15, X9 - LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* ___, m[13] */ - MOVOU X15, X8 - LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[12], ____ */ - MOVOU X14, X9 - LONG $0x3a0f4566; WORD $0xcc0f; BYTE $0x08 // PALIGNR XMM9, XMM12, 0x8 /* m[1], m[10] */ - MOVOU 32(DX), X12 // X12 = m[4]+ m[5] - MOVOU 48(DX), X15 // X15 = m[6]+ m[7] - MOVOU X15, X11 - LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* ___, m[7] */ - MOVOU X13, X10 - LONG $0x6c0f4566; BYTE $0xd3 // PUNPCKLQDQ XMM10, XMM11 /* m[2], ____ */ - MOVOU X12, X15 - LONG $0x6d0f4566; BYTE $0xfc // PUNPCKHQDQ XMM15, XMM12 /* ___, m[5] */ - MOVOU X12, X11 - LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[4], ____ */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 0 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 48(DX), X13 // X13 = m[6]+ m[7] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 80(DX), X15 // X15 = m[10]+m[11] - MOVOU X15, X8 - LONG $0x6c0f4566; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[10], m[8] */ - MOVOU X13, X9 - LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* m[7], m[1] */ - MOVOU 16(DX), X10 // X10 = m[2]+ m[3] - MOVOU 32(DX), X14 // X14 = m[4]+ m[5] - LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[2], m[4] */ - MOVOU X14, X15 - 
LONG $0x6d0f4566; BYTE $0xfe // PUNPCKHQDQ XMM15, XMM14 /* ___, m[5] */ - MOVOU X13, X11 - LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[6], ____ */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 64(DX), X13 // X13 = m[8]+ m[9] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X15, X8 - LONG $0x6d0f4566; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[15], m[9] */ - MOVOU X12, X9 - LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[3], m[13] */ - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU X15, X10 - LONG $0x3a0f4566; WORD $0xd50f; BYTE $0x08 // PALIGNR XMM10, XMM13, 0x8 /* m[11], m[14] */ - MOVOU X14, X11 - LONG $0x6c0f4566; BYTE $0xdc // PUNPCKLQDQ XMM11, XMM12 /* m[12], m[0] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 1 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+m[1] - MOVOU 16(DX), X13 // X13 = m[2]+m[3] - MOVOU 32(DX), X14 // X14 = m[4]+m[5] - MOVOU 48(DX), X15 // X15 = m[6]+m[7] - MOVOU X12, X8 - LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[2] */ - MOVOU X14, X9 - LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[4], m[6] */ - MOVOU X12, X10 - LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[1], m[3] */ - MOVOU X14, X11 - LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[5], m[7] */ - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 64(DX), X12 // X12 = m[8]+ m[9] - MOVOU 80(DX), X13 // X13 = m[10]+m[11] - MOVOU 96(DX), X14 // X14 = m[12]+m[13] - MOVOU 112(DX), X15 // X15 = m[14]+m[15] - MOVOU X12, X8 - LONG 
$0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[8],m[10] */ - MOVOU X14, X9 - LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[12],m[14] */ - MOVOU X12, X10 - LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[9],m[11] */ - MOVOU X14, X11 - LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[13],m[15] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - /////////////////////////////////////////////////////////////////////////// - // R O U N D 1 2 - /////////////////////////////////////////////////////////////////////////// - - // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) - MOVOU 112(DX), X12 // X12 = m[14]+m[15] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 64(DX), X14 // X14 = m[8]+ m[9] - MOVOU 96(DX), X15 // X15 = m[12]+m[13] - MOVOU X12, X8 - LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[14], m[4] */ - MOVOU X14, X9 - LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[9], m[13] */ - MOVOU 80(DX), X10 // X10 = m[10]+m[11] - MOVOU 48(DX), X11 // X11 = m[6]+ m[7] - LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[10], m[8] */ - LONG $0x3a0f4566; WORD $0xdc0f; BYTE $0x08 // PALIGNR XMM11, XMM12, 0x8 /* m[15], m[6] */; ; ; ; ; - - LOAD_SHUFFLE - G1 - G2 - DIAGONALIZE - - // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) - MOVOU 0(DX), X12 // X12 = m[0]+ m[1] - MOVOU 32(DX), X13 // X13 = m[4]+ m[5] - MOVOU 80(DX), X14 // X14 = m[10]+m[11] - MOVOU X12, X8 - LONG $0x3a0f4566; WORD $0xc40f; BYTE $0x08 // PALIGNR XMM8, XMM12, 0x8 /* m[1], m[0] */ - MOVOU X14, X9 - LONG $0x6d0f4566; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* m[11], m[5] */ - MOVOU 16(DX), X12 // X12 = m[2]+ m[3] - MOVOU 48(DX), X11 // X11 = m[6]+ m[7] - MOVOU 96(DX), X10 // X10 = m[12]+m[13] - LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[2] */ - LONG $0x6d0f4566; BYTE $0xdc // PUNPCKHQDQ XMM11, XMM12 /* m[7], m[3] */ - - LOAD_SHUFFLE - G1 - G2 - UNDIAGONALIZE - - // Reload digest (most 
current value store in &out) - MOVQ out+144(FP), SI // SI: &in - MOVOU 0(SI), X12 // X12 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ - MOVOU 16(SI), X13 // X13 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ - MOVOU 32(SI), X14 // X14 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ - MOVOU 48(SI), X15 // X15 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ - - // Final computations and prepare for storing - PXOR X4, X0 // X0 = X0 ^ X4 /* row1l = _mm_xor_si128( row3l, row1l ); */ - PXOR X5, X1 // X1 = X1 ^ X5 /* row1h = _mm_xor_si128( row3h, row1h ); */ - PXOR X12, X0 // X0 = X0 ^ X12 /* STORE( &S->h[0], _mm_xor_si128( LOAD( &S->h[0] ), row1l ) ); */ - PXOR X13, X1 // X1 = X1 ^ X13 /* STORE( &S->h[2], _mm_xor_si128( LOAD( &S->h[2] ), row1h ) ); */ - PXOR X6, X2 // X2 = X2 ^ X6 /* row2l = _mm_xor_si128( row4l, row2l ); */ - PXOR X7, X3 // X3 = X3 ^ X7 /* row2h = _mm_xor_si128( row4h, row2h ); */ - PXOR X14, X2 // X2 = X2 ^ X14 /* STORE( &S->h[4], _mm_xor_si128( LOAD( &S->h[4] ), row2l ) ); */ - PXOR X15, X3 // X3 = X3 ^ X15 /* STORE( &S->h[6], _mm_xor_si128( LOAD( &S->h[6] ), row2h ) ); */ - - // Store digest into &out - MOVQ out+144(FP), SI // SI: &out - MOVOU X0, 0(SI) // out[0]+out[1] = X0 - MOVOU X1, 16(SI) // out[2]+out[3] = X1 - MOVOU X2, 32(SI) // out[4]+out[5] = X2 - MOVOU X3, 48(SI) // out[6]+out[7] = X3 - - // Increment message pointer and check if there's more to do - ADDQ $128, DX // message += 128 - SUBQ $1, R8 - JNZ loop - -complete: - RET diff --git a/vendor/github.com/minio/blake2b-simd/compress_amd64.go b/vendor/github.com/minio/blake2b-simd/compress_amd64.go deleted file mode 100644 index 4fc5e388c..000000000 --- a/vendor/github.com/minio/blake2b-simd/compress_amd64.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package blake2b - -func compress(d *digest, p []uint8) { - // Verifies if AVX2 or AVX is available, use optimized code path. - if avx2 { - compressAVX2(d, p) - } else if avx { - compressAVX(d, p) - } else if ssse3 { - compressSSE(d, p) - } else { - compressGeneric(d, p) - } -} diff --git a/vendor/github.com/minio/blake2b-simd/compress_generic.go b/vendor/github.com/minio/blake2b-simd/compress_generic.go deleted file mode 100644 index e9e16e8b9..000000000 --- a/vendor/github.com/minio/blake2b-simd/compress_generic.go +++ /dev/null @@ -1,1419 +0,0 @@ -// Written in 2012 by Dmitry Chestnykh. -// -// To the extent possible under law, the author have dedicated all copyright -// and related and neighboring rights to this software to the public domain -// worldwide. This software is distributed without any warranty. -// http://creativecommons.org/publicdomain/zero/1.0/ - -package blake2b - -func compressGeneric(d *digest, p []uint8) { - h0, h1, h2, h3, h4, h5, h6, h7 := d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] - - for len(p) >= BlockSize { - // Increment counter. - d.t[0] += BlockSize - if d.t[0] < BlockSize { - d.t[1]++ - } - // Initialize compression function. 
- v0, v1, v2, v3, v4, v5, v6, v7 := h0, h1, h2, h3, h4, h5, h6, h7 - v8 := iv[0] - v9 := iv[1] - v10 := iv[2] - v11 := iv[3] - v12 := iv[4] ^ d.t[0] - v13 := iv[5] ^ d.t[1] - v14 := iv[6] ^ d.f[0] - v15 := iv[7] ^ d.f[1] - - j := 0 - var m [16]uint64 - for i := range m { - m[i] = uint64(p[j]) | uint64(p[j+1])<<8 | uint64(p[j+2])<<16 | - uint64(p[j+3])<<24 | uint64(p[j+4])<<32 | uint64(p[j+5])<<40 | - uint64(p[j+6])<<48 | uint64(p[j+7])<<56 - j += 8 - } - - // Round 1. - v0 += m[0] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[2] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[4] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[6] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[5] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[7] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[3] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[1] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[8] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[10] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[12] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[14] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[13] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 
= v7<<(64-63) | v7>>63 - v3 += m[15] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[11] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[9] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 2. - v0 += m[14] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[4] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[9] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[13] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[15] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[6] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[8] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[10] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[1] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[0] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[11] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[5] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[7] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[3] - v3 += v4 - v14 ^= v3 - v14 = 
v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[2] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[12] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 3. - v0 += m[11] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[12] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[5] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[15] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[2] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[13] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[0] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[8] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[10] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[3] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[7] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[9] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[1] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[4] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 
- v1 += m[6] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[14] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 4. - v0 += m[7] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[3] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[13] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[11] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[12] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[14] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[1] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[9] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[2] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[5] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[4] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[15] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[0] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[8] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[10] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 
+= v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[6] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 5. - v0 += m[9] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[5] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[2] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[10] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[4] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[15] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[7] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[0] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[14] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[11] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[6] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[3] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[8] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[13] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[12] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[1] - v0 += v5 - 
v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 6. - v0 += m[2] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[6] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[0] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[8] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[11] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[3] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[10] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[12] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[4] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[7] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[15] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[1] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[14] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[9] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[5] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[13] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = 
v5<<(64-63) | v5>>63 - - // Round 7. - v0 += m[12] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[1] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[14] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[4] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[13] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[10] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[15] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[5] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[0] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[6] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[9] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[8] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[2] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[11] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[3] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[7] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 8. 
- v0 += m[13] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[7] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[12] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[3] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[1] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[9] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[14] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[11] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[5] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[15] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[8] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[2] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[6] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[10] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[4] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[0] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 9. 
- v0 += m[6] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[14] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[11] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[0] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[3] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[8] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[9] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[15] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[12] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[13] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[1] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[10] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[4] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[5] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[7] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[2] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 10. 
- v0 += m[10] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[8] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[7] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[1] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[6] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[5] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[4] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[2] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[15] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[9] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[3] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[13] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[12] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[0] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[14] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[11] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 11. 
- v0 += m[0] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[2] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[4] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[6] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[5] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[7] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[3] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[1] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[8] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[10] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[12] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[14] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[13] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[15] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[11] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[9] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 12. 
- v0 += m[14] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[4] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[9] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[13] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[15] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[6] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[8] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[10] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[1] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[0] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[11] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[5] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[7] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[3] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[2] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[12] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - h0 ^= v0 ^ v8 - h1 ^= v1 ^ v9 - h2 ^= v2 ^ v10 - h3 ^= v3 ^ v11 - h4 ^= v4 ^ v12 - h5 
^= v5 ^ v13 - h6 ^= v6 ^ v14 - h7 ^= v7 ^ v15 - - p = p[BlockSize:] - } - d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} diff --git a/vendor/github.com/minio/blake2b-simd/compress_noasm.go b/vendor/github.com/minio/blake2b-simd/compress_noasm.go deleted file mode 100644 index d3c675847..000000000 --- a/vendor/github.com/minio/blake2b-simd/compress_noasm.go +++ /dev/null @@ -1,23 +0,0 @@ -//+build !amd64 noasm appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package blake2b - -func compress(d *digest, p []uint8) { - compressGeneric(d, p) -} diff --git a/vendor/github.com/minio/blake2b-simd/cpuid.go b/vendor/github.com/minio/blake2b-simd/cpuid.go deleted file mode 100644 index a9f95508e..000000000 --- a/vendor/github.com/minio/blake2b-simd/cpuid.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build 386,!gccgo amd64,!gccgo - -// Copyright 2016 Frank Wessels -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package blake2b - -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -func xgetbv(index uint32) (eax, edx uint32) - -// True when SIMD instructions are available. -var avx2 = haveAVX2() -var avx = haveAVX() -var ssse3 = haveSSSE3() - -// haveAVX returns true when there is AVX support -func haveAVX() bool { - _, _, c, _ := cpuid(1) - - // Check XGETBV, OXSAVE and AVX bits - if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { - // Check for OS support - eax, _ := xgetbv(0) - return (eax & 0x6) == 0x6 - } - return false -} - -// haveAVX2 returns true when there is AVX2 support -func haveAVX2() bool { - mfi, _, _, _ := cpuid(0) - - // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. - if mfi >= 7 && haveAVX() { - _, ebx, _, _ := cpuidex(7, 0) - return (ebx & 0x00000020) != 0 - } - return false -} - -// haveSSSE3 returns true when there is SSSE3 support -func haveSSSE3() bool { - - _, _, c, _ := cpuid(1) - - return (c & 0x00000200) != 0 -} diff --git a/vendor/github.com/minio/blake2b-simd/cpuid_386.s b/vendor/github.com/minio/blake2b-simd/cpuid_386.s deleted file mode 100644 index fa38814ec..000000000 --- a/vendor/github.com/minio/blake2b-simd/cpuid_386.s +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
- -// +build 386,!gccgo - -// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuid(SB), 7, $0 - XORL CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+4(FP) - MOVL BX, ebx+8(FP) - MOVL CX, ecx+12(FP) - MOVL DX, edx+16(FP) - RET - -// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv(index uint32) (eax, edx uint32) -TEXT ·xgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+4(FP) - MOVL DX, edx+8(FP) - RET diff --git a/vendor/github.com/minio/blake2b-simd/cpuid_amd64.s b/vendor/github.com/minio/blake2b-simd/cpuid_amd64.s deleted file mode 100644 index fb45a6560..000000000 --- a/vendor/github.com/minio/blake2b-simd/cpuid_amd64.s +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -// +build amd64,!gccgo - -// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuid(SB), 7, $0 - XORQ CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - - -// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv(index uint32) (eax, edx uint32) -TEXT ·xgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+8(FP) - MOVL DX, edx+12(FP) - RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 000000000..b736632f9 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,188 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm as +// defined in RFC 7693. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var errKeySize = errors.New("blake2b: invalid key size") + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. 
The key must between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += 
copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(b []byte) []byte { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + var sum [Size]byte + for i, v := range h[:(d.size+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } + + return append(b, sum[:d.size]...) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 000000000..422ba849c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +package blake2b + +var useAVX2 = supportAVX2() +var useAVX = supportAVX() +var useSSE4 = supportSSE4() + +//go:noescape +func supportSSE4() bool + +//go:noescape +func supportAVX() bool + +//go:noescape +func supportAVX2() bool + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useAVX2 { + hashBlocksAVX2(h, c, flag, blocks) + } else if useAVX { + hashBlocksAVX(h, c, flag, blocks) + } else if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 000000000..86bc182a5 --- /dev/null +++ 
b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,502 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), 
$16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +// unfortunately the BYTE representation of VPERMQ must be used +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 \ // VPERMQ 0x39, Y1, Y1 + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e \ // VPERMQ 0x4e, Y2, Y2 + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 \ // VPERMQ 0x93, Y3, Y3 + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 \ // VPERMQ 0x39, Y3, Y3 + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e \ // VPERMQ 0x4e, Y2, Y2 + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 \ // VPERMQ 0x93, Y1, Y1 + +// load msg into Y12, Y13, Y14, Y15 +#define LOAD_MSG_AVX2(src, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15) \ + MOVQ i0*8(src), X12; \ + PINSRQ $1, i1*8(src), X12; \ + MOVQ i2*8(src), X11; \ + PINSRQ $1, i3*8(src), X11; \ + VINSERTI128 $1, X11, Y12, Y12; \ + MOVQ i4*8(src), X13; \ + PINSRQ $1, i5*8(src), X13; \ + MOVQ i6*8(src), X11; \ + PINSRQ $1, i7*8(src), X11; \ + 
VINSERTI128 $1, X11, Y13, Y13; \ + MOVQ i8*8(src), X14; \ + PINSRQ $1, i9*8(src), X14; \ + MOVQ i10*8(src), X11; \ + PINSRQ $1, i11*8(src), X11; \ + VINSERTI128 $1, X11, Y14, Y14; \ + MOVQ i12*8(src), X15; \ + PINSRQ $1, i13*8(src), X15; \ + MOVQ i14*8(src), X11; \ + PINSRQ $1, i15*8(src), X11; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + MOVQ SP, R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ CX, 16(SP) + XORQ CX, CX + MOVQ CX, 24(SP) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(SP) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(SP) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(SP) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(SP), Y7, Y3 + + LOAD_MSG_AVX2(SI, 0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA Y12, 32(SP) + VMOVDQA Y13, 64(SP) + VMOVDQA Y14, 96(SP) + VMOVDQA Y15, 128(SP) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2(SI, 14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3) + VMOVDQA Y12, 160(SP) + VMOVDQA Y13, 192(SP) + VMOVDQA Y14, 224(SP) + VMOVDQA Y15, 256(SP) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2(SI, 11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2(SI, 7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2(SI, 9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2(SI, 2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9) + 
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2(SI, 12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2(SI, 13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2(SI, 6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2(SI, 10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) + ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + + MOVQ DX, SP + RET + +// unfortunately the BYTE representation of VPUNPCKLQDQ and VPUNPCKHQDQ must be used +#define VPUNPCKLQDQ_X8_X8_X10 BYTE $0xC4; BYTE $0x41; BYTE $0x39; BYTE $0x6C; BYTE $0xD0 +#define VPUNPCKHQDQ_X7_X10_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF2 +#define VPUNPCKLQDQ_X7_X7_X10 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xD7 +#define VPUNPCKHQDQ_X8_X10_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x39; BYTE $0x6D; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X10 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xD3 +#define VPUNPCKHQDQ_X2_X10_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD2 +#define VPUNPCKLQDQ_X9_X9_X10 BYTE $0xC4; BYTE $0x41; BYTE $0x31; BYTE $0x6C; BYTE $0xD1 +#define VPUNPCKHQDQ_X3_X10_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDA +#define VPUNPCKLQDQ_X2_X2_X10 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xD2 +#define VPUNPCKHQDQ_X3_X10_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD2 +#define VPUNPCKHQDQ_X8_X10_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x39; BYTE $0x6D; BYTE $0xDA +#define VPUNPCKHQDQ_X6_X10_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE 
$0xF2 +#define VPUNPCKHQDQ_X7_X10_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFA + +// shuffle X2 and X6 using the temp registers X8, X9, X10 +#define SHUFFLE_AVX() \ + VMOVDQA X4, X9; \ + VMOVDQA X5, X4; \ + VMOVDQA X9, X5; \ + VMOVDQA X6, X8; \ + VPUNPCKLQDQ_X8_X8_X10; \ + VPUNPCKHQDQ_X7_X10_X6; \ + VPUNPCKLQDQ_X7_X7_X10; \ + VPUNPCKHQDQ_X8_X10_X7; \ + VPUNPCKLQDQ_X3_X3_X10; \ + VMOVDQA X2, X9; \ + VPUNPCKHQDQ_X2_X10_X2; \ + VPUNPCKLQDQ_X9_X9_X10; \ + VPUNPCKHQDQ_X3_X10_X3; \ + +// inverse shuffle X2 and X6 using the temp registers X8, X9, X10 +#define SHUFFLE_AVX_INV() \ + VMOVDQA X4, X9; \ + VMOVDQA X5, X4; \ + VMOVDQA X9, X5; \ + VMOVDQA X2, X8; \ + VPUNPCKLQDQ_X2_X2_X10; \ + VPUNPCKHQDQ_X3_X10_X2; \ + VPUNPCKLQDQ_X3_X3_X10; \ + VPUNPCKHQDQ_X8_X10_X3; \ + VPUNPCKLQDQ_X7_X7_X10; \ + VMOVDQA X6, X9; \ + VPUNPCKHQDQ_X6_X10_X6; \ + VPUNPCKLQDQ_X9_X9_X10; \ + VPUNPCKHQDQ_X7_X10_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// unfortunately the BYTE representation of VPINSRQ must be used +#define VPINSRQ_1_R10_X8_X8 BYTE $0xC4; BYTE $0x43; BYTE $0xB9; BYTE $0x22; BYTE $0xC2; BYTE $0x01 +#define VPINSRQ_1_R11_X9_X9 BYTE $0xC4; BYTE $0x43; BYTE $0xB1; BYTE $0x22; BYTE $0xCB; 
BYTE $0x01 +#define VPINSRQ_1_R12_X10_X10 BYTE $0xC4; BYTE $0x43; BYTE $0xA9; BYTE $0x22; BYTE $0xD4; BYTE $0x01 +#define VPINSRQ_1_R13_X11_X11 BYTE $0xC4; BYTE $0x43; BYTE $0xA1; BYTE $0x22; BYTE $0xDD; BYTE $0x01 + +#define VPINSRQ_1_R9_X8_X8 BYTE $0xC4; BYTE $0x43; BYTE $0xB9; BYTE $0x22; BYTE $0xC1; BYTE $0x01 + +// load src into X8, X9, X10 and X11 using R10, R11, R12 and R13 for temp registers +#define LOAD_MSG_AVX(src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), X8; \ + MOVQ i1*8(src), R10; \ + MOVQ i2*8(src), X9; \ + MOVQ i3*8(src), R11; \ + MOVQ i4*8(src), X10; \ + MOVQ i5*8(src), R12; \ + MOVQ i6*8(src), X11; \ + MOVQ i7*8(src), R13; \ + VPINSRQ_1_R10_X8_X8; \ + VPINSRQ_1_R11_X9_X9; \ + VPINSRQ_1_R12_X10_X10; \ + VPINSRQ_1_R13_X11_X11 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + MOVOU ·AVX_c40<>(SB), X13 + MOVOU ·AVX_c48<>(SB), X14 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X12 + VMOVDQU 16(AX), X15 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + VPINSRQ_1_R9_X8_X8 + + VMOVDQA X12, X0 + VMOVDQA X15, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X8, X6, X6 + VMOVDQA 0(SP), X7 + + LOAD_MSG_AVX(SI, 0, 2, 4, 6, 1, 3, 5, 7) + VMOVDQA X8, 16(SP) + VMOVDQA X9, 32(SP) + VMOVDQA X10, 48(SP) + VMOVDQA X11, 64(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX() + LOAD_MSG_AVX(SI, 8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X8, 80(SP) + VMOVDQA X9, 96(SP) + 
VMOVDQA X10, 112(SP) + VMOVDQA X11, 128(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(SI, 14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X8, 144(SP) + VMOVDQA X9, 160(SP) + VMOVDQA X10, 176(SP) + VMOVDQA X11, 192(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX() + LOAD_MSG_AVX(SI, 1, 0, 11, 5, 12, 2, 7, 3) + VMOVDQA X8, 208(SP) + VMOVDQA X9, 224(SP) + VMOVDQA X10, 240(SP) + VMOVDQA X11, 256(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX() + LOAD_MSG_AVX(SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX() + LOAD_MSG_AVX(SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX() + LOAD_MSG_AVX(SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX() + LOAD_MSG_AVX(SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX() + LOAD_MSG_AVX(SI, 0, 6, 9, 8, 7, 3, 2, 11) + 
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX() + LOAD_MSG_AVX(SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX() + LOAD_MSG_AVX(SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX() + LOAD_MSG_AVX(SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X10 + VMOVDQU 48(AX), X11 + VPXOR X0, X12, X12 + VPXOR X1, X15, X15 + VPXOR X2, X10, X10 + VPXOR X3, X11, X11 + VPXOR X4, X12, X12 + VPXOR X5, X15, X15 + VPXOR X6, X10, X2 + VPXOR X7, X11, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X12, 0(AX) + VMOVDQU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VZEROUPPER + + MOVQ BP, SP + RET + +// func supportAVX2() bool +TEXT ·supportAVX2(SB), 4, $0-1 + MOVQ runtime·support_avx2(SB), AX + MOVB AX, ret+0(FP) + RET + 
+// func supportAVX() bool +TEXT ·supportAVX(SB), 4, $0-1 + MOVQ runtime·support_avx(SB), AX + MOVB AX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 000000000..c9a22fd7d --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7,amd64,!gccgo,!appengine + +package blake2b + +var useAVX2 = false +var useAVX = false +var useSSE4 = supportSSE4() + +//go:noescape +func supportSSE4() bool + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 000000000..04e45beb5 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,290 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + 
PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(SP), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(SP) + MOVO X9, 32(SP) + MOVO X10, 48(SP) + MOVO X11, 64(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(SP) + MOVO X9, 96(SP) + MOVO X10, 112(SP) + MOVO X11, 128(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + 
MOVO X8, 144(SP) + MOVO X9, 160(SP) + MOVO X10, 176(SP) + MOVO X11, 192(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(SP) + MOVO X9, 224(SP) + MOVO X10, 240(SP) + MOVO X11, 256(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + 
PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + MOVQ BP, SP + RET + +// func supportSSE4() bool +TEXT ·supportSSE4(SB), 4, $0-1 + MOVL $1, AX + CPUID + SHRL $19, CX // Bit 19 indicates SSE4 support + ANDL $1, CX // CX != 0 if support SSE4 + MOVB CX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 000000000..4bd2abc91 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,179 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import "encoding/binary" + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v1 += m[s[5]] + v1 += v5 + v13 ^= 
v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 000000000..2c3c68b0d --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64 appengine gccgo + +package blake2b + +var useAVX2, useAVX = false, false +var useSSE4 = false + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_test.go b/vendor/golang.org/x/crypto/blake2b/blake2b_test.go new file mode 100644 index 000000000..a38fceb20 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_test.go @@ -0,0 +1,448 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "bytes" + "encoding/hex" + "fmt" + "hash" + "testing" +) + +func fromHex(s string) []byte { + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +func TestHashes(t *testing.T) { + defer func(sse4, avx, avx2 bool) { + useSSE4, useAVX, useAVX2 = sse4, avx, avx2 + }(useSSE4, useAVX, useAVX2) + + if useAVX2 { + t.Log("AVX2 version") + testHashes(t) + useAVX2 = false + } + if useAVX { + t.Log("AVX version") + testHashes(t) + useAVX = false + } + if useSSE4 { + t.Log("SSE4 version") + testHashes(t) + useSSE4 = false + } + t.Log("generic version") + testHashes(t) +} + +func testHashes(t *testing.T) { + key, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f") + + input := make([]byte, 255) + for i := range input { + input[i] = byte(i) + } + + for i, expectedHex := range hashes { + h, err := New512(key) + if err != nil { + t.Fatalf("#%d: error from New512: %v", i, err) + } + + h.Write(input[:i]) + sum := h.Sum(nil) + + if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { + t.Fatalf("#%d (single write): got %s, wanted %s", i, gotHex, expectedHex) + } + + h.Reset() + for j := 0; j < i; j++ { + h.Write(input[j : j+1]) + } + + sum = h.Sum(sum[:0]) + if gotHex := fmt.Sprintf("%x", 
sum); gotHex != expectedHex { + t.Fatalf("#%d (byte-by-byte): got %s, wanted %s", i, gotHex, expectedHex) + } + } +} + +func generateSequence(out []byte, seed uint32) { + a := 0xDEAD4BAD * seed // prime + b := uint32(1) + + for i := range out { // fill the buf + a, b = b, a+b + out[i] = byte(b >> 24) + } +} + +func computeMAC(msg []byte, hashSize int, key []byte) (sum []byte) { + var h hash.Hash + switch hashSize { + case Size: + h, _ = New512(key) + case Size384: + h, _ = New384(key) + case Size256: + h, _ = New256(key) + case 20: + h, _ = newDigest(20, key) + default: + panic("unexpected hashSize") + } + + h.Write(msg) + return h.Sum(sum) +} + +func computeHash(msg []byte, hashSize int) (sum []byte) { + switch hashSize { + case Size: + hash := Sum512(msg) + return hash[:] + case Size384: + hash := Sum384(msg) + return hash[:] + case Size256: + hash := Sum256(msg) + return hash[:] + case 20: + var hash [64]byte + checkSum(&hash, 20, msg) + return hash[:20] + default: + panic("unexpected hashSize") + } +} + +// Test function from RFC 7693. 
+func TestSelfTest(t *testing.T) { + hashLens := [4]int{20, 32, 48, 64} + msgLens := [6]int{0, 3, 128, 129, 255, 1024} + + msg := make([]byte, 1024) + key := make([]byte, 64) + + h, _ := New256(nil) + for _, hashSize := range hashLens { + for _, msgLength := range msgLens { + generateSequence(msg[:msgLength], uint32(msgLength)) // unkeyed hash + + md := computeHash(msg[:msgLength], hashSize) + h.Write(md) + + generateSequence(key[:], uint32(hashSize)) // keyed hash + md = computeMAC(msg[:msgLength], hashSize, key[:hashSize]) + h.Write(md) + } + } + + sum := h.Sum(nil) + expected := [32]byte{ + 0xc2, 0x3a, 0x78, 0x00, 0xd9, 0x81, 0x23, 0xbd, + 0x10, 0xf5, 0x06, 0xc6, 0x1e, 0x29, 0xda, 0x56, + 0x03, 0xd7, 0x63, 0xb8, 0xbb, 0xad, 0x2e, 0x73, + 0x7f, 0x5e, 0x76, 0x5a, 0x7b, 0xcc, 0xd4, 0x75, + } + if !bytes.Equal(sum, expected[:]) { + t.Fatalf("got %x, wanted %x", sum, expected) + } +} + +// Benchmarks + +func benchmarkSum(b *testing.B, size int) { + data := make([]byte, size) + b.SetBytes(int64(size)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Sum512(data) + } +} + +func benchmarkWrite(b *testing.B, size int) { + data := make([]byte, size) + h, _ := New512(nil) + b.SetBytes(int64(size)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + h.Write(data) + } +} + +func BenchmarkWrite128(b *testing.B) { benchmarkWrite(b, 128) } +func BenchmarkWrite1K(b *testing.B) { benchmarkWrite(b, 1024) } + +func BenchmarkSum128(b *testing.B) { benchmarkSum(b, 128) } +func BenchmarkSum1K(b *testing.B) { benchmarkSum(b, 1024) } + +// These values were taken from https://blake2.net/blake2b-test.txt. 
+var hashes = []string{ + "10ebb67700b1868efb4417987acf4690ae9d972fb7a590c2f02871799aaa4786b5e996e8f0f4eb981fc214b005f42d2ff4233499391653df7aefcbc13fc51568", + "961f6dd1e4dd30f63901690c512e78e4b45e4742ed197c3c5e45c549fd25f2e4187b0bc9fe30492b16b0d0bc4ef9b0f34c7003fac09a5ef1532e69430234cebd", + "da2cfbe2d8409a0f38026113884f84b50156371ae304c4430173d08a99d9fb1b983164a3770706d537f49e0c916d9f32b95cc37a95b99d857436f0232c88a965", + "33d0825dddf7ada99b0e7e307104ad07ca9cfd9692214f1561356315e784f3e5a17e364ae9dbb14cb2036df932b77f4b292761365fb328de7afdc6d8998f5fc1", + "beaa5a3d08f3807143cf621d95cd690514d0b49efff9c91d24b59241ec0eefa5f60196d407048bba8d2146828ebcb0488d8842fd56bb4f6df8e19c4b4daab8ac", + "098084b51fd13deae5f4320de94a688ee07baea2800486689a8636117b46c1f4c1f6af7f74ae7c857600456a58a3af251dc4723a64cc7c0a5ab6d9cac91c20bb", + "6044540d560853eb1c57df0077dd381094781cdb9073e5b1b3d3f6c7829e12066bbaca96d989a690de72ca3133a83652ba284a6d62942b271ffa2620c9e75b1f", + "7a8cfe9b90f75f7ecb3acc053aaed6193112b6f6a4aeeb3f65d3de541942deb9e2228152a3c4bbbe72fc3b12629528cfbb09fe630f0474339f54abf453e2ed52", + "380beaf6ea7cc9365e270ef0e6f3a64fb902acae51dd5512f84259ad2c91f4bc4108db73192a5bbfb0cbcf71e46c3e21aee1c5e860dc96e8eb0b7b8426e6abe9", + "60fe3c4535e1b59d9a61ea8500bfac41a69dffb1ceadd9aca323e9a625b64da5763bad7226da02b9c8c4f1a5de140ac5a6c1124e4f718ce0b28ea47393aa6637", + "4fe181f54ad63a2983feaaf77d1e7235c2beb17fa328b6d9505bda327df19fc37f02c4b6f0368ce23147313a8e5738b5fa2a95b29de1c7f8264eb77b69f585cd", + "f228773ce3f3a42b5f144d63237a72d99693adb8837d0e112a8a0f8ffff2c362857ac49c11ec740d1500749dac9b1f4548108bf3155794dcc9e4082849e2b85b", + "962452a8455cc56c8511317e3b1f3b2c37df75f588e94325fdd77070359cf63a9ae6e930936fdf8e1e08ffca440cfb72c28f06d89a2151d1c46cd5b268ef8563", + "43d44bfa18768c59896bf7ed1765cb2d14af8c260266039099b25a603e4ddc5039d6ef3a91847d1088d401c0c7e847781a8a590d33a3c6cb4df0fab1c2f22355", + 
"dcffa9d58c2a4ca2cdbb0c7aa4c4c1d45165190089f4e983bb1c2cab4aaeff1fa2b5ee516fecd780540240bf37e56c8bcca7fab980e1e61c9400d8a9a5b14ac6", + "6fbf31b45ab0c0b8dad1c0f5f4061379912dde5aa922099a030b725c73346c524291adef89d2f6fd8dfcda6d07dad811a9314536c2915ed45da34947e83de34e", + "a0c65bddde8adef57282b04b11e7bc8aab105b99231b750c021f4a735cb1bcfab87553bba3abb0c3e64a0b6955285185a0bd35fb8cfde557329bebb1f629ee93", + "f99d815550558e81eca2f96718aed10d86f3f1cfb675cce06b0eff02f617c5a42c5aa760270f2679da2677c5aeb94f1142277f21c7f79f3c4f0cce4ed8ee62b1", + "95391da8fc7b917a2044b3d6f5374e1ca072b41454d572c7356c05fd4bc1e0f40b8bb8b4a9f6bce9be2c4623c399b0dca0dab05cb7281b71a21b0ebcd9e55670", + "04b9cd3d20d221c09ac86913d3dc63041989a9a1e694f1e639a3ba7e451840f750c2fc191d56ad61f2e7936bc0ac8e094b60caeed878c18799045402d61ceaf9", + "ec0e0ef707e4ed6c0c66f9e089e4954b058030d2dd86398fe84059631f9ee591d9d77375355149178c0cf8f8e7c49ed2a5e4f95488a2247067c208510fadc44c", + "9a37cce273b79c09913677510eaf7688e89b3314d3532fd2764c39de022a2945b5710d13517af8ddc0316624e73bec1ce67df15228302036f330ab0cb4d218dd", + "4cf9bb8fb3d4de8b38b2f262d3c40f46dfe747e8fc0a414c193d9fcf753106ce47a18f172f12e8a2f1c26726545358e5ee28c9e2213a8787aafbc516d2343152", + "64e0c63af9c808fd893137129867fd91939d53f2af04be4fa268006100069b2d69daa5c5d8ed7fddcb2a70eeecdf2b105dd46a1e3b7311728f639ab489326bc9", + "5e9c93158d659b2def06b0c3c7565045542662d6eee8a96a89b78ade09fe8b3dcc096d4fe48815d88d8f82620156602af541955e1f6ca30dce14e254c326b88f", + "7775dff889458dd11aef417276853e21335eb88e4dec9cfb4e9edb49820088551a2ca60339f12066101169f0dfe84b098fddb148d9da6b3d613df263889ad64b", + "f0d2805afbb91f743951351a6d024f9353a23c7ce1fc2b051b3a8b968c233f46f50f806ecb1568ffaa0b60661e334b21dde04f8fa155ac740eeb42e20b60d764", + "86a2af316e7d7754201b942e275364ac12ea8962ab5bd8d7fb276dc5fbffc8f9a28cae4e4867df6780d9b72524160927c855da5b6078e0b554aa91e31cb9ca1d", + 
"10bdf0caa0802705e706369baf8a3f79d72c0a03a80675a7bbb00be3a45e516424d1ee88efb56f6d5777545ae6e27765c3a8f5e493fc308915638933a1dfee55", + "b01781092b1748459e2e4ec178696627bf4ebafebba774ecf018b79a68aeb84917bf0b84bb79d17b743151144cd66b7b33a4b9e52c76c4e112050ff5385b7f0b", + "c6dbc61dec6eaeac81e3d5f755203c8e220551534a0b2fd105a91889945a638550204f44093dd998c076205dffad703a0e5cd3c7f438a7e634cd59fededb539e", + "eba51acffb4cea31db4b8d87e9bf7dd48fe97b0253ae67aa580f9ac4a9d941f2bea518ee286818cc9f633f2a3b9fb68e594b48cdd6d515bf1d52ba6c85a203a7", + "86221f3ada52037b72224f105d7999231c5e5534d03da9d9c0a12acb68460cd375daf8e24386286f9668f72326dbf99ba094392437d398e95bb8161d717f8991", + "5595e05c13a7ec4dc8f41fb70cb50a71bce17c024ff6de7af618d0cc4e9c32d9570d6d3ea45b86525491030c0d8f2b1836d5778c1ce735c17707df364d054347", + "ce0f4f6aca89590a37fe034dd74dd5fa65eb1cbd0a41508aaddc09351a3cea6d18cb2189c54b700c009f4cbf0521c7ea01be61c5ae09cb54f27bc1b44d658c82", + "7ee80b06a215a3bca970c77cda8761822bc103d44fa4b33f4d07dcb997e36d55298bceae12241b3fa07fa63be5576068da387b8d5859aeab701369848b176d42", + "940a84b6a84d109aab208c024c6ce9647676ba0aaa11f86dbb7018f9fd2220a6d901a9027f9abcf935372727cbf09ebd61a2a2eeb87653e8ecad1bab85dc8327", + "2020b78264a82d9f4151141adba8d44bf20c5ec062eee9b595a11f9e84901bf148f298e0c9f8777dcdbc7cc4670aac356cc2ad8ccb1629f16f6a76bcefbee760", + "d1b897b0e075ba68ab572adf9d9c436663e43eb3d8e62d92fc49c9be214e6f27873fe215a65170e6bea902408a25b49506f47babd07cecf7113ec10c5dd31252", + "b14d0c62abfa469a357177e594c10c194243ed2025ab8aa5ad2fa41ad318e0ff48cd5e60bec07b13634a711d2326e488a985f31e31153399e73088efc86a5c55", + "4169c5cc808d2697dc2a82430dc23e3cd356dc70a94566810502b8d655b39abf9e7f902fe717e0389219859e1945df1af6ada42e4ccda55a197b7100a30c30a1", + "258a4edb113d66c839c8b1c91f15f35ade609f11cd7f8681a4045b9fef7b0b24c82cda06a5f2067b368825e3914e53d6948ede92efd6e8387fa2e537239b5bee", + 
"79d2d8696d30f30fb34657761171a11e6c3f1e64cbe7bebee159cb95bfaf812b4f411e2f26d9c421dc2c284a3342d823ec293849e42d1e46b0a4ac1e3c86abaa", + "8b9436010dc5dee992ae38aea97f2cd63b946d94fedd2ec9671dcde3bd4ce9564d555c66c15bb2b900df72edb6b891ebcadfeff63c9ea4036a998be7973981e7", + "c8f68e696ed28242bf997f5b3b34959508e42d613810f1e2a435c96ed2ff560c7022f361a9234b9837feee90bf47922ee0fd5f8ddf823718d86d1e16c6090071", + "b02d3eee4860d5868b2c39ce39bfe81011290564dd678c85e8783f29302dfc1399ba95b6b53cd9ebbf400cca1db0ab67e19a325f2d115812d25d00978ad1bca4", + "7693ea73af3ac4dad21ca0d8da85b3118a7d1c6024cfaf557699868217bc0c2f44a199bc6c0edd519798ba05bd5b1b4484346a47c2cadf6bf30b785cc88b2baf", + "a0e5c1c0031c02e48b7f09a5e896ee9aef2f17fc9e18e997d7f6cac7ae316422c2b1e77984e5f3a73cb45deed5d3f84600105e6ee38f2d090c7d0442ea34c46d", + "41daa6adcfdb69f1440c37b596440165c15ada596813e2e22f060fcd551f24dee8e04ba6890387886ceec4a7a0d7fc6b44506392ec3822c0d8c1acfc7d5aebe8", + "14d4d40d5984d84c5cf7523b7798b254e275a3a8cc0a1bd06ebc0bee726856acc3cbf516ff667cda2058ad5c3412254460a82c92187041363cc77a4dc215e487", + "d0e7a1e2b9a447fee83e2277e9ff8010c2f375ae12fa7aaa8ca5a6317868a26a367a0b69fbc1cf32a55d34eb370663016f3d2110230eba754028a56f54acf57c", + "e771aa8db5a3e043e8178f39a0857ba04a3f18e4aa05743cf8d222b0b095825350ba422f63382a23d92e4149074e816a36c1cd28284d146267940b31f8818ea2", + "feb4fd6f9e87a56bef398b3284d2bda5b5b0e166583a66b61e538457ff0584872c21a32962b9928ffab58de4af2edd4e15d8b35570523207ff4e2a5aa7754caa", + "462f17bf005fb1c1b9e671779f665209ec2873e3e411f98dabf240a1d5ec3f95ce6796b6fc23fe171903b502023467dec7273ff74879b92967a2a43a5a183d33", + "d3338193b64553dbd38d144bea71c5915bb110e2d88180dbc5db364fd6171df317fc7268831b5aef75e4342b2fad8797ba39eddcef80e6ec08159350b1ad696d", + "e1590d585a3d39f7cb599abd479070966409a6846d4377acf4471d065d5db94129cc9be92573b05ed226be1e9b7cb0cabe87918589f80dadd4ef5ef25a93d28e", + 
"f8f3726ac5a26cc80132493a6fedcb0e60760c09cfc84cad178175986819665e76842d7b9fedf76dddebf5d3f56faaad4477587af21606d396ae570d8e719af2", + "30186055c07949948183c850e9a756cc09937e247d9d928e869e20bafc3cd9721719d34e04a0899b92c736084550186886efba2e790d8be6ebf040b209c439a4", + "f3c4276cb863637712c241c444c5cc1e3554e0fddb174d035819dd83eb700b4ce88df3ab3841ba02085e1a99b4e17310c5341075c0458ba376c95a6818fbb3e2", + "0aa007c4dd9d5832393040a1583c930bca7dc5e77ea53add7e2b3f7c8e231368043520d4a3ef53c969b6bbfd025946f632bd7f765d53c21003b8f983f75e2a6a", + "08e9464720533b23a04ec24f7ae8c103145f765387d738777d3d343477fd1c58db052142cab754ea674378e18766c53542f71970171cc4f81694246b717d7564", + "d37ff7ad297993e7ec21e0f1b4b5ae719cdc83c5db687527f27516cbffa822888a6810ee5c1ca7bfe3321119be1ab7bfa0a502671c8329494df7ad6f522d440f", + "dd9042f6e464dcf86b1262f6accfafbd8cfd902ed3ed89abf78ffa482dbdeeb6969842394c9a1168ae3d481a017842f660002d42447c6b22f7b72f21aae021c9", + "bd965bf31e87d70327536f2a341cebc4768eca275fa05ef98f7f1b71a0351298de006fba73fe6733ed01d75801b4a928e54231b38e38c562b2e33ea1284992fa", + "65676d800617972fbd87e4b9514e1c67402b7a331096d3bfac22f1abb95374abc942f16e9ab0ead33b87c91968a6e509e119ff07787b3ef483e1dcdccf6e3022", + "939fa189699c5d2c81ddd1ffc1fa207c970b6a3685bb29ce1d3e99d42f2f7442da53e95a72907314f4588399a3ff5b0a92beb3f6be2694f9f86ecf2952d5b41c", + "c516541701863f91005f314108ceece3c643e04fc8c42fd2ff556220e616aaa6a48aeb97a84bad74782e8dff96a1a2fa949339d722edcaa32b57067041df88cc", + "987fd6e0d6857c553eaebb3d34970a2c2f6e89a3548f492521722b80a1c21a153892346d2cba6444212d56da9a26e324dccbc0dcde85d4d2ee4399eec5a64e8f", + "ae56deb1c2328d9c4017706bce6e99d41349053ba9d336d677c4c27d9fd50ae6aee17e853154e1f4fe7672346da2eaa31eea53fcf24a22804f11d03da6abfc2b", + "49d6a608c9bde4491870498572ac31aac3fa40938b38a7818f72383eb040ad39532bc06571e13d767e6945ab77c0bdc3b0284253343f9f6c1244ebf2ff0df866", + 
"da582ad8c5370b4469af862aa6467a2293b2b28bd80ae0e91f425ad3d47249fdf98825cc86f14028c3308c9804c78bfeeeee461444ce243687e1a50522456a1d", + "d5266aa3331194aef852eed86d7b5b2633a0af1c735906f2e13279f14931a9fc3b0eac5ce9245273bd1aa92905abe16278ef7efd47694789a7283b77da3c70f8", + "2962734c28252186a9a1111c732ad4de4506d4b4480916303eb7991d659ccda07a9911914bc75c418ab7a4541757ad054796e26797feaf36e9f6ad43f14b35a4", + "e8b79ec5d06e111bdfafd71e9f5760f00ac8ac5d8bf768f9ff6f08b8f026096b1cc3a4c973333019f1e3553e77da3f98cb9f542e0a90e5f8a940cc58e59844b3", + "dfb320c44f9d41d1efdcc015f08dd5539e526e39c87d509ae6812a969e5431bf4fa7d91ffd03b981e0d544cf72d7b1c0374f8801482e6dea2ef903877eba675e", + "d88675118fdb55a5fb365ac2af1d217bf526ce1ee9c94b2f0090b2c58a06ca58187d7fe57c7bed9d26fca067b4110eefcd9a0a345de872abe20de368001b0745", + "b893f2fc41f7b0dd6e2f6aa2e0370c0cff7df09e3acfcc0e920b6e6fad0ef747c40668417d342b80d2351e8c175f20897a062e9765e6c67b539b6ba8b9170545", + "6c67ec5697accd235c59b486d7b70baeedcbd4aa64ebd4eef3c7eac189561a726250aec4d48cadcafbbe2ce3c16ce2d691a8cce06e8879556d4483ed7165c063", + "f1aa2b044f8f0c638a3f362e677b5d891d6fd2ab0765f6ee1e4987de057ead357883d9b405b9d609eea1b869d97fb16d9b51017c553f3b93c0a1e0f1296fedcd", + "cbaa259572d4aebfc1917acddc582b9f8dfaa928a198ca7acd0f2aa76a134a90252e6298a65b08186a350d5b7626699f8cb721a3ea5921b753ae3a2dce24ba3a", + "fa1549c9796cd4d303dcf452c1fbd5744fd9b9b47003d920b92de34839d07ef2a29ded68f6fc9e6c45e071a2e48bd50c5084e96b657dd0404045a1ddefe282ed", + "5cf2ac897ab444dcb5c8d87c495dbdb34e1838b6b629427caa51702ad0f9688525f13bec503a3c3a2c80a65e0b5715e8afab00ffa56ec455a49a1ad30aa24fcd", + "9aaf80207bace17bb7ab145757d5696bde32406ef22b44292ef65d4519c3bb2ad41a59b62cc3e94b6fa96d32a7faadae28af7d35097219aa3fd8cda31e40c275", + "af88b163402c86745cb650c2988fb95211b94b03ef290eed9662034241fd51cf398f8073e369354c43eae1052f9b63b08191caa138aa54fea889cc7024236897", + 
"48fa7d64e1ceee27b9864db5ada4b53d00c9bc7626555813d3cd6730ab3cc06ff342d727905e33171bde6e8476e77fb1720861e94b73a2c538d254746285f430", + "0e6fd97a85e904f87bfe85bbeb34f69e1f18105cf4ed4f87aec36c6e8b5f68bd2a6f3dc8a9ecb2b61db4eedb6b2ea10bf9cb0251fb0f8b344abf7f366b6de5ab", + "06622da5787176287fdc8fed440bad187d830099c94e6d04c8e9c954cda70c8bb9e1fc4a6d0baa831b9b78ef6648681a4867a11da93ee36e5e6a37d87fc63f6f", + "1da6772b58fabf9c61f68d412c82f182c0236d7d575ef0b58dd22458d643cd1dfc93b03871c316d8430d312995d4197f0874c99172ba004a01ee295abac24e46", + "3cd2d9320b7b1d5fb9aab951a76023fa667be14a9124e394513918a3f44096ae4904ba0ffc150b63bc7ab1eeb9a6e257e5c8f000a70394a5afd842715de15f29", + "04cdc14f7434e0b4be70cb41db4c779a88eaef6accebcb41f2d42fffe7f32a8e281b5c103a27021d0d08362250753cdf70292195a53a48728ceb5844c2d98bab", + "9071b7a8a075d0095b8fb3ae5113785735ab98e2b52faf91d5b89e44aac5b5d4ebbf91223b0ff4c71905da55342e64655d6ef8c89a4768c3f93a6dc0366b5bc8", + "ebb30240dd96c7bc8d0abe49aa4edcbb4afdc51ff9aaf720d3f9e7fbb0f9c6d6571350501769fc4ebd0b2141247ff400d4fd4be414edf37757bb90a32ac5c65a", + "8532c58bf3c8015d9d1cbe00eef1f5082f8f3632fbe9f1ed4f9dfb1fa79e8283066d77c44c4af943d76b300364aecbd0648c8a8939bd204123f4b56260422dec", + "fe9846d64f7c7708696f840e2d76cb4408b6595c2f81ec6a28a7f2f20cb88cfe6ac0b9e9b8244f08bd7095c350c1d0842f64fb01bb7f532dfcd47371b0aeeb79", + "28f17ea6fb6c42092dc264257e29746321fb5bdaea9873c2a7fa9d8f53818e899e161bc77dfe8090afd82bf2266c5c1bc930a8d1547624439e662ef695f26f24", + "ec6b7d7f030d4850acae3cb615c21dd25206d63e84d1db8d957370737ba0e98467ea0ce274c66199901eaec18a08525715f53bfdb0aacb613d342ebdceeddc3b", + "b403d3691c03b0d3418df327d5860d34bbfcc4519bfbce36bf33b208385fadb9186bc78a76c489d89fd57e7dc75412d23bcd1dae8470ce9274754bb8585b13c5", + "31fc79738b8772b3f55cd8178813b3b52d0db5a419d30ba9495c4b9da0219fac6df8e7c23a811551a62b827f256ecdb8124ac8a6792ccfecc3b3012722e94463", + 
"bb2039ec287091bcc9642fc90049e73732e02e577e2862b32216ae9bedcd730c4c284ef3968c368b7d37584f97bd4b4dc6ef6127acfe2e6ae2509124e66c8af4", + "f53d68d13f45edfcb9bd415e2831e938350d5380d3432278fc1c0c381fcb7c65c82dafe051d8c8b0d44e0974a0e59ec7bf7ed0459f86e96f329fc79752510fd3", + "8d568c7984f0ecdf7640fbc483b5d8c9f86634f6f43291841b309a350ab9c1137d24066b09da9944bac54d5bb6580d836047aac74ab724b887ebf93d4b32eca9", + "c0b65ce5a96ff774c456cac3b5f2c4cd359b4ff53ef93a3da0778be4900d1e8da1601e769e8f1b02d2a2f8c5b9fa10b44f1c186985468feeb008730283a6657d", + "4900bba6f5fb103ece8ec96ada13a5c3c85488e05551da6b6b33d988e611ec0fe2e3c2aa48ea6ae8986a3a231b223c5d27cec2eadde91ce07981ee652862d1e4", + "c7f5c37c7285f927f76443414d4357ff789647d7a005a5a787e03c346b57f49f21b64fa9cf4b7e45573e23049017567121a9c3d4b2b73ec5e9413577525db45a", + "ec7096330736fdb2d64b5653e7475da746c23a4613a82687a28062d3236364284ac01720ffb406cfe265c0df626a188c9e5963ace5d3d5bb363e32c38c2190a6", + "82e744c75f4649ec52b80771a77d475a3bc091989556960e276a5f9ead92a03f718742cdcfeaee5cb85c44af198adc43a4a428f5f0c2ddb0be36059f06d7df73", + "2834b7a7170f1f5b68559ab78c1050ec21c919740b784a9072f6e5d69f828d70c919c5039fb148e39e2c8a52118378b064ca8d5001cd10a5478387b966715ed6", + "16b4ada883f72f853bb7ef253efcab0c3e2161687ad61543a0d2824f91c1f81347d86be709b16996e17f2dd486927b0288ad38d13063c4a9672c39397d3789b6", + "78d048f3a69d8b54ae0ed63a573ae350d89f7c6cf1f3688930de899afa037697629b314e5cd303aa62feea72a25bf42b304b6c6bcb27fae21c16d925e1fbdac3", + "0f746a48749287ada77a82961f05a4da4abdb7d77b1220f836d09ec814359c0ec0239b8c7b9ff9e02f569d1b301ef67c4612d1de4f730f81c12c40cc063c5caa", + "f0fc859d3bd195fbdc2d591e4cdac15179ec0f1dc821c11df1f0c1d26e6260aaa65b79fafacafd7d3ad61e600f250905f5878c87452897647a35b995bcadc3a3", + "2620f687e8625f6a412460b42e2cef67634208ce10a0cbd4dff7044a41b7880077e9f8dc3b8d1216d3376a21e015b58fb279b521d83f9388c7382c8505590b9b", + 
"227e3aed8d2cb10b918fcb04f9de3e6d0a57e08476d93759cd7b2ed54a1cbf0239c528fb04bbf288253e601d3bc38b21794afef90b17094a182cac557745e75f", + "1a929901b09c25f27d6b35be7b2f1c4745131fdebca7f3e2451926720434e0db6e74fd693ad29b777dc3355c592a361c4873b01133a57c2e3b7075cbdb86f4fc", + "5fd7968bc2fe34f220b5e3dc5af9571742d73b7d60819f2888b629072b96a9d8ab2d91b82d0a9aaba61bbd39958132fcc4257023d1eca591b3054e2dc81c8200", + "dfcce8cf32870cc6a503eadafc87fd6f78918b9b4d0737db6810be996b5497e7e5cc80e312f61e71ff3e9624436073156403f735f56b0b01845c18f6caf772e6", + "02f7ef3a9ce0fff960f67032b296efca3061f4934d690749f2d01c35c81c14f39a67fa350bc8a0359bf1724bffc3bca6d7c7bba4791fd522a3ad353c02ec5aa8", + "64be5c6aba65d594844ae78bb022e5bebe127fd6b6ffa5a13703855ab63b624dcd1a363f99203f632ec386f3ea767fc992e8ed9686586aa27555a8599d5b808f", + "f78585505c4eaa54a8b5be70a61e735e0ff97af944ddb3001e35d86c4e2199d976104b6ae31750a36a726ed285064f5981b503889fef822fcdc2898dddb7889a", + "e4b5566033869572edfd87479a5bb73c80e8759b91232879d96b1dda36c012076ee5a2ed7ae2de63ef8406a06aea82c188031b560beafb583fb3de9e57952a7e", + "e1b3e7ed867f6c9484a2a97f7715f25e25294e992e41f6a7c161ffc2adc6daaeb7113102d5e6090287fe6ad94ce5d6b739c6ca240b05c76fb73f25dd024bf935", + "85fd085fdc12a080983df07bd7012b0d402a0f4043fcb2775adf0bad174f9b08d1676e476985785c0a5dcc41dbff6d95ef4d66a3fbdc4a74b82ba52da0512b74", + "aed8fa764b0fbff821e05233d2f7b0900ec44d826f95e93c343c1bc3ba5a24374b1d616e7e7aba453a0ada5e4fab5382409e0d42ce9c2bc7fb39a99c340c20f0", + "7ba3b2e297233522eeb343bd3ebcfd835a04007735e87f0ca300cbee6d416565162171581e4020ff4cf176450f1291ea2285cb9ebffe4c56660627685145051c", + "de748bcf89ec88084721e16b85f30adb1a6134d664b5843569babc5bbd1a15ca9b61803c901a4fef32965a1749c9f3a4e243e173939dc5a8dc495c671ab52145", + "aaf4d2bdf200a919706d9842dce16c98140d34bc433df320aba9bd429e549aa7a3397652a4d768277786cf993cde2338673ed2e6b66c961fefb82cd20c93338f", + 
"c408218968b788bf864f0997e6bc4c3dba68b276e2125a4843296052ff93bf5767b8cdce7131f0876430c1165fec6c4f47adaa4fd8bcfacef463b5d3d0fa61a0", + "76d2d819c92bce55fa8e092ab1bf9b9eab237a25267986cacf2b8ee14d214d730dc9a5aa2d7b596e86a1fd8fa0804c77402d2fcd45083688b218b1cdfa0dcbcb", + "72065ee4dd91c2d8509fa1fc28a37c7fc9fa7d5b3f8ad3d0d7a25626b57b1b44788d4caf806290425f9890a3a2a35a905ab4b37acfd0da6e4517b2525c9651e4", + "64475dfe7600d7171bea0b394e27c9b00d8e74dd1e416a79473682ad3dfdbb706631558055cfc8a40e07bd015a4540dcdea15883cbbf31412df1de1cd4152b91", + "12cd1674a4488a5d7c2b3160d2e2c4b58371bedad793418d6f19c6ee385d70b3e06739369d4df910edb0b0a54cbff43d54544cd37ab3a06cfa0a3ddac8b66c89", + "60756966479dedc6dd4bcff8ea7d1d4ce4d4af2e7b097e32e3763518441147cc12b3c0ee6d2ecabf1198cec92e86a3616fba4f4e872f5825330adbb4c1dee444", + "a7803bcb71bc1d0f4383dde1e0612e04f872b715ad30815c2249cf34abb8b024915cb2fc9f4e7cc4c8cfd45be2d5a91eab0941c7d270e2da4ca4a9f7ac68663a", + "b84ef6a7229a34a750d9a98ee2529871816b87fbe3bc45b45fa5ae82d5141540211165c3c5d7a7476ba5a4aa06d66476f0d9dc49a3f1ee72c3acabd498967414", + "fae4b6d8efc3f8c8e64d001dabec3a21f544e82714745251b2b4b393f2f43e0da3d403c64db95a2cb6e23ebb7b9e94cdd5ddac54f07c4a61bd3cb10aa6f93b49", + "34f7286605a122369540141ded79b8957255da2d4155abbf5a8dbb89c8eb7ede8eeef1daa46dc29d751d045dc3b1d658bb64b80ff8589eddb3824b13da235a6b", + "3b3b48434be27b9eababba43bf6b35f14b30f6a88dc2e750c358470d6b3aa3c18e47db4017fa55106d8252f016371a00f5f8b070b74ba5f23cffc5511c9f09f0", + "ba289ebd6562c48c3e10a8ad6ce02e73433d1e93d7c9279d4d60a7e879ee11f441a000f48ed9f7c4ed87a45136d7dccdca482109c78a51062b3ba4044ada2469", + "022939e2386c5a37049856c850a2bb10a13dfea4212b4c732a8840a9ffa5faf54875c5448816b2785a007da8a8d2bc7d71a54e4e6571f10b600cbdb25d13ede3", + "e6fec19d89ce8717b1a087024670fe026f6c7cbda11caef959bb2d351bf856f8055d1c0ebdaaa9d1b17886fc2c562b5e99642fc064710c0d3488a02b5ed7f6fd", + 
"94c96f02a8f576aca32ba61c2b206f907285d9299b83ac175c209a8d43d53bfe683dd1d83e7549cb906c28f59ab7c46f8751366a28c39dd5fe2693c9019666c8", + "31a0cd215ebd2cb61de5b9edc91e6195e31c59a5648d5c9f737e125b2605708f2e325ab3381c8dce1a3e958886f1ecdc60318f882cfe20a24191352e617b0f21", + "91ab504a522dce78779f4c6c6ba2e6b6db5565c76d3e7e7c920caf7f757ef9db7c8fcf10e57f03379ea9bf75eb59895d96e149800b6aae01db778bb90afbc989", + "d85cabc6bd5b1a01a5afd8c6734740da9fd1c1acc6db29bfc8a2e5b668b028b6b3154bfb8703fa3180251d589ad38040ceb707c4bad1b5343cb426b61eaa49c1", + "d62efbec2ca9c1f8bd66ce8b3f6a898cb3f7566ba6568c618ad1feb2b65b76c3ce1dd20f7395372faf28427f61c9278049cf0140df434f5633048c86b81e0399", + "7c8fdc6175439e2c3db15bafa7fb06143a6a23bc90f449e79deef73c3d492a671715c193b6fea9f036050b946069856b897e08c00768f5ee5ddcf70b7cd6d0e0", + "58602ee7468e6bc9df21bd51b23c005f72d6cb013f0a1b48cbec5eca299299f97f09f54a9a01483eaeb315a6478bad37ba47ca1347c7c8fc9e6695592c91d723", + "27f5b79ed256b050993d793496edf4807c1d85a7b0a67c9c4fa99860750b0ae66989670a8ffd7856d7ce411599e58c4d77b232a62bef64d15275be46a68235ff", + "3957a976b9f1887bf004a8dca942c92d2b37ea52600f25e0c9bc5707d0279c00c6e85a839b0d2d8eb59c51d94788ebe62474a791cadf52cccf20f5070b6573fc", + "eaa2376d55380bf772ecca9cb0aa4668c95c707162fa86d518c8ce0ca9bf7362b9f2a0adc3ff59922df921b94567e81e452f6c1a07fc817cebe99604b3505d38", + "c1e2c78b6b2734e2480ec550434cb5d613111adcc21d475545c3b1b7e6ff12444476e5c055132e2229dc0f807044bb919b1a5662dd38a9ee65e243a3911aed1a", + "8ab48713389dd0fcf9f965d3ce66b1e559a1f8c58741d67683cd971354f452e62d0207a65e436c5d5d8f8ee71c6abfe50e669004c302b31a7ea8311d4a916051", + "24ce0addaa4c65038bd1b1c0f1452a0b128777aabc94a29df2fd6c7e2f85f8ab9ac7eff516b0e0a825c84a24cfe492eaad0a6308e46dd42fe8333ab971bb30ca", + "5154f929ee03045b6b0c0004fa778edee1d139893267cc84825ad7b36c63de32798e4a166d24686561354f63b00709a1364b3c241de3febf0754045897467cd4", + 
"e74e907920fd87bd5ad636dd11085e50ee70459c443e1ce5809af2bc2eba39f9e6d7128e0e3712c316da06f4705d78a4838e28121d4344a2c79c5e0db307a677", + "bf91a22334bac20f3fd80663b3cd06c4e8802f30e6b59f90d3035cc9798a217ed5a31abbda7fa6842827bdf2a7a1c21f6fcfccbb54c6c52926f32da816269be1", + "d9d5c74be5121b0bd742f26bffb8c89f89171f3f934913492b0903c271bbe2b3395ef259669bef43b57f7fcc3027db01823f6baee66e4f9fead4d6726c741fce", + "50c8b8cf34cd879f80e2faab3230b0c0e1cc3e9dcadeb1b9d97ab923415dd9a1fe38addd5c11756c67990b256e95ad6d8f9fedce10bf1c90679cde0ecf1be347", + "0a386e7cd5dd9b77a035e09fe6fee2c8ce61b5383c87ea43205059c5e4cd4f4408319bb0a82360f6a58e6c9ce3f487c446063bf813bc6ba535e17fc1826cfc91", + "1f1459cb6b61cbac5f0efe8fc487538f42548987fcd56221cfa7beb22504769e792c45adfb1d6b3d60d7b749c8a75b0bdf14e8ea721b95dca538ca6e25711209", + "e58b3836b7d8fedbb50ca5725c6571e74c0785e97821dab8b6298c10e4c079d4a6cdf22f0fedb55032925c16748115f01a105e77e00cee3d07924dc0d8f90659", + "b929cc6505f020158672deda56d0db081a2ee34c00c1100029bdf8ea98034fa4bf3e8655ec697fe36f40553c5bb46801644a627d3342f4fc92b61f03290fb381", + "72d353994b49d3e03153929a1e4d4f188ee58ab9e72ee8e512f29bc773913819ce057ddd7002c0433ee0a16114e3d156dd2c4a7e80ee53378b8670f23e33ef56", + "c70ef9bfd775d408176737a0736d68517ce1aaad7e81a93c8c1ed967ea214f56c8a377b1763e676615b60f3988241eae6eab9685a5124929d28188f29eab06f7", + "c230f0802679cb33822ef8b3b21bf7a9a28942092901d7dac3760300831026cf354c9232df3e084d9903130c601f63c1f4a4a4b8106e468cd443bbe5a734f45f", + "6f43094cafb5ebf1f7a4937ec50f56a4c9da303cbb55ac1f27f1f1976cd96beda9464f0e7b9c54620b8a9fba983164b8be3578425a024f5fe199c36356b88972", + "3745273f4c38225db2337381871a0c6aafd3af9b018c88aa02025850a5dc3a42a1a3e03e56cbf1b0876d63a441f1d2856a39b8801eb5af325201c415d65e97fe", + "c50c44cca3ec3edaae779a7e179450ebdda2f97067c690aa6c5a4ac7c30139bb27c0df4db3220e63cb110d64f37ffe078db72653e2daacf93ae3f0a2d1a7eb2e", + 
"8aef263e385cbc61e19b28914243262af5afe8726af3ce39a79c27028cf3ecd3f8d2dfd9cfc9ad91b58f6f20778fd5f02894a3d91c7d57d1e4b866a7f364b6be", + "28696141de6e2d9bcb3235578a66166c1448d3e905a1b482d423be4bc5369bc8c74dae0acc9cc123e1d8ddce9f97917e8c019c552da32d39d2219b9abf0fa8c8", + "2fb9eb2085830181903a9dafe3db428ee15be7662224efd643371fb25646aee716e531eca69b2bdc8233f1a8081fa43da1500302975a77f42fa592136710e9dc", + "66f9a7143f7a3314a669bf2e24bbb35014261d639f495b6c9c1f104fe8e320aca60d4550d69d52edbd5a3cdeb4014ae65b1d87aa770b69ae5c15f4330b0b0ad8", + "f4c4dd1d594c3565e3e25ca43dad82f62abea4835ed4cd811bcd975e46279828d44d4c62c3679f1b7f7b9dd4571d7b49557347b8c5460cbdc1bef690fb2a08c0", + "8f1dc9649c3a84551f8f6e91cac68242a43b1f8f328ee92280257387fa7559aa6db12e4aeadc2d26099178749c6864b357f3f83b2fb3efa8d2a8db056bed6bcc", + "3139c1a7f97afd1675d460ebbc07f2728aa150df849624511ee04b743ba0a833092f18c12dc91b4dd243f333402f59fe28abdbbbae301e7b659c7a26d5c0f979", + "06f94a2996158a819fe34c40de3cf0379fd9fb85b3e363ba3926a0e7d960e3f4c2e0c70c7ce0ccb2a64fc29869f6e7ab12bd4d3f14fce943279027e785fb5c29", + "c29c399ef3eee8961e87565c1ce263925fc3d0ce267d13e48dd9e732ee67b0f69fad56401b0f10fcaac119201046cca28c5b14abdea3212ae65562f7f138db3d", + "4cec4c9df52eef05c3f6faaa9791bc7445937183224ecc37a1e58d0132d35617531d7e795f52af7b1eb9d147de1292d345fe341823f8e6bc1e5badca5c656108", + "898bfbae93b3e18d00697eab7d9704fa36ec339d076131cefdf30edbe8d9cc81c3a80b129659b163a323bab9793d4feed92d54dae966c77529764a09be88db45", + "ee9bd0469d3aaf4f14035be48a2c3b84d9b4b1fff1d945e1f1c1d38980a951be197b25fe22c731f20aeacc930ba9c4a1f4762227617ad350fdabb4e80273a0f4", + "3d4d3113300581cd96acbf091c3d0f3c310138cd6979e6026cde623e2dd1b24d4a8638bed1073344783ad0649cc6305ccec04beb49f31c633088a99b65130267", + "95c0591ad91f921ac7be6d9ce37e0663ed8011c1cfd6d0162a5572e94368bac02024485e6a39854aa46fe38e97d6c6b1947cd272d86b06bb5b2f78b9b68d559d", + 
"227b79ded368153bf46c0a3ca978bfdbef31f3024a5665842468490b0ff748ae04e7832ed4c9f49de9b1706709d623e5c8c15e3caecae8d5e433430ff72f20eb", + "5d34f3952f0105eef88ae8b64c6ce95ebfade0e02c69b08762a8712d2e4911ad3f941fc4034dc9b2e479fdbcd279b902faf5d838bb2e0c6495d372b5b7029813", + "7f939bf8353abce49e77f14f3750af20b7b03902e1a1e7fb6aaf76d0259cd401a83190f15640e74f3e6c5a90e839c7821f6474757f75c7bf9002084ddc7a62dc", + "062b61a2f9a33a71d7d0a06119644c70b0716a504de7e5e1be49bd7b86e7ed6817714f9f0fc313d06129597e9a2235ec8521de36f7290a90ccfc1ffa6d0aee29", + "f29e01eeae64311eb7f1c6422f946bf7bea36379523e7b2bbaba7d1d34a22d5ea5f1c5a09d5ce1fe682cced9a4798d1a05b46cd72dff5c1b355440b2a2d476bc", + "ec38cd3bbab3ef35d7cb6d5c914298351d8a9dc97fcee051a8a02f58e3ed6184d0b7810a5615411ab1b95209c3c810114fdeb22452084e77f3f847c6dbaafe16", + "c2aef5e0ca43e82641565b8cb943aa8ba53550caef793b6532fafad94b816082f0113a3ea2f63608ab40437ecc0f0229cb8fa224dcf1c478a67d9b64162b92d1", + "15f534efff7105cd1c254d074e27d5898b89313b7d366dc2d7d87113fa7d53aae13f6dba487ad8103d5e854c91fdb6e1e74b2ef6d1431769c30767dde067a35c", + "89acbca0b169897a0a2714c2df8c95b5b79cb69390142b7d6018bb3e3076b099b79a964152a9d912b1b86412b7e372e9cecad7f25d4cbab8a317be36492a67d7", + "e3c0739190ed849c9c962fd9dbb55e207e624fcac1eb417691515499eea8d8267b7e8f1287a63633af5011fde8c4ddf55bfdf722edf88831414f2cfaed59cb9a", + "8d6cf87c08380d2d1506eee46fd4222d21d8c04e585fbfd08269c98f702833a156326a0724656400ee09351d57b440175e2a5de93cc5f80db6daf83576cf75fa", + "da24bede383666d563eeed37f6319baf20d5c75d1635a6ba5ef4cfa1ac95487e96f8c08af600aab87c986ebad49fc70a58b4890b9c876e091016daf49e1d322e", + "f9d1d1b1e87ea7ae753a029750cc1cf3d0157d41805e245c5617bb934e732f0ae3180b78e05bfe76c7c3051e3e3ac78b9b50c05142657e1e03215d6ec7bfd0fc", + "11b7bc1668032048aa43343de476395e814bbbc223678db951a1b03a021efac948cfbe215f97fe9a72a2f6bc039e3956bfa417c1a9f10d6d7ba5d3d32ff323e5", + 
"b8d9000e4fc2b066edb91afee8e7eb0f24e3a201db8b6793c0608581e628ed0bcc4e5aa6787992a4bcc44e288093e63ee83abd0bc3ec6d0934a674a4da13838a", + "ce325e294f9b6719d6b61278276ae06a2564c03bb0b783fafe785bdf89c7d5acd83e78756d301b445699024eaeb77b54d477336ec2a4f332f2b3f88765ddb0c3", + "29acc30e9603ae2fccf90bf97e6cc463ebe28c1b2f9b4b765e70537c25c702a29dcbfbf14c99c54345ba2b51f17b77b5f15db92bbad8fa95c471f5d070a137cc", + "3379cbaae562a87b4c0425550ffdd6bfe1203f0d666cc7ea095be407a5dfe61ee91441cd5154b3e53b4f5fb31ad4c7a9ad5c7af4ae679aa51a54003a54ca6b2d", + "3095a349d245708c7cf550118703d7302c27b60af5d4e67fc978f8a4e60953c7a04f92fcf41aee64321ccb707a895851552b1e37b00bc5e6b72fa5bcef9e3fff", + "07262d738b09321f4dbccec4bb26f48cb0f0ed246ce0b31b9a6e7bc683049f1f3e5545f28ce932dd985c5ab0f43bd6de0770560af329065ed2e49d34624c2cbb", + "b6405eca8ee3316c87061cc6ec18dba53e6c250c63ba1f3bae9e55dd3498036af08cd272aa24d713c6020d77ab2f3919af1a32f307420618ab97e73953994fb4", + "7ee682f63148ee45f6e5315da81e5c6e557c2c34641fc509c7a5701088c38a74756168e2cd8d351e88fd1a451f360a01f5b2580f9b5a2e8cfc138f3dd59a3ffc", + "1d263c179d6b268f6fa016f3a4f29e943891125ed8593c81256059f5a7b44af2dcb2030d175c00e62ecaf7ee96682aa07ab20a611024a28532b1c25b86657902", + "106d132cbdb4cd2597812846e2bc1bf732fec5f0a5f65dbb39ec4e6dc64ab2ce6d24630d0f15a805c3540025d84afa98e36703c3dbee713e72dde8465bc1be7e", + "0e79968226650667a8d862ea8da4891af56a4e3a8b6d1750e394f0dea76d640d85077bcec2cc86886e506751b4f6a5838f7f0b5fef765d9dc90dcdcbaf079f08", + "521156a82ab0c4e566e5844d5e31ad9aaf144bbd5a464fdca34dbd5717e8ff711d3ffebbfa085d67fe996a34f6d3e4e60b1396bf4b1610c263bdbb834d560816", + "1aba88befc55bc25efbce02db8b9933e46f57661baeabeb21cc2574d2a518a3cba5dc5a38e49713440b25f9c744e75f6b85c9d8f4681f676160f6105357b8406", + "5a9949fcb2c473cda968ac1b5d08566dc2d816d960f57e63b898fa701cf8ebd3f59b124d95bfbbedc5f1cf0e17d5eaed0c02c50b69d8a402cabcca4433b51fd4", + 
"b0cead09807c672af2eb2b0f06dde46cf5370e15a4096b1a7d7cbb36ec31c205fbefca00b7a4162fa89fb4fb3eb78d79770c23f44e7206664ce3cd931c291e5d", + "bb6664931ec97044e45b2ae420ae1c551a8874bc937d08e969399c3964ebdba8346cdd5d09caafe4c28ba7ec788191ceca65ddd6f95f18583e040d0f30d0364d", + "65bc770a5faa3792369803683e844b0be7ee96f29f6d6a35568006bd5590f9a4ef639b7a8061c7b0424b66b60ac34af3119905f33a9d8c3ae18382ca9b689900", + "ea9b4dca333336aaf839a45c6eaa48b8cb4c7ddabffea4f643d6357ea6628a480a5b45f2b052c1b07d1fedca918b6f1139d80f74c24510dcbaa4be70eacc1b06", + "e6342fb4a780ad975d0e24bce149989b91d360557e87994f6b457b895575cc02d0c15bad3ce7577f4c63927ff13f3e381ff7e72bdbe745324844a9d27e3f1c01", + "3e209c9b33e8e461178ab46b1c64b49a07fb745f1c8bc95fbfb94c6b87c69516651b264ef980937fad41238b91ddc011a5dd777c7efd4494b4b6ecd3a9c22ac0", + "fd6a3d5b1875d80486d6e69694a56dbb04a99a4d051f15db2689776ba1c4882e6d462a603b7015dc9f4b7450f05394303b8652cfb404a266962c41bae6e18a94", + "951e27517e6bad9e4195fc8671dee3e7e9be69cee1422cb9fecfce0dba875f7b310b93ee3a3d558f941f635f668ff832d2c1d033c5e2f0997e4c66f147344e02", + "8eba2f874f1ae84041903c7c4253c82292530fc8509550bfdc34c95c7e2889d5650b0ad8cb988e5c4894cb87fbfbb19612ea93ccc4c5cad17158b9763464b492", + "16f712eaa1b7c6354719a8e7dbdfaf55e4063a4d277d947550019b38dfb564830911057d50506136e2394c3b28945cc964967d54e3000c2181626cfb9b73efd2", + "c39639e7d5c7fb8cdd0fd3e6a52096039437122f21c78f1679cea9d78a734c56ecbeb28654b4f18e342c331f6f7229ec4b4bc281b2d80a6eb50043f31796c88c", + "72d081af99f8a173dcc9a0ac4eb3557405639a29084b54a40172912a2f8a395129d5536f0918e902f9e8fa6000995f4168ddc5f893011be6a0dbc9b8a1a3f5bb", + "c11aa81e5efd24d5fc27ee586cfd8847fbb0e27601ccece5ecca0198e3c7765393bb74457c7e7a27eb9170350e1fb53857177506be3e762cc0f14d8c3afe9077", + "c28f2150b452e6c0c424bcde6f8d72007f9310fed7f2f87de0dbb64f4479d6c1441ba66f44b2accee61609177ed340128b407ecec7c64bbe50d63d22d8627727", + 
"f63d88122877ec30b8c8b00d22e89000a966426112bd44166e2f525b769ccbe9b286d437a0129130dde1a86c43e04bedb594e671d98283afe64ce331de9828fd", + "348b0532880b88a6614a8d7408c3f913357fbb60e995c60205be9139e74998aede7f4581e42f6b52698f7fa1219708c14498067fd1e09502de83a77dd281150c", + "5133dc8bef725359dff59792d85eaf75b7e1dcd1978b01c35b1b85fcebc63388ad99a17b6346a217dc1a9622ebd122ecf6913c4d31a6b52a695b86af00d741a0", + "2753c4c0e98ecad806e88780ec27fccd0f5c1ab547f9e4bf1659d192c23aa2cc971b58b6802580baef8adc3b776ef7086b2545c2987f348ee3719cdef258c403", + "b1663573ce4b9d8caefc865012f3e39714b9898a5da6ce17c25a6a47931a9ddb9bbe98adaa553beed436e89578455416c2a52a525cf2862b8d1d49a2531b7391", + "64f58bd6bfc856f5e873b2a2956ea0eda0d6db0da39c8c7fc67c9f9feefcff3072cdf9e6ea37f69a44f0c61aa0da3693c2db5b54960c0281a088151db42b11e8", + "0764c7be28125d9065c4b98a69d60aede703547c66a12e17e1c618994132f5ef82482c1e3fe3146cc65376cc109f0138ed9a80e49f1f3c7d610d2f2432f20605", + "f748784398a2ff03ebeb07e155e66116a839741a336e32da71ec696001f0ad1b25cd48c69cfca7265eca1dd71904a0ce748ac4124f3571076dfa7116a9cf00e9", + "3f0dbc0186bceb6b785ba78d2a2a013c910be157bdaffae81bb6663b1a73722f7f1228795f3ecada87cf6ef0078474af73f31eca0cc200ed975b6893f761cb6d", + "d4762cd4599876ca75b2b8fe249944dbd27ace741fdab93616cbc6e425460feb51d4e7adcc38180e7fc47c89024a7f56191adb878dfde4ead62223f5a2610efe", + "cd36b3d5b4c91b90fcbba79513cfee1907d8645a162afd0cd4cf4192d4a5f4c892183a8eacdb2b6b6a9d9aa8c11ac1b261b380dbee24ca468f1bfd043c58eefe", + "98593452281661a53c48a9d8cd790826c1a1ce567738053d0bee4a91a3d5bd92eefdbabebe3204f2031ca5f781bda99ef5d8ae56e5b04a9e1ecd21b0eb05d3e1", + "771f57dd2775ccdab55921d3e8e30ccf484d61fe1c1b9c2ae819d0fb2a12fab9be70c4a7a138da84e8280435daade5bbe66af0836a154f817fb17f3397e725a3", + "c60897c6f828e21f16fbb5f15b323f87b6c8955eabf1d38061f707f608abdd993fac3070633e286cf8339ce295dd352df4b4b40b2f29da1dd50b3a05d079e6bb", + 
"8210cd2c2d3b135c2cf07fa0d1433cd771f325d075c6469d9c7f1ba0943cd4ab09808cabf4acb9ce5bb88b498929b4b847f681ad2c490d042db2aec94214b06b", + "1d4edfffd8fd80f7e4107840fa3aa31e32598491e4af7013c197a65b7f36dd3ac4b478456111cd4309d9243510782fa31b7c4c95fa951520d020eb7e5c36e4ef", + "af8e6e91fab46ce4873e1a50a8ef448cc29121f7f74deef34a71ef89cc00d9274bc6c2454bbb3230d8b2ec94c62b1dec85f3593bfa30ea6f7a44d7c09465a253", + "29fd384ed4906f2d13aa9fe7af905990938bed807f1832454a372ab412eea1f5625a1fcc9ac8343b7c67c5aba6e0b1cc4644654913692c6b39eb9187ceacd3ec", + "a268c7885d9874a51c44dffed8ea53e94f78456e0b2ed99ff5a3924760813826d960a15edbedbb5de5226ba4b074e71b05c55b9756bb79e55c02754c2c7b6c8a", + "0cf8545488d56a86817cd7ecb10f7116b7ea530a45b6ea497b6c72c997e09e3d0da8698f46bb006fc977c2cd3d1177463ac9057fdd1662c85d0c126443c10473", + "b39614268fdd8781515e2cfebf89b4d5402bab10c226e6344e6b9ae000fb0d6c79cb2f3ec80e80eaeb1980d2f8698916bd2e9f747236655116649cd3ca23a837", + "74bef092fc6f1e5dba3663a3fb003b2a5ba257496536d99f62b9d73f8f9eb3ce9ff3eec709eb883655ec9eb896b9128f2afc89cf7d1ab58a72f4a3bf034d2b4a", + "3a988d38d75611f3ef38b8774980b33e573b6c57bee0469ba5eed9b44f29945e7347967fba2c162e1c3be7f310f2f75ee2381e7bfd6b3f0baea8d95dfb1dafb1", + "58aedfce6f67ddc85a28c992f1c0bd0969f041e66f1ee88020a125cbfcfebcd61709c9c4eba192c15e69f020d462486019fa8dea0cd7a42921a19d2fe546d43d", + "9347bd291473e6b4e368437b8e561e065f649a6d8ada479ad09b1999a8f26b91cf6120fd3bfe014e83f23acfa4c0ad7b3712b2c3c0733270663112ccd9285cd9", + "b32163e7c5dbb5f51fdc11d2eac875efbbcb7e7699090a7e7ff8a8d50795af5d74d9ff98543ef8cdf89ac13d0485278756e0ef00c817745661e1d59fe38e7537", + "1085d78307b1c4b008c57a2e7e5b234658a0a82e4ff1e4aaac72b312fda0fe27d233bc5b10e9cc17fdc7697b540c7d95eb215a19a1a0e20e1abfa126efd568c7", + "4e5c734c7dde011d83eac2b7347b373594f92d7091b9ca34cb9c6f39bdf5a8d2f134379e16d822f6522170ccf2ddd55c84b9e6c64fc927ac4cf8dfb2a17701f2", + 
"695d83bd990a1117b3d0ce06cc888027d12a054c2677fd82f0d4fbfc93575523e7991a5e35a3752e9b70ce62992e268a877744cdd435f5f130869c9a2074b338", + "a6213743568e3b3158b9184301f3690847554c68457cb40fc9a4b8cfd8d4a118c301a07737aeda0f929c68913c5f51c80394f53bff1c3e83b2e40ca97eba9e15", + "d444bfa2362a96df213d070e33fa841f51334e4e76866b8139e8af3bb3398be2dfaddcbc56b9146de9f68118dc5829e74b0c28d7711907b121f9161cb92b69a9", + "142709d62e28fcccd0af97fad0f8465b971e82201dc51070faa0372aa43e92484be1c1e73ba10906d5d1853db6a4106e0a7bf9800d373d6dee2d46d62ef2a461", +} From 5878fcc086b2472a2b60dc855f8ecc501675796f Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 22 Dec 2016 08:25:03 -0800 Subject: [PATCH 030/100] bit-rot: Default to sha256 on ARM64. (#3488) This is to utilize an optimized version of sha256 checksum which @fwessels implemented. blake2b lacks such optimizations on ARM platform, this can provide us significant boost in performance. blake2b on ARM64 as expected would be slower. ``` BenchmarkSize1K-4 30000 44015 ns/op 23.26 MB/s BenchmarkSize8K-4 5000 335448 ns/op 24.42 MB/s BenchmarkSize32K-4 1000 1333960 ns/op 24.56 MB/s BenchmarkSize128K-4 300 5328286 ns/op 24.60 MB/s ``` sha256 on ARM64 is faster by orders of magnitude giving close to AVX performance of blake2b. 
``` BenchmarkHash8Bytes-4 1000000 1446 ns/op 5.53 MB/s BenchmarkHash1K-4 500000 3229 ns/op 317.12 MB/s BenchmarkHash8K-4 100000 14430 ns/op 567.69 MB/s BenchmarkHash1M-4 1000 1640126 ns/op 639.33 MB/s ``` --- cmd/erasure-utils.go | 14 +++++++++----- cmd/xl-v1-metadata.go | 26 +++++++++++++++++++++++--- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/cmd/erasure-utils.go b/cmd/erasure-utils.go index 498997eba..5897d5e02 100644 --- a/cmd/erasure-utils.go +++ b/cmd/erasure-utils.go @@ -24,6 +24,7 @@ import ( "sync" "github.com/klauspost/reedsolomon" + "github.com/minio/sha256-simd" "golang.org/x/crypto/blake2b" ) @@ -37,23 +38,26 @@ func newHashWriters(diskCount int, algo string) []hash.Hash { } // newHash - gives you a newly allocated hash depending on the input algorithm. -func newHash(algo string) hash.Hash { +func newHash(algo string) (h hash.Hash) { switch algo { + case "sha256": + // sha256 checksum specially on ARM64 platforms or whenever + // requested as dictated by `xl.json` entry. + h = sha256.New() case "blake2b": // ignore the error, because New512 without a key never fails // New512 only returns a non-nil error, if the length of the passed // key > 64 bytes - but we use blake2b as hash fucntion (no key) - h, _ := blake2b.New512(nil) - return h + h, _ = blake2b.New512(nil) // Add new hashes here. default: // Default to blake2b. 
// ignore the error, because New512 without a key never fails // New512 only returns a non-nil error, if the length of the passed // key > 64 bytes - but we use blake2b as hash fucntion (no key) - h, _ := blake2b.New512(nil) - return h + h, _ = blake2b.New512(nil) } + return h } // Hash buffer pool is a pool of reusable diff --git a/cmd/xl-v1-metadata.go b/cmd/xl-v1-metadata.go index 37b21deee..8edd7d2ba 100644 --- a/cmd/xl-v1-metadata.go +++ b/cmd/xl-v1-metadata.go @@ -20,6 +20,7 @@ import ( "encoding/json" "errors" "path" + "runtime" "sort" "sync" "time" @@ -54,9 +55,28 @@ type checkSumInfo struct { } // Constant indicates current bit-rot algo used when creating objects. -const ( - bitRotAlgo = "blake2b" -) +// Depending on the architecture we are choosing a different checksum. +var bitRotAlgo = getDefaultBitRotAlgo() + +// Get the default bit-rot algo depending on the architecture. +// Currently this function defaults to "blake2b" as the preferred +// checksum algorithm on all architectures except ARM64. On ARM64 +// we use sha256 (optimized using sha2 instructions of ARM NEON chip). +func getDefaultBitRotAlgo() string { + switch runtime.GOARCH { + case "arm64": + // As a special case for ARM64 we use an optimized + // version of hash i.e sha256. This is done so that + // blake2b is sub-optimal and slower on ARM64. + // This would also allows erasure coded writes + // on ARM64 servers to be on-par with their + // counter-part X86_64 servers. + return "sha256" + default: + // Default for all other architectures we use blake2b. + return "blake2b" + } +} // erasureInfo - carries erasure coding related information, block // distribution and checksums. 
From 855ef4f1aa1a6a4974e48306568c1d4e85a4b50f Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 22 Dec 2016 10:47:10 -0800 Subject: [PATCH 031/100] Fix typo in erasure-utils.go --- cmd/erasure-utils.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/erasure-utils.go b/cmd/erasure-utils.go index 5897d5e02..c9b5f2f28 100644 --- a/cmd/erasure-utils.go +++ b/cmd/erasure-utils.go @@ -47,14 +47,14 @@ func newHash(algo string) (h hash.Hash) { case "blake2b": // ignore the error, because New512 without a key never fails // New512 only returns a non-nil error, if the length of the passed - // key > 64 bytes - but we use blake2b as hash fucntion (no key) + // key > 64 bytes - but we use blake2b as hash function (no key) h, _ = blake2b.New512(nil) // Add new hashes here. default: // Default to blake2b. // ignore the error, because New512 without a key never fails // New512 only returns a non-nil error, if the length of the passed - // key > 64 bytes - but we use blake2b as hash fucntion (no key) + // key > 64 bytes - but we use blake2b as hash function (no key) h, _ = blake2b.New512(nil) } return h From cdc6c2d57842353ee2e229618556ead2bbbf2c3a Mon Sep 17 00:00:00 2001 From: koolhead17 Date: Sat, 24 Dec 2016 11:36:01 +0530 Subject: [PATCH 032/100] docs: Removed unmaintained docs. (#3493) * docs: Removed unmaintained docs. * docs: removed associated screenshots with the older docs. 
--- ...inio-server-instances-on-single-machine.md | 80 ------------ .../minio-server-configuration-files-guide.md | 110 ---------------- docs/screenshots/miniomirror.jpeg | Bin 21362 -> 0 bytes docs/screenshots/multiport.png | Bin 72592 -> 0 bytes ...ication-between-two-sites-running-minio.md | 117 ------------------ 5 files changed, 307 deletions(-) delete mode 100644 docs/how-to-run-multiple-minio-server-instances-on-single-machine.md delete mode 100644 docs/minio-server-configuration-files-guide.md delete mode 100644 docs/screenshots/miniomirror.jpeg delete mode 100644 docs/screenshots/multiport.png delete mode 100644 docs/setup-replication-between-two-sites-running-minio.md diff --git a/docs/how-to-run-multiple-minio-server-instances-on-single-machine.md b/docs/how-to-run-multiple-minio-server-instances-on-single-machine.md deleted file mode 100644 index d581bf700..000000000 --- a/docs/how-to-run-multiple-minio-server-instances-on-single-machine.md +++ /dev/null @@ -1,80 +0,0 @@ -# How to run multiple Minio server instances on single machine. [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -![minio_MULTIVERSE](https://github.com/minio/minio/blob/master/docs/screenshots/multiport.png?raw=true) - - -In this document we will illustrate how to set up multiple Minio server instances on single machine. These Minio servers are running on their own port, data directory & configuration directory. - -## 1. Prerequisites - -* Download Minio server from [here](https://docs.minio.io/docs/minio) -* Download & Install mc from [here](https://docs.minio.io/docs/minio-client-quickstart-guide) -* Ports should be available for Minio server's use - -## 2. Install and Configure Minio Server - -Minio server is running on port 9002, 9003, 9004 with associated data directory and configuration file directory. 
- -**Minio server on port ``9002``** - -```sh -$ ./minio -C ~/.m1config server --address 192.168.1.11:9002 ~/data1/ - -Endpoint: http://192.168.1.11:9002 -AccessKey: XTW9SWKRWYKWE9M9K9RG -SecretKey: pZehbS5UNrA9BAhYHnWC/QVvQ7vGVge48WGHzG9t -Region: us-east-1 -``` - -**Minio server on port ``9003``** - -```sh -$ ./minio -C ~/.m2config server --address 192.168.1.11:9003 ~/data2/ - -Endpoint: http://192.168.1.11:9003 -AccessKey: UTD2WWPJOK754KMZKHWF -SecretKey: DbikDIY4+wItcexJa4nyrwQC0V2r7kLsK5SsRgHb -Region: us-east-1 -``` - -**Minio server on port ``9004``** - -```sh -$ ./minio -C ~/.m3config server --address 192.168.1.11:9004 ~/data3/ - -Endpoint: http://192.168.1.11:9004 -AccessKey: KXLOJ908VEJ2K9RGUFHQ -SecretKey: LpkeePMtEWAa6payiCovfrNKiFHhABsJhMwGynF8 -Region: us-east-1 -``` - -This is how directory structure will look like for ``minio1``, replace it with your local setup. - -```sh -$ tree -la minio1/ -minio1/ -├── data1 -└── .minio - ├── certs - ├── config.json - └── config.json.old - -3 directories, 2 files - -``` -**Testing it all** - -Using terminal comamnd ``netstat`` we can see ``Minio Server`` is running on different ports on same machine. 
- -```sh -$ netstat -ntlp | grep minio -tcp 0 0 192.168.1.11:9002 0.0.0.0:* LISTEN 29573/minio -tcp 0 0 192.168.1.11:9003 0.0.0.0:* LISTEN 29597/minio -tcp 0 0 192.168.1.11:9004 0.0.0.0:* LISTEN 29631/minio -``` - - -# Explore Further -* [Minio Quickstart Guide](https://docs.minio.io/docs/minio-quickstart-guide) -* [Minio Client Complete Guide](https://docs.minio.io/docs/minio-client-complete-guide) - diff --git a/docs/minio-server-configuration-files-guide.md b/docs/minio-server-configuration-files-guide.md deleted file mode 100644 index b42e0c415..000000000 --- a/docs/minio-server-configuration-files-guide.md +++ /dev/null @@ -1,110 +0,0 @@ -# Minio Server Configuration Files Guide [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -In this document we will walk through the configuration files of Minio Server. - -## Minio Server configuration directory -Minio Server configurations are stored in file name ``.minio``. It's a hidden file which resides on user's home directory. - -**This how the structure of the directory looks like:** - -```sh -$ tree ~/.minio/ -/Users/supernova/.minio/ -├── certs -├── config.json -└── config.json.old - -1 directory, 2 files -``` -### Files and directories. - -##### ``certs`` directory -``certs`` directory stores key & cert information, which are needed to run Minio in ``HTTPS``. You can read more on running Minio with ``HTTPS`` with Let's Encrypt cert with Concert [here](https://docs.minio.io/docs/generate-let-s-encypt-certificate-using-concert-for-minio) - -##### ``config.json`` -config.json is the configuration file for Minio, it gets generated after you install and start Minio. 
- -```sh - -$ cat config.json -{ - "version": "6", - "credential": { - "accessKey": "YI7S1CKXB76RGOGT6R8W", - "secretKey": "FJ9PWUVNXGPfiI72WMRFepN3LsFgW3MjsxSALroV" - }, - "region": "us-east-1", - "logger": { - "console": { - "enable": true, - "level": "fatal" - }, - "file": { - "enable": false, - "fileName": "", - "level": "error" - }, - "syslog": { - "enable": false, - "address": "", - "level": "debug" - } - }, - "notify": { - "amqp": { - "1": { - "enable": false, - "url": "", - "exchange": "", - "routingKey": "", - "exchangeType": "", - "mandatory": false, - "immediate": false, - "durable": false, - "internal": false, - "noWait": false, - "autoDeleted": false - } - }, - "elasticsearch": { - "1": { - "enable": false, - "url": "", - "index": "" - } - }, - "redis": { - "1": { - "enable": false, - "address": "", - "password": "", - "key": "" - } - } - } -} - - -``` - -``version`` : Represents `version` number of current configuration file. - -``credential`` : Represents authentication credentials for the server, value is automatically generated upon first server start. - -``region`` : Represents deployment region for the server, value defaults to `us-east-1`. - -``logger `` : Represents various logging types supported for server error logs, console logger is enabled by default. - -``notify``: Represents various notification types supported. These notification types should be configured prior to using bucket - - -##### ``config.json.old`` -This file keeps previous config file version details. 
- -## Explore Further -* [Minio Quickstart Guide](https://docs.minio.io/docs/minio-quickstart-guide) - - - - - diff --git a/docs/screenshots/miniomirror.jpeg b/docs/screenshots/miniomirror.jpeg deleted file mode 100644 index 86f98a440acb159c3c301088c73c00a85a82993d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 21362 zcmeFZcUY6zwm%+w$3c42Q9&R;kP>ZXgn*QQ2qAkjLUTb|`j=mjz z2mA!l*3||aJ9Z54mh}fX8UWk_oIQQ|%;{5S&zw1P?%dh)7dbCoyl~+n-xZF_oI?E9 zt_txB3jQn%68-sx|8Y6`0AN3REc(RuiDNea$JvjaU_W-$01yNm0~|l`<8l99PMkh|^3<7QXIWPt z`~*0D?8Ncor%qiwd-CMT(jZrFJhadF?2Re*Uvd5tUV z7M2I!_3_Q{cXlmtV=TNtVcP< zTAQ^TfVC2<96Qc_Lgos`^*dZfFyq%xKAyZGn^9=z)it_&iu3MKKj7jC)-CKO*a4b= zt>wQ=`Twf_i30a*redlC$)?Z2M*z8x?c{>)AVe3*dV4%U)0J1~H#pgPR#DlQ!v9lP zm9FPzvUkzD5&6nV&9sSCt6OG28RJyqj{vRI$`=~$+D`eU{9zmqQr4m8ZC}CV5>m!}!|EhJ5&Av+Iq%W;U{}swP%n$#XDz%h!Y94S`tfs5p=m@1(|w`I z>KapSgPay!Cn@mQ%e>$I#VwImrS{TS3@>HxfI;b=T%04jaer}HiY(^v5G9k zm|X9KXW3ojQxmipoHbJ~eqM6Tm)mjk4>G^i{)QFVkZBSK;*gr9E^rUJBTXt2Bh6p+ zw?h&UxXvcBK!D~@cCmCx;yFp-I2p%!Fb*#Q3S2Wy@5tFn0<}GQ^o37b@?%o*Y4)c# zUf;a=2m9y9U*m4P{uOrf(Ml#PukfrgCZ-=FAIGdD@f)odG0#q@JWl9f4>JM_ar+y# zXFMPO=Gj9p2?43v=k*3Dne97!nJQW8#9fVf;qB0FSBfqX7=Xx9XI!ipCf1^ot1X-I zmalk<>nf>Z>LQHX3H*js-J(v3;5_4(WBmVuQvc=UV;Np#3rjUfAa8VvXGwII}I2A=6P0A2GdFPKchl7hVmR;`(p{Wit zP2;CwCXD~H>tS~YNe&a6;^VdQkREJNG@mEw(C+^@t9LQm zDqAq1AK3ZRyR3t=BjuyJ^g}+lAH^d(i}ou|ur<;pRgxr`f7k#vShI!#~ru(u!Dvp(4gDzDh1 z?AT=$6Tf+GDABBXxg1W74!c;doK7U(B8~ajnwyl&yY>v}-~x()f6(R~I(@7S3WoG^ zQG&MqK5x3#ZakK->ZwQsBjT{3k`)O)7YIz=ujNoYPPj%qtP)YvTTt+5C%1w)nACRa zDW zBaPL>_J(yD_B}z1+vpz}S|0(_HJwh;CsnE19{`d6jD}oGzTis`V>}+h-Gj0!q^M7- z`Hfe+-{KtLK3MqXa7ZjMkIw24QbNj*L{u15h9RHft=2M}*qD4JY+1`*5k2lH-jN`j z43Q9vL*P-5pP9@ynR09w&4XWf7b>;*5jS~p2G?xpmN=Cy+4#&`=^9ML_)B2di$J7F z$bQ#hLt2!OwehG;_1G*mM2TM2Z6;hLDB5<=&by=k72DWI7?RG22&3{@N%d(w8gtoq z{zTna&&%@!ZoK^1UQf97z9t0E4Cx97#uXS|el+mmR|^@p6f)G7tI3AM3wdrj0R2U~ zFG?ilJT}0{wa013DDx_c!97&?x_OAvrbT1;;zQF+yea`YIk0Y>og? 
z1^ELe$poI!Dv*fWPgPdd$lc&TG9${h5w~{;e#Y?}ueKY+u1`i#kjkd1*&{)Z97px3 zJjslWxeyVjcYB#d$_wRH5BY!jljx^A{=w&JntD6;vclAL`gJMat-C9aDLtU4ThWo@wKzU+!u0yVBWil9l)!NK=f5X+jU2J+(Ca5f^{0 zktAhS$r_2a6RXyMBptrdI0CFFK95%>)2qsxUGehWdwkw@ZY@zx3W^E{JVAi0hXb0v zBV26Erfpz-#jCX`;uy}zmnkxC$#6#EqBH&T5HMo2-q!B)#fH9^yuzeXu9=@J>u4p@ zYjCn)0r#2DT_mG1nMbk1c#(FST=PWS#@5;EbYwf-3AfGhq@I2C;zGD&o_`t&g94(8ZQA(d|Qh?v7aG zy3UjKBCf9vs=wT3^`NswI~*1xK(QZV1A_|rk()V3fJF>j0fHiK`IW#)hdWQZbqHDN z7FlYiJ}iW$sqR~}7KkEDW>upV)~cE;Dq_DlN_ylF!jwPC}&R!ZV~mKl3Lmb0jx|)Ci$iKrYqk1Zq_?3OuuhFssPJ*#NCq zkw*Z#Z(;mlPYmULqJIVe{~;d=zIE{`I&1Whte_{OySA-DH+N<=y4620L;A_nfVPMK z82LY{vE3rChHD)GEcXrLQ7qZu=!qp%lF0b9{CjlSmYJud4c z@Fg>y-hG3CT_@2#uzZ;>Yfqm-n)0jiEN9|5g4dvA+p4RpGnOrzWSeqHPxo=%!y|yi z)ZT^g^ULVO%XeB+W;{fy=k#A&EEc=W7a`Z}>>N&=jF*}bYs7dx^I|aS2`X43)A>=E zqQ?@FL1E-D*6gaw7MxX5Q4*L)JX$nua8pQd;OaCz^St*Ooj90o_l@Q`*E-9|gc(2E zTuXcW*tthV#VS{Jr;|^G>N&SAH-1>}XATE@0P708;5cRAgXmPhg}!avE)Hdi#p$-f zLRJIist)|>Rz$Y{c=_93cInu3?O*H|Ay95d-G6`m;0=#){NjuN;JDad#{Rtun-|!s zvSXPp>+`1JEdzpe`-UzIi(+HN^F6`q6h(^Zw-K}5J zn!HU5b>ecpmPSejE=P3vqv677R~?7G-Rb&@i;l~sD<@dJE?ypT%6tsv9q^#p;?WAT zmg5~ZsBf2ZQ%Ne;T#w%MI)r3@kr<*CsClo7OwAW>CYK7GQDHMm4j2$>GBTk~n(-Il zDF>(Snxde?J`|l~gH2vKPSR+KC_Lh%`s%&$Y_Z84Nl#PYme0%%(oWk}`TIrtoQ*!P zxX*YiLuZ>=ddPvD?nV=yD>~R?Q*!d0&#Lj`jiQ$U1)DtziOJh;)Wn-D;S>>x>N-4$&ph%h){;X!g6R%zv4b9) zU^+=vxw6a4s_oHXy>Gfzis|agido&BgJZIJ@IxXn7(X(;g?##%1adqsHs*3*{U2*9M;P49ecp|e?$3Lw(S|~(yqn~$0 zc>eubTV1c~5#Wk##5wTDTmR0rAkmWEGFQhW&VQh$68-Og1F-+{ z*RlUrKmMAypH|3PQm!8~dChP~-Qm@4T32qdQ=%Fsb#P%2ZkAjVbRK_Vc83|&)Z)+A zoF{>LL*~b}jwo+L#^z2N6ZitE{RHe*f$HAm7*F;5&g1-8vW_81iD4WUQrRLcufi~y z0T+L%_5(}!O6!wb+J$=LhR=jSiV0rs5O8Lva&roPlQ^wxK=4O^Vz0DLt*9nT@A%c- z2|dFPDXPxfp;?Y8OZya?`m%KdLD^f^iF#(bX|ySLeTACl9Gr{ zT2$Nez%dRHV@f$qRM7q65uk8$AbX8!<+t#=*t)u(`IPa}^1N)3Nw21kQCmps^|69) zWtxrY-%T#~bA>h*9!eeoj=#JASA+Y1`&{+0C-i|!-CU5^_hCIQMc-)B#V{80&FBavHR)6Ekp6}llk}vsYj*y-pJ!D-CdwHilJ2qN>Y7&< zdp~t19Ymc7gO?HRw&D;b9L#{n)=Ab(P!`GO!4yHbe?X|&)4~|zSf>N#`Lg9U-udsx 
zh#TjES@uRWLf5eFLsP1kyAiCyx=uhi(vV^Ri8N|%T6!dd<8d`kPR$UcB#d(C<@x$n z3KPcmywUQTy9(qRyxw+|OKAT98d6qY2?f^^xn?=8QR< zm$LYH*r~z}Q;d-8tXB#Xr*%f-(CQ>&ol196k;kLF<8NS%;zF8%V||vhTqZl2;-Vmr zshW#xG0-u;Ed0L@dIZda%25T>^}Ex(fuw5xd_GbA+(e7cU|#4kTa&0|Q!9sy!f zujmmcGh2jvgBDuO<3y1l6fD}i%reQq2o;_-{{5+Vd@i~SFY-h8Nlp+tBduP@80IUG zMavso@CA@cs;;=_pb@1}voNnNO7jDWdsyt?mE*p};i>z7y#0$_xb!bP=V0+v*GB&E z%T~yhq6|ohD=;pX$US_$b@=vXUs0dC4|K5y*T^SsOy*M3YXrx)TSB|cOBO(;3<^a) zW>jfOtY^?6(7(B`TdX^CF-#Bk3Np4cm~nbCJT=%;$L?VfNfW(s6>FC10|iN0YH6vO zhNn5D`LAzt_xllNS6ht{U2Im|CRe`fi3 z_wPQaDT{YDeiw~1FBNABl0$|O1KrM7W*Ti3jsWyv`Na*rOPY!K!vh0r!8XZxcIj{n zshai+$-vU{U&&BQQoSy5F4V3`aG8o{6@(r!m5P8Ux>==kP+GnTvkkP zw6}*#arB%H5f8rBkU#!{F;$+^6{Bkw&jsm@Q6t1uz4jm=aE)S&Oc+?a#YyfN)&Rpt zhPldJX#~3lL_GT>Vy4&Y=+l76t*9YFn{WXLM7@X1RBmD#HmE$+%^c)OaT`BpH3-Y# zYq<-`qnl*yB?Q^NHEbK&nbE9;TRxRN7H#F=bprwqZ}J~bk*R;z(T_F} zmMU4Wc!TdwW>jlsBK@@ zDNPZ;BIyWTd~c*&blGppTPjT{&eld$x|9n%piu>v$)`yw`c;3vSVo|{Gn<{n*9eh? zMOhfACuLr8)hGEdGDrwWtydSY%e(W1T-+GUoxPf>ynX^vGg(uvL+d5zWFLp+cBXWAJuf4W!J&~h~BeJ5y@-4lRp67U z1V4Q5nqIzcyKxZz*aL49moChinV6c1w*Ndv+USo3+iED<2(W)NnJu2k&srNuFX>D| zQzN?So10d?JyBun2#g*TEYOp0g=_%3W5ipuK1#Nl%e{XkHL--M=qA9FUa5Wx#X>gj zkWD*w1mnCy`IH6kSum=|e7a+o6J$DCt`{{p2GB<@wBHg;O3of6v{W_krRp}NxdcnR z`rXm_$wpO5C^tO19gbyk^lB zNABvL$W2}fTu|}jn~V1uJ3I6X^}zd2hXrdLT4}i*w}1ccZ+}tN>OB8A^LqRTroV zV`mP};F|Qd(mn2T=2R)K!QyY?%6kjr8KR7l`{UnZie5vkorqwrw%FmV!p>A=Q;aUv z@NG^d%DCh3Ma`{!lU;^B+|WHU?Gs{>v86!N5MC|n&AS<8+(O^Lyf-uj&Rc`3gfo8-+VK$y6dG%=G|WNiNqbl+ zGw=kd2nz0gFj8@YP4wRk--FT>&G{(lvCd`S@BiVr@h?0W9xoK85 zY92}Zq@9M(GyG-L8qr<7=9L9CK~{Q9@`c5ZWK2rKRFkuDKK_mTw=(pwZ;NNBR@Is+ zO6Jx~Rm3Q&{}m^~)|kZGXYg^@xN|2q#Y4cKskCvE#+}zwnl$sO>J!RF$`!7{+-wLL zbl22R!!}r5_cT*i&Gdp5Cwi*A9q1%0?Z&=%RmZvVDmSt4W_HbL-grgfl0RTD83Ah(KP1E^i%Wyby z@lt*PTu6$B<`Qoy@6v)8HJI6tBpm?;+dYnVQKTKvBx~AC?!a&dC+9%ptwKpcUqb)L z*C^X_GyW=?7ZEdC!FY90sD}=&dZOnr`=!#o`&C^f17z1mJvl1g+}BgXRj-^D-{Nku zzl~>ZqLP)qe{sK7r^EAneNWwQ&4K}zK>4FF^V4_1-?K8#j-L}PTF{#n?v@(Q@){-) z`b94AZ|2Szy=r{6`fjwAp0q$yhl=`R&ex)^F{##HtBjXbl 
zCkn?K7@Jl^=L7GI$&a=tBm@N=0oEKUmV#CgK5h-869g$l?~95z9ijZC^0{5Bj(QLD z6j$;}HNyCmomqSN?Ff)j*eGLVigrc1i$gd|W)oYq7Q}T$40k80J6fNWFzvrns|a1* z{@&xfd*^%4JYLMq+@MnN2sE&RbGmyX(1>`ldvTqWp5tOf8&oxonCVS_(TCs6YPQYP zdr-C2Z2Vlx4>g5arCMf@nx!-?-FcFwH2yj}!wTKS9dtj$+2%*ehGU zO$>yOw@jA3Qmz;_sg6z3upL6N=rC=rih)LvT?P$vdNUcsY+JLL>`Bq?JJn@z%^tSj zUQREUPHHHyqC7o`{50xw;jJ0RQ{4T{u@=MV8%|Ent)bVaVkETeZ>Wuhm2@mRR3%_P z>`vi$HR5`v2ozpt*~huGsJ&Z_98T&hi3Jt>n|ev z)!WN?Tvc#TXo3HENW#@aH&~$cK&H<9QN*U8nSTg8%h{PnxsT=vO3k(FWPIx-;fb)~ z=#DPl?Z6$g=rK)`j?l1AY(kmPk;sa7F=9D1;stYUiAtcmFdTUVShXowNjw4s^57Nk zSC)=RVK#}P_kzBzD+TSmM>0S8J<19`S5$%F_Ov^Hud15+sgm z$w8gVLnYawtIaNlh>|>)x#0(BTVG`T*b6)ar@{%8Z1l-1qxEfVZWo*cNxHabJdpP% z8pqTK?4psqnGmI@NejaZj#akKc?0xy1DoEL&J>rp^7uKL$)fwB03@J^-?i?Xw6^N1 zquo1{f3=n#qR0Ath)v4#QVO54w_^OHxW>VVbLNL?uUd=)?-8Ib*PE#{I+x0vJLE3! zn^9}Umr5O9d$1K2&J2T;Hq6!^%HOp75~ez}ech?C24q54&mV^g$CZ4sL;W2$n9x_D}VUcU%of3j% z_XO?%|45U7Zk4bHO)2Mca+T53&pNJAkeygKp7n;biyikVwWU^Qizz1Y5Ao|9*SnYbxZJ(*}@T=Z) z{Cp|St(=RJ)rjHr%o%u50Ase^J)Z#)hZxTG9q3QEZcs*qU`3TQkEOgkcl#Lxg2uEO z@D$r0l&{S~SC~DQUazqpM1eElcnXiFV|e1{sR2*-w5#fdLQRY1Z)c^g3HdIiE)t(< z)2OZr!?-MUSKlK*(eADm$zn8OCSoU16*+5DZ7k$-?WY1u-R3CY0m{wrQYn@A6oOwa zg6ccT^DOOVQPWaY#kaz*Ql%c6y}RqCmKe5p22Yv=)TrOE`9f0Ipt+m4mrxT4!ZgB{ z5>kKeg}>dIRMy1bD%1PgB2NKMbw+|~?iNVj3H-Wlo?x3IsFpvj_T|@&g)J1-;>{P| z&OYm2-~OIpi$y|5?1sC{KvNu%ALX6#>a50y;P_VBo{$YEDZ|Rfrnd`c8Q7v_)vx5O zoiZfa7A;yl@6^5lx>3e(M%zgQwW++eC@s*?y{k8+D`~fT-&dV&H=!86!D+hesp zF7D|5baooEpzkB@;6LujS%yK%vDT#}rAt+h6n9fTu`p1fd7_3`xe^qYmmdkED` zf+GwQWs+Ayu?3)%?}5%df}S%P=8B7x!Fhq{-`=}1M$;~{A_5Gw>p;1ekCAtiw|V{Y z{o$$to2e;qd}f4NaYnvU5Z1k0tMtpB8dOT_4wWzd-Ad7QaV)uqnCzM;pyu~l+}N*F zdAg!m;*m4w?qGjjN^@hU-Kx5sWoJN##RiU-J}m^4s?-TdV5+))zYRi~W`%m^&~-mz!3lF4ts|`H*{^>;?XxOp-tGK7qXENBbWUuHtkE_f zT0@PivfKb?e~|Y-mt)LbR-E~A-;dNh<`|Re^P|CJH4w#*WaiHI2TqdI3UpVqlBZ2_ zZmm=;-}pU~J}YJ#?|@6#6i(EQRKq1o7nDqY#)C>ty<4RP+GfG1_0TEP=~;!MD!8nN zQ@J_mjn*dt_kLt=f^UB+J^m{E%JKG$eajz5{|H61eE1iR7uy3R3of4RrMnlFm0)@W 
zJzkU~$A#}UGksT5g|Y^x?Ht`X7NAdZ`KF>4%|E|_Qiu^v-kqV$eryho3kWo<8GnCq zEYTWkS}YlCtZMVx$A@k_(kyI*)2I~bDwXu;EVJfG?@|tNxyg3x;I;FN>&qx{n1-TfNNh@euzqhP0chOWwpw3qHf3H`k2L%j5*_Wa7#>@Vi5n}`cPKt zbVVNJwIpl~i>h^e6*XG;3{zj0i08+4=5@=EzE(c?Lg0x>xHzxSE3uR=x5rD5r{|E2 z495MxOY(1d)N-qJ&YSBvTcFij%ci@%r$VZ>1tHitUz40{7Eh@1j~U)CYlmvJOC5fS z5i$NAC;_syMgE*%gaeg>St0GJo4;o=P%N&2`(wZ1REC@aqpf7h8i1~36SSC}b0Mdm zi1EA3t{(H1P9(iBjL_W~(Udc-;u{#BIq;x#7xoSV5p9cc)QV3+EgGp$Y8 zzyI4`slRUf7q1vr{f{JjtG{o|Z^!<+$$!61ZOI^PbWeIiJ+tewGR<~7v@Ps!$vxZj zbqi|zm7&0#9iy)v;j(c719fM<0c#M!xcJGmL1s@#N-kB97F}&~Np_FxYPnHUIX<@Y zgw1#s!rcZH8I)JetWJUmIpz8$5BQH4k}SHnWQn5kaCm3+yRHmPM(FSRY@CyZ1!m9` z%j!Cg^0##i4OW-rkYk`lwbJD1xgk~XyPPWBoQ?;v@cB51RY{=47zaxFdui9Bsk9D3nX={qUEQ@cW0vC^2^8`(Fb)1TFIO^Uwa+lwS~;$9^8ouN z<3B9+;y404>pKE)+di=n2K>NA|64iE&+y2hmysW3FJ>TQ-(|2NeTZe;JAsQT!`s2{ zqzCDtOSXSJ{X5*Gd{hwaDyKTSu&+4gO5otQU%nO4TzgF{T@JQa6MAPUyjAoIy_R*$ zX0Y_h+tuuCi5etK_1!c^=vA42TnrjrjaUOqSM^S)r(gJZQTn8^&n&hhJ z;C8*mjh!=ppY~qga`jL3%lBAwsr|pH{vWOW|H}+J|B#^{0v5s&ir<4LR?Y{RZio8x z=5M65ZJYLTIr@@vnzP@LH1w<8;s&G^y*s_f*iMy=siW`*LyY@(IZZ*aak|yI)oW_o zn+|g-23gWCC7z`6rYYz)jMxhV#Zbxs!P-cTaDJEBV#~g< ziMQn0wU*qDZARZ>Zsd*_918zhu1SIKdjH?BS0gNYHOI17*_b<1R2GKxvIS-~NS}p9 zfrFmqF-usHal@Prl;*KETb#dWnPDmeSF$tH1Z(ZP$shN^Wvx5VwW@+9-m9N~YbQQ4 z<~7Mnd=GDym5ptkH~%P!0hJn&bPWZn{X(2d9b;q*q)frM#MF3;^)odEUu~b%C+hUh zt6~}F^cP#~nEhUG-b;rwo2AujgYjv4)YS2uodcn!vrr67jfs&`nf zM{Gys=XFb)K&5=7Ux>~XWblO;H_*hetlvf@_#Hw68m3|y5`I35r{(O>e z>y$ysc+Bf7skrJPh(!4q;pqehRIbQZ8+uT6Ii3Oi9ffyfVXh`}_FyJ>70~0jpi$5` z#lF`v^A!h9(ZZNGr5!4jhgW4W@imLH#`$3g%KGH?>nnqYrDij2D>wcDk$Uv*^exCO z)M>BZw6<8uiM>u{ZqRKILAoWs|I+LcVD3jNTzshKzd|*xNy)I15^gnU7B?d&cUj!b z*7rAVF5(s2u*fyt>rLI1!%`yEilP%)sqo9{ibzEjkc7mehFzYofvYTEK)%%jiH=OA zCxTRihMVw##u#_)x1-d0xBgd(7Np3QYL;WjUk=MH*^SH=a1YVqwz(R14!O?r-3tgQ zu%<*tG)uF0+I<`CT`o(%cM!Qx1=ZQIK`a6TglL7Grk1*N0-xHZ$P<|F;hE%=&i$GO zUmBcdLS}{z#_9GL5n?R@%8E`KO;g8Fp6LSf&d74j`rMT5i5SE6Y`k#U_OR%Hu5s|- z>-dm|&IOv=2E-L%Gd;G7BfzLadc$j*pY8*GP&4HI<#2j5U7vpZ<^$me!qMj(^xugb 
zzfZdrBg?-P?>+(woMj=+KP2}EQm3Mi0K2;yUGs!9;oEmN%{F&qb4MZak8)vUu}evQ z%9?jVl@UH5T=Xvh*{}aYZvXdT$H|SC_wT&jo!gUQma=SB3>Ej-zdz5!ql%mU^f7!8-dk5X*%UR zx~sgA)J!mw4Y}bepmXcIscH4#jJDxN$z!Q>3{b7K5*Fk*E(-s6n4}ZB76bF$Rk2zh z_HVjo*Yn;r->CWJb(6{irve(wt=9G0ZMV&4A+ThB`#~UHS)&tzd}2vCSHwT|8$h$UD{3Lxpk~J8ffAw*d0=^EB$`1_}Su|z+cbf|GqOU$KGo)`A_-q z95L>9CT(3c&IT`ex2Y$Q7Gysu*i+G-ni^mc+(2F44$R$Gjcy#WRD6D4sg_lIe+pIkf0 zy63iuWq{W{nD~_iQ+ul1ncp*&Rq4IFQl%QKu;zta5Bv6A4|rH6acgyda5LmyWgkCY z9MP71ZWmS{vkeKpe-5b-RX2nm(bwmg`Y~#kOaC(u` zTW8*r58pby)#JUQo%E~M6JcarQ_?C01)uS0>j-&mS-x>HBu4Ihue|#vb}D85nZMgS zGF2weRj@cUq_ZTf#wb`gF~dK^F`4$HWbS+9iLp z>Zr$yFB$FbUplz(dujg7#}qG~>=_D?x;86q=DNG7e)ZGu@+sEvG9n_;qKX7^1d`js zb2SG`1gD&=Z`=U2C0LknZBD(~TOXmO=H3Ra`_@pF1VIjM9dSOKg& z*ZQoM-6SEzDM=Sj5O0lj6k8~2(sjEE#tbl7#;1fS0)&`g2mJef*E=VTCfhl*110$< zvPvX)c*Ql4Ziksql(VKuu%UcL8QXAG?GuOP)HL6%4mCD`nc53+$mW{umm=}jpmH^3 zasS%!N${>31GqC%ejOTOAN)f*dsW>qCP*OH?i$4gaesGf=`oohL~NH8`Q}}li4Sq+ zw-zAkaanQu89Gpibu^cyVPUw3rUiL@WK8=nlBn^0_uXkFS)(6X^88Es#qs0j9}?t0 zytpkABWrpx!KUSc%it_{w>HUDH5nhPYtO#X_TJ57=ViTGH&V;L7@D6kr9055q%#gr z`}(TK+ATO;yHq2@X%P*qZS}o+{aUGque8#Qrp{BtNnGi!xMJ-55n$flbAoQ% zJ`wL6l5ZgF<1ij8;VDr0uv-_9o%LT#iXxQe08MW%Mk6w{rWxZkDxX^)| zZeiDgY3cJ)70yIo_)_2Ex3TF3=|g0mZ* zqgEaPz96#Rk$sdc+o6$7@GkR~i<7!kwJug*La4!ahk9-uO-~E*b{03azLrcF>u)K$ z(wvw_pCmIKia$h>bMl;%NQ*^l*x6L;#$RW~-G_txeoi6Moj*?`{D?`6ev)2KG0Z${ z#CQ{W$@vzdiHU>$0X8TzZkmC2Js)<_QjkB6+$*8TTj4l|5&)gN4`(`NeH^V1}Dz6B~?FF_BZRey7)htj|YX z*^bJ_nQR!WQRI>xuXfTV0Rmr{B7?aOEXuQIFxf22`b+sVWpB|nJM##DS^{BoP9yuR zaF~{1>n#R_YItbAdBflxf8-a^u17d|&Z;OwtFK>^g&ei;=-n?|0_`fEn@LtRbubvT z_8)`~TdW5NefLSxr)t?>`{lIBKatgc-F3`xj2U}gh%+5Nd{o3#sixq-@rrQ{^MV@Ln`B2OXGeT3F}$?Hvjm`5c6? 
z+*SCW61p`tO=6rp3gtMwfJfLZKX<81mf$bD}SIdxPQ#rUe?DI75lh)J6IM26& zkKg|g_d|kTX6h%%w>ap3%7*1r7D&*kF-xFHqTJb5`Q>1ON}`z%qMjA>99g7U==JPH zT}}G1l~`ShP98cXN-fKf8tG2l1eNilv+*7|76Y_Q1OGdIyCZ=~byzP$u&Vxd2yW1I z4p*7zX_~ddwN4n>;IXt+4Fa-fxNCDFb&77A|H`DLerfaK{V(3Z`P)vf@+ z4UeXB)_nZybtfwISto+8qN-PaD+>9r7KgZjZT*9`9PZ7yCl#x3CYQIWn2$&l>On%IzLwiOZKo%!{)^(qx*6`!!3 zGH^rZ`Al(BXz~PX#r=_X7N%j%ps_jBZD~%`j2pLb1h5_*(ta_}@%xN@ZE4}V+!?S) zo6w!Ep@Bb_FyH|z_o5Ki+TMPSgI=|cqx=Q_>daVmEfi9@BBEL z*rnoItSGc;5avM1uA}964twPq{zT~1Ror|yJ5zL^5ngccTMy}S_z!KtI?&ns9&(52 zzBa4Oa7onDRNlPfF+z8LVU(?7v-rGt!o7n$PrNko`O)IyLnaK;(dRJ`8e}`~!%gh; z&I<_b@b@kDLJM?`iN;i zwR`O+-`6ZuJN0e)A)&Ip;O&ak<+YL!jUE3g)R4dtfbq2ZiT9hpOAR+YcQ+$)S&{uO zq{>f602#`8szKN)8etU5^=|aQVtxwWgCh#x;jQAY;GxI+fmqczIX^e7ZOB?c7P(;ri#F${U|z}8;j zLZkSn^LXN^?)UW>zLTf+N`J27I*74WY-z%I=93k5`Cqh-QG6U-1KBK7P~h=Vcyiof_UGf}=nsSHXplegZu}nME{yn! zq!xDXF7X)9ws@oc?npftLFU>}~QJG;;-1(tcN>)GT{cbkogyQGNkFveChv z-`iQ7o9lc?$Gim@U*;ou5$;;yn1K$8d*$-{M*w?QOh1}pK3D_m$+}LBc4W3}dvB&y z!cu`B>$7eu)iOllk;Y%wBTX3?jCH((C=RBu3^VMFj(+o?N)AQb(rL5eu2h2!?7bOZH?Leh;JoHb@oZj$?qODKU|-O+S{nDJUx zwfdPLtE?rl8PF~%Sm2-DcP;w@F7a6`uPA- z_c&P~=4x7|r*n|CNyThy3=n0iyc1X@M{RxrN@`N*k1f0;ONiOPwcl5+SM-fnj$5|q z^x5N`M99(X!Rt(3e2?4lng%Od;^P701tN@sO35{Wc^i#%1iHHA@$V)IAdvmVw42UB z{@!niVu)^IR|%yipH#a8d-(;KH^2Ayqf_k5ZuXQ}Ga6bV2(xPR97E?smh7$JdqW#z1UkNAw_}us%^vuG@lLe6@9~q9 zP=u>XV=%H!KwF}w`kK6fQ*0H1R#*P~J}h^#HCH(6egC58hvrKa0fk+(B&%g6Is5!% z0Ut6kc_>8oX)5t9Th^R=*QkRkQ`PxXMVr>xf~GY+KQC__4jpo2AiWKo`C@ktUl5^P zd~ckP93OA%1P}5HNt#!*Rz=ADH13Xqc_C-ZjuVj0eu}0A;?R!W;Tl49 zz9^&y%%{}om{mMLUHEv(P>j3mKrnwDkLZrk&BxB1C0|>{UUAoy+5I3Ry@zJ)m@uO*i}y>02SGIeB#AnMz1uJmq$EgQvJ6w z!L(9p132WzQ&@hqq-_`R^sdh{vrq}%Ia%YXxq`0)EP^Jc3dAoQz7~k}^~?Z!bGv<~ zM=P9pz1z$?v@;vuzo!E&?&;F=nYvmX@Dk;*qN1uce!m%|AyLF@IljI}`hKbICtgZb zM79#zDJmu7&`g4uX(SA$t1-22)Tf^i5z$a?sWl#1rSaR@|2{@{qP=3Q=lNCX(N_9& zJr{XM6z&xX4ZzD6?JY-Bd}Bqwmg4Q_42w!DGJ{O%Fp?Rs1=i9rc*?IL@r5*+;$T>v z5u_dMmW)?|reJ-h49eE}O8nOOP%4V-J#-J$x0T~%Wx2E%z6ZvWYp?*%V&CCm(IRT$ 
zN5quET>9cp;@31)OO85yhZfsa|1eL&vPWj1j|RfT!%kb3H~aFb+(ZO zJMZEN7tC)hmw5%O3-_;=ji6)`vE+iFuid2CLfygEu`H@$goY_+VcL9{F{b&2(u;l^ z@ancbWd=rqa!wEA22<*dq6YXrA(w`$4Q$ra#3|IymBT(7jUK^zw*Yq-M$>DhP%e8* zdEj`_;a>b61;DAv$&{vGS)W`6-6arKmzL-b85_^|eS!=!ZR`3Zt!dX!uaGdImk6jU zj9bh&vJOymGD4R1zZDuSJe73oSc(D9JcB}mC-cl_zm`^!v}Guu@FZoioHFddC6Qu& zOgu%&$)`KC>Hllz?4Ob_;4p5xYqweEWthDv-l|1FhNh+{;?@ikE(sJ+Q`=^yfcTCn znq3)sBVVqDQVQ6Kg2)8TfP6tOOPlWs3R9Fs=L;&O9p$JPyBPg_uF6km39Ac)Y?@xl~?Ek#9sGRsZmjGsG2yOwl2iw6<; z&DPq3PfA=)4dW=#gphe53@n#JdN~EG~t5lSIW*kwBjS%_8j^;n+#&_1S1?O$rs+&q!YYH2m zNN36L@M(*lmP(Wwo-U|yg@OTxb|CPdEK6dRpn&tDYig-$lbhq$__29{UL$vgGI(+b z!1$xys42u3ujXYlQa@|nff>6T~SHjMl& zneSx@-x+(WltYOFsdD#SVfCnS=ATBHz510L*t2LF!%8Qjp84C6s zI<*KGKj@pfDT9JfUt1?rE~L-6>)boaoc*t$Xp(a;XZI`l1cReE+aE$|8jaTo(if2r zc<}4cfqd7kFyz(2oEzQy8w6n0v99?)s@_)&zC~8DREKjx(fT`{hYU-k4MvM3`vwi- zCb5p>)>k(%7lRV0VaaDyH4y|wxjaX}u+$po87J9`*+WcTOzpD&hXQi z{c-id^)BbnG9iQ*Fsd-tWwoiOYC^9~qiiBtWRJ)H!r+CHaUyqdEYtx1K3@}Vo)7Qg z*`V=m?ypyLGaho!lh@_ujZ0Y8`n~Ayn$+|n9|Nm-r=sKqDqvS8u_o)t1o{z2xC}pH zHm22hdHSs9^s*4oXo1Kbs@=R}#N^S@AGY`+$+Bx}Mu$D>P98PKMPbhUng`r9wnBrQ zPN4r8(%783AcCe(R=t05Q^h~zi7}CcF-pEyT6>vXGmsD2j9J8 zli>|3$J&~9K3o3mK8D9S&8`jH=?NJv7kdJ@>g4XLxn~ErtsK`Jp0%KriX>SS$Qtd} z81-)bEz^{n7l@y}i3w>?*AmBO@p8_^DMkZ(4vr3g4Qo7ce}gow!d#w#Wq>K8rqBY= zg~P)Op%bhXY~rL`NnP{0LhdKJ_{MFyQ7A7B)T?79vl7{J#$JolR{*M)mKvn3YbrhL zQHHGM=pOW^JcZhzx(!e1em_Zdyl6Tym|%amf%EH1QqqAZSuh0g(WAa8L0J8_29lihn diff --git a/docs/screenshots/multiport.png b/docs/screenshots/multiport.png deleted file mode 100644 index ac1194b1fa96040d2798df90b139721db4b427e5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 72592 zcmd?RcUzN9*FK5`5u~UH2vSr8ZaRX1gr=Y%U6D=#N)k>zp-f41cVv&OpaOM@2=& zpz-K|J{8p&FcsD5Z|BbePpqSlf~croQfWN6Z{RydIHd7}3@7de*%#roPNROAs|dWh z^(pbQ#-o8*SIf5?rY*pxmfHd*V4T28F5?SG5*&3B2%fp zRRI&-Lun}H1Mth<6N>@hm!{5XV0^nfmxKo$a|^T%!S?F|Cmns4>I*yupSImED(eN! 
z7+OqHUJjQIJHQKS9`; z+@C!r`YKGdTEY*Q+c(!GPNiAn>zTwdZE9$+hQJ{BJ6@gG^tz=|*KU>o$UZ5zCHIPf zWpY5{=8PD5?32&l?}cr3d+>flZs^uHX@)0?>N}5K7@Km1kKE_s%|Pv90g_oiv^2dQ zk($t#JB2*}3(zW(ONP^Z7NSxKB_#45_qFRNle278_}5Pw7c4d+s`Why z5;xGrehMV?mfHnYlJ7p-3xo;L9p)F5qrS;8Apa3@2^nI3IKfXR-^k)bBh_I|698Yt zhWo)0hk564%La#sz75=`g5N)couY~)usPQY-)@D7m3iQITjTACcbLRnFD9a%JhAph z4lQ@G$)C!wjtuiZgFcvCZ-_@6c2dtY@XJ>Y%_Jn~6RoF=;oW>VUxEr2TT(xh-zY*5bseBWu1LA`h-g=1;4I)Ws}62yPSHO2OV&0 zHGjrWF?KIecf(hm8Qd@_)Lx=*pRkPmytx7jJZ)*+i}zXm9;vuJK~62E#@_R&Q(UX| zB!j4em+MX0Qi71A_vy#obQjEBhYgI)i;6^z931j0Y&xx$x5pc-dJ+{s;ZIYgC^g&i zh|+ss>8_HW-ldjXi+3fnInFmGzI+_T?&}<1E!KX?b)Wd^h86LJ_7$Jr9okCr?bz$I z3N<-A0&5*vo8<4jKQ`eX;Dvj0h!_-Ype`Zx`X(nR(&^lx=>l|)v(IY{l3u+-PO~Or zmjfHRtx;U-9);n^#Ep8p(;+5AG3vlNzsO1NRS!bg0UXJ$M4~2HtwI<%Y>W zpdZ6hPFAajY4fUZ@yM4K>JUA9bqjqCdTyTNhQl;-;=K#F`L%|gz0L19y9PF*2WZ-3 zK0>aF>k%18fE1MQ21RleG`)5IY104Lz3zyg%uYrqox06*5loz_Sqt7QG`2`{jfjlY zy=uPg2pzYA6TV`Z!Bj67dBDBj=;+VMw-LDFpEc6Ue-#~>UvGW&?i6k{41Tu9c87Ol zW^qUQ)ulCbgH#QF@{9R#$S>>_IUH85@V>h7$GhK;Jp*}+xjHY&G(z%U7B}y>o+^39 zvrNBDI=A|rT3(XluJ-R2-ob(Q=PxI`D(15+tf?#Zu*w+^iV7fIPrGh+_yRZ+n|bY6_AsodsxK;enR$Pb#p#zMU{^jcQ)+{XJ-;${uK zEG(d|WQRxjMYG=n=Tgh^b_TQam!o=7>o|rDTv4LPm0&mmYyDPCUT!-wV)=>cJXyZM zwQ>FU6z{=d3&8|A?SA&lJZfg7H>v&yi^!iSAr!xFVP0JhV8pw{%i;;n(uhTOC{8$v z%MG?j%2ns4Ec>7|s!pAH#WHgRiXcofwTtx|Amq|TP!l&7AB8|OQ&}2=(q|t{*gXHU zP3%pr>1=tP-^_AAUbASQJv?aWKfeL5URl_M>o9}Oy@#IComri;ixk<1g#8I`_2ESY zPqj6XlN6Jd_W&U}umWN!&eC}!W8`7sqWUU`Emwz*!f*Uod0raDefXPMV3Y5Ojk&%D zP6J#>TEujkKWs0oBRW(YZ%rfb{~|0I3e&m487YlA9j32+1P}1OnR-KVI5o|>nkioH zvSU;j7W<}}Ql#O=$nn2X8=Sfed`5EG!G$?5lb zuU?&6mTjzVuUT*FGegITGH^h!79K3k)Km#a%`CpR`WnIs20?N`;vvE&-#Fc1)T4rK zHUKyjsr4bN3kwE2{}MXcmNPPYLMdWPaw-Oqw%yJ#oSNuzFXg+*`1$-A-okRP)vQ zH?8vJIjx^kx9jz8UXZMUqNYOdr9Kg-8^y6u?9{xH^qL}*m{ijbve(_<4j=PV@o!7R za_;gv5N@{--%=+lTqSVG9#+elod-++sb_I#+_91GkyD;@Ax2C2D9-E1Z+;g(Q9a*n z)I3b&09Q?^0I03|3Gcotf#Rx`|J6Jvd;X?GK4JM?js)F=T=~s4Hg|HvlUI#(jn|rO zPYaFo)3?vB^QiJ%yg0SP6v!BL2Nmg+i5n41M(_H#KA=X3_L?)ERU1L!8NADEiG9!s z#w4HiIE^93M 
zddSh3EcxqVsE;V>v{JlBR!zNB=5e`CQ81$cY|nOr4YD8aPE;%z1qYwyrGjM}N)iZbbsrmhiyKSUo;*WsASq=4NdEQCx?BeumRoKu?vf=dX zp2Gb!z4m*IWT>R<5AVI|ksTW;S8QMU4m?%1j~{gp#+6PoWif|T)wE-Ji@^?uEBhCU zHtqU2cifD@NB4!LqJ_$AYoKT|M~Rco6)1zsg?NUrgjf*xy=teJ`?R-$A?Hz%zQI(^KM$ydaXrd z^0o0-vvu(aIFGMNh(HTej4X@r)wnL2oR1gjI@T~ZVB)96nGPq~uEFHUR} z`5+>_%*-rFAVq(-UzO=H1zB5Pf3y~8Dr{;xUx!gU4EP>JyYHGN=xt0b7C`fJ1-FHj zi7oD9^9O;4B=5^KFKlYGEHgH?(_Xf}-^dhC3@V+x(Fjov7)#%QCJ@M1ci?*K%c%=y z{AJ6tZ#(iNF6B0c@>Z*L)e7gTItcOPHi|KOfmk7FY*?)3$8=^u>681J?~10xqoRsV zkt%VP0S(gaw#Hh;>BodzOh-q@>k>nAb886fs2X53RCgP4KsuMGn!Rpfi{ewQN!33w zp?a2%@ZEz^B+u7kY|`<-d(6VZ!t0V@48{(AU@fbS9=QpC;dK<%MewIK%)NU~&ee17 z%eBTSY+Kg+wH9cL>})DynAcmw37w7cyQBgAcNtmR*833y4)Jd9D_I z;n{p6KJlWW!a4Cyr4bVR9OB#THwdZ(z@JK20(Yn7`p)oan0vrFoUQLJrv0v|>a;hD z`S?+QlX1bH9HzucWVq5FGLDR_u6hH()iTFX!%H)2HmztyMN~9&Bp6XDrSU@&5lviD$BugQXvh;avpZ3 zQOZY5%l#Ru%xy-luEkztXG=wAD@w;yRFSt)Dd@;zteu7ITwbbx`Sn1p*0@L!E^|{^ zME`!&dpo_|qkWTS4|YQZ!NrE4m0~PKWmGSHZlew`-OJbF`VKk{Keb{zuh8IXe=B%w z{!SCOe114Rwprw1*O#((SX{f!QIM78J1}Ba;r_ALKzGe)=LfdD}GTV1}rN?*BXTju7hL_i|_kirs@L(J`{CzX)asy)K8a3i3VRt=smuW z(rQ&=B7zYtQD~k66P+(=&+UGxM;zDUbV1<~GW=~dyO|f>0`CGeTZHv}t5xcUX0PAB zp_!!Q0ED80lc+C=WqHEGj@j%i+dUZ26Y$PWR8CF9dPK1)4HGv#hZ*Q6xBo)38;{SS zI+jQ(HQxOC^#sG^ z6F8Y9)tTV;Kbk?#BR|PrZE3;KL=-93`k4C!&MY_Q^5o;)8-o1&s!6G?H~?_(%7whQ zr>1)8uy%Z1k+!{s#@uqhIJpq$6g$Gf&0XnIyCTdSmCSTrEMU&Y-dq;Gz6tmoFboxE z7#>9J5=f|&aXNpYrWV+l9Ktrp0o1f&9+TMS5KJ(2u^F3X;0=oJ00yUi70mnqlXnpu z5AiRNmX!R>p#{Yhrj0Sg3IKge%gX94U%Lg+I*PkR4j$KA+};c&*xCopMt0s@HaIU< zA2P@Kh=!3nXRuV$fTGfED%)wrDZdS}w7xWW}T_2pLu;2KXsHRr`n6nhqn zBJms_g@3uUHG(_x2M41qswNJyX``+#b&!8H!av|oQ#yXRcnRzu8yj15?qkD^jULOI zMHF?s1<&yagAp7K?_Y3qb|^vC{}K9k+Bxd5$wRcIwl92m-t~asc0sIOX{%YQC^NI= zOM2P&FHJT00}om;suKVvg;31fd~VK`*}{_agD3Eq(l=$H4L_#yYi`dq)RG$~VN!@zfAdXXC?>XfayHG^xq+}nFI;_4IV0y?iR?8FF0P82 znwm&K)E3|l4KEm&m}-@J_xm6JS3QfQit$EA($bll%dWAo}8S-{9sai0iXSW)!FkvnpH?H`0g(w>y!Tb{&vk_Ww8kh%bYrw z#s0{1ghd*u>2Sx?*wU-@b^3DVg}y3|Yu6w=0yh;>{?mvB?7tXb1wRhJRk(Y&M=mC0 
zG{}fZ@&2eGHUxyUVMVI~J3+o{UzXd0=Er@2_)al?iTYr4wGuf!y(CV=!ba^U4>T}4 zJKMDJutyO?BVPNH=+Pat*n0ixh4G^IVl{xF{k0)>@HXhU&Z9f>sY+mAKUZEJk^Wx;R{IRhyFm5L=H|-?8Qc*qgr-`pWnwR%6tB^Lt#EFVx`uk-lQK_BN zl2)kU=FJw!F}yELkrHr2{~c(M7v-2Q3@<=djyaAG$a9PkQ*gu2FReP-Wg3frvw2T7W} z`~j~J6zo(#FroN{p8^~V^Y!%|z>hTsHF_PYD{24rkn5=PNM4GOM|L3K;8Wa!}3xLV$;M{?N|JDH?o8_bb_F@f*4_c61-f1Kk z#U>_B*LH3cBfMn)n*2-rO;qH3&FUDm@EDFqEQbql3;b2uMetdyC-^^j^d5MRDwq-^ zfWMUQ6bGX;{ty1JH3Ob~7H}khCp`otJFzc{i2thxAPW3#b^;o}B*p#$_rI+uy-@va zb`oCzDS$rz=U@~X@VD6sG6U@aDnl{m{|p9<4M<06_7@rdmFz#QC~c_#=_t+qV(s6# z0m1CQt^RK!=5T|H0DC?5X~+J%+`v}G7`8H5i}l}5%mrLS;kZ2fB=S=MvnL^GjGsTx zASLPrZIsjfkH<*B~1l4(n$AXS)Yq| z?%gJRAY)3AN4GAq%szGUewKE+H1yBt;nKXnn7H%b?!&Ix^(2*~n#hr`KrT0IxNq}Q zT^0qysZzj7pa9G$wTRA9Jv9N_L#3NahZyCf_2y;3L4gl! zeLX7Z;IO1*G(AeE(>$joU#~STyQRw_ZA7ytHem=X%>S^MQ`oF{)~QT}-`3AarLD5# z^#_FttDCkC&wvy zZ<}lzHm&=9MME-`U4Jl)4hGPt9?3&@?r52Mgr8{*luxttlp17=Izis(=H zc^rs;)h-4EG~kmm5Y57pzQ@jND#%|tWmICTA}m<;ctbNk+yCkNJI;%rd;BZB_m8If z2gDe9R`~0>9UVXU*UPv@gDMtT%8$gd; z%r#D8VNKrGHThubT0j*L0QcM&UzXutWg1Nwxq3PA^Oei&jWC@DEngksLCz)w`{T8R zuWS^|1)u@sGqo48ocXEr2+1p~Bzi0wAS*0b(El?U*istzI75WK*C+%fH3t0960JuM zj|@zo7bk9n8bbzI%Xp0j`)>|2^eo+0EXcPg=FcmHO4eFa+e*hk3MOq!?)gRHq%7mrwxA;Nhg5V6pS( zjU+X}0tbnLa{o`Je1@by->h_>yvdr6Pr5pYjmj&wY?&E&@G#-qLg|(nbK+WW(%aWQ z7&ZR7dj6a1MZ=fYU5myp?)=#x{rZYlmDv`BFeerrahHnR+Ap!a@nlDYZq#>^H>&UO zN-^ailS+Lh2(k4w%1OI>_fury!e;{9Z82@K@b!n&!nvp}!rjHUjw=f?1=*A2+&!|< zF@Eg#WpEMKk@|%d>o@mg8LP{d+lec-qaic;ezQ zrfUsVW%RnWq1p=Govej*4zymZ>40dd%G!yVvR$h6_4+yU-Y87D{Kar+^6}AZS^6KA zv%qYc`g|G#u(8*eC0E~Tb`3>W$XciyCA}-`J}q38)%7}x@tbI>q7c$w`&nY6&-RB_ zqkFJX^$W6OYU40j6ezISP}u?I7sYb!tZ_9X5{N-aeOK5bM2r~{>y@@9v=eGR#p{AVuPBTr4_gIfz<-#Pf(Yqy#N7t+5U zMw>M#{Yhnce%rM`8E2M*c}IHUDsmyh-+FrKEtU;Qi>vn~g?7IwXkoTAubAGW7M4+{ zc{Ud+KZH}>nf;{KJIg{jAk_BoIhqwl2jg-2-6Ea*Y z*DQONTZy!&0H#>swVkgV&~8;QSY10ui~RGYdG4(!u|&llHY`0FB1OPEa2*-YfNha%M_icNvaKkDjTA}uy(%909szZDG?a|SJghIdb!&;l2jyiFH>pOPTevzU&mx;gl{ej`;Xi6t1CAg%1m!m}!i z;8XoevEHPe?V_~CDlh*QG%y`{; 
z{EDL<8oha!U{@wJTsa(BptLwLMkLo-IJ-0tU-C`qS4JiA(;{^uGuWBW8LNK8Z!q>< ztwj_x|BRzW3iU;+$fLGaF%+AR7y z7(O4$n8~jVBIps_+l*?htI$ej3*2CDCuaswMt@c9949UDycqdzhW(v7F0iO|JM9@C z)X;!Boz`J6x*@b+>a20|gXU~&^c+ac5;muMqx`_TX2K}%7ImAkQTX0Sccx>f7%~o6 zLO@?{W!aF7#Ybq%KRv%$=qW6 z=17Xtp)W+>7eEufn zhx8MebS=11dKjP2&IXqAhOz9^82cj<>5!5vU|Z{EuPe2t`-i0H=9aE=c@snV4aY@G za@-$}G9aY^zUiEIJx$b@8pX)lJfBzOee&iDRR4%GlpGyxT`>2HJp+hH%%~+ckh?#Q z{7g2m5tWXPP4juIclA!Rp@|rakdm3ff~UWA*I6T+43R@26gZcvY-?+#VL(9HNf)~n@K{FytqSxlEDSmbdd{k?` zT5IahCQ^%1dzUX;P+YA8Kb{mq(AdJrF6>jamW+`NVqe3U&{J@-?F%yFqQQ;v{_=-i zL)c`Judiw?h z^SZsO#qjw?F}3ilUX-$MW5dp+6Dobz0s(}NlnNQpv{uHiaBrIxspwebO)ct|N@Ca{ zj6+LD)%#)BFwEnRcnYx}`ntKK0 z>Z_oHN;xBS;8Vqsl{MCI9JiU%ZryAPz>6G8(-0P$j~9x);_5Wo9GhhM{M@t82Chc0 zM*eWUEa{X$bOQv-bF2|M?wz?t#Nf_bF*(lzs}tV|=xP0d?kd|DWFzOK@bj}jYg ziWD|C^!==+RQw2u@pDt^$2WWq_bvWT3)4Xtu2bg+F=%Pb{3LbF)220-?L%8ZN7vdz zqj=B7jwPPYX|J65^ZjWIz;f~LMCF@m0KH>HQRmu%(XLkxcJYqa*uaVnc*oo51_U%1 zzcCerZtzC`#GnBBCVLrx^O=#mzI~5;;fEB6YNs(C-o;B|U8|{$*B>;`MTn}y7N^-w z{i)R6_)vxJA8k>6c$JYlb?WL@JF2#$GyYU``w-1^HpD_4?v*3TyUzzr+z*C&s8gv%{pY!IKe98}u~V zFzYllm&;U8u9g4l)@x-h)yYWI5*MdrP!-k$=6ADO6jfPs4*PQE?lf?`Mj}rJpt<92 z%cM&9!bWHCsgzZ$sF_$B0i0{ON=Dw*m-7~CDVa-FVVem#;3liMP2cm6C#~3gbPHz{ z5P7>Fd7L#4ZLl}0=6?C>mRAqgAA~6|uhr+HxxbhC7oN-cu6-noNo68r#skktyz469 zQ>=Pq6(eOc+2(6nDg(9v*O{fIH9mNVfi`rwBO6b4Eltt2I~mGBlD8C&Nuhtj z`j7d~L*LpR4J^^D4lc_w&5le>{n1`8JPp)_a5Q1wnqmG(8EMxima3`#h5F%oebEdh zouyWA04o(9q`_?y4?F-noZu@x$tmFfQCMr@O*gZ?DU6f=NtEU<^?cjvb?MVJijvLW z`c7^6Vy_kBeQf>u%oMaTrDTo`Y>jo5^QnxqgpFcd84i@W_;okVEKIj?7V70zMy466xdFcJ;83YbC& z;uGy;Fxq1!&K_`rWTIXBD}Z#+A)G+d!2YKCLkJUq5~3(T@;xpA&xsK9@sYW8^IYQN z5eZhXAv1M6oC(R%Y1lkBF*y)~B^n!*jRuN}_MW;*X6pDh)dRXC6cDOz5V&~is-ix2 zVc><2W_zq;<Ew2md?SGMBv;$3YDjW3(DGqOw{mA^o{t9 zYc#kS(_Xxz&jb(#Q7MD8fUno%9__B5k; zo_*Z|gD%PsZAyNJ=MuO_?fL1KCJW(0s8DZ$WQZ1r`B)Yh-jI?>r_tTQ$&3t|B(&x>$wDqQCK#C^C>w_+Kndj~7^f~o1Q4YCIW z1KfH_su1BaZeZfA(UZe3C{hqto}v=2+x7Wl$63=leigvA)&BHa`gccD63A3kQypep 
zteaud*Q{c!V@hk7B`<8Z*|Q!;9W47)*(@OzQVOqFQ@H{|1t^5$`@l@Le9M0D|e9qUHiUvgYM-mE*Ox;DRnr z0jFq9U~4wkPAeNAa3wNbci~#XG=o_E7SgzS-{erv^ZL@f-8f?()rS!Ot}pB_OZ3i# zdHbGwcBewq!LZ(j?X!l8S>)^1Tj-7ch>eX>ue4KF#YGg587EW0mv7}NAL3&9OIzA* z3g3TdA)YUIf?Qx1z+4+FvCz zcQd%aMfZczaw5DEC=M=J`APts&cy@kvhs?ZVd@(9@`xjh;4*qfD(5PA|53(KEyA*1 z3o|mt=e$H-kvb!>?N9TJGY(E>>WYNa4tso~dFJ`Z5c2iPXCnS~`K>{uJV5=RZ{^M=7Qv#h4Q(yZq9gQ1#OC%M%s7Lm&R zP)^yqz~Pra*h(0nEnK!R>4;hcK+8BhohtVA1OO&HMjKo(AdZqLz;o0h(0)Ix74NAM zLl@fzEVb18zU=!EVibAFvoE<{x-EPsOg7@=slMvTRF!MkI)?zN*VHt6^`I@cGoRsC z>9dF56kN|et0UK%o(P@;w<)5JI6!il3VrwHvtfu$r-`bl+nU+UHN>-4Qv-Tk6_4-|?l|9+URmBV+*+DVFElUu)_W18_wB zQXl}IH7(@_0&uz6pN8@WX^*2xy8)d43TtR1`ZT)Y<{im9LUIA`rAjw63wl;HYZJv5 zs`;VMUWBo;xL~qMDt?p&x5jC~NV-7Hiu(EXusE~&acw7^tKi=L()jTT^VB0@2~?mu zZ9RpLCv(*Tv3VERpi~i(xoFPChZou9!Oq583=5TR-F|S0^0%^z-#;QzeYm7ZHPioG zP6FiUU8!(J!apm+@4dmoWT7P70dO zt`S8o*`~t_yIWFUMGuz@E0>-#d^QR##c#Z!h)RuYTCU9D1dksCqXEkfE)d46wF7K? z^)eXo6+anV@Iy}I$0hTXQ-ieT?KFUltS1_`#Swf(P+N@gj1pX?{<+#g7Vtii`cEim z-&;zsP=S(ftHqg$MYCK4_~)E= z3P91k_TFGdR*ly1OPfrh*e7fCcKFwfq5;(Bj0bPknm6F-K#Wt$sKA}6jdSrwgUqHY ze@eVAF3sDMdlgH4j!#MTPEJTceT)5E#%pVOT#O74?F2=N<7NvLMlXK41!S3$PF?Lw zICb@cUf18+9@s#@)-QE?5hmJNxZk7Mx`fXuI^l`bs^N(b$;^^;Q@vvO290lJJ6Ty6 zK_&{(6`SXImmjG@$+H>K3zyA+l!G_>DV{+3I!4hxI?lLX>`sr>`hno6w!i^)j(pDU zJX1XB=(JvVZXN*f9h@U@V$0Q%HWQrIF_3N?WErvz!1GtTt}hMtnEuGrB{K1;rB>LG zbmb#culi~kVSd4@tsK4X98;Mmo2T+4Lb1 zI1kY9>#?)6NraXLKB*9X#${@wLsDyFj)JGZ@DbkaiAar>KDwI01rVu%!8!i+Ui^Tp zGq?Fs$y^Y{Qs_{>+{~+H_KTYE?f~_IEp`m-ESBUS;!p{?FSsyZ|e->uS8rqo7RYWYY9g9hA*ZIIY+zen?lr-L4o!e~2r%Lx8ta{Q_ z_4&glAKRzxqzY@iWxL>?FV3Xx;bP3Bal1y zj1a?3QmiO=>Eh@t%R6}ioqtP7bX=+zZkup`rf)L2vvTvKRI#bnW!0m2qTF{5#A3R9 zVP;^XW~HO6LwrXKW2A1g0^?+xkalxoSW#F4QezxJsxw@lrl9DRyBXuVJF5T+pANGD zHe_M-HD_Er3FnbA?G@%n&BIdOD{;K2BzYhUDI=H5?Bd2Gb|`b`=Dc)Ues~@u|8akB zXs3Iu81OHU2qk9*sonWD@A{~whMZ;i(DQ4x*Q(BrKk_eY4B~+&N92+g>*&eAM-N%p6@0CJ^lfhLiP~g!jxUozMSG0gvI=3 zOzKY$p;FeO^Kh&6uKwWl=!#KJ$hiO6Xn@v5-au8#sY2(k@S@9t)s5EHiHi5z8h!L6 zqWi^B{wY8$xBFzAY-PqJ>)+aVwh`K 
z38^9hRBbXO55nrTkK1CTUGuF+J9ZwpLe!9xY5ff~3&=G4%k|j{g8{M>?c=lDFp2gJ z$BS}h_NxMpx~bCk+aFYUcALnSkCht}>t1tl@3t!=Q+K)sj*}aGZSUKtsQGw0_NKk= zZYk-Y8|maov9jT+!gH{h9W_h4z3{`{``+(<*0^!}%LixXDOa>K%Kx-AK#s;z(R`>% zZ5R$n5N|Mcy#UA|E6Mxt>i*_pak1+?J@$!}dV9lxs8p;A^||=@F;UzT6?Ir*hd$3>26rFf zUVeJ=QR2z~U{6Fj$!}4Nt5Z@p$18~vpU$;=mzmvaEY}B86}Gk}<#lx;57PW7n|lt% zCQ2Q_-92k4rn`(x8}~G*CrR2mY~K&DRSxG9vxls5sM#Ly2c^P$-8h^X)x=lBYDeO z1l8u^LZC;AMgoURJ}KYjpV8OZFG3^~VLu4>7@eft>V9ytPl(fOMK_0p+1L;fqYWNa zTTG03*9q~WT;{eOTFIk5nmvM_`@cOnVIV;|Oid;tdKXVfoLs$T6Su!5R3a562_ElIkK~={Lhc2 z_-)7!-cswGwdht%{}n4}%w%E7$k=U%7dqY_`V~>9y{q=y;>=-Q)ttP9B%8B!$CRz{ z*yW1=QE{L001~qe9s22CbAyuo5tV3fV)T6}D(j(=Wa-(_o~4m|?yp6IWyyy6e7`i< zDG8Qt%Tl8>#Hc*gjNk8f6c6|*x7oeZ$M-k~2U;$avQS*H=oS~TSl@^tJ^3of7i;&d zCE2x}&$a$=8sl_2CTwHCO z$4k)K>=U(p9(^KQ?LPu`IvLB7D`zN%-;dgF~%_F;M5@V^*JJX;zvKby#TgIcK=L@Nvus|M8B`w#H$cOPN{C zxvV1l$NmNc`USF7p5?+0_uUs$vL+wvk?~V3Qtcy*Nf*a+mDF(ee0J89Dz>|}Qv&ofm%_`_ZAQh)E0aujhfEyGFAn+>;IeOJ!R=5h)GLhoz@F=Se^`fFXwQ+=C zcS)bJ)e-naxp&I+x^oM%q^>~rynuwDG3v?B=|Bk}|FEpg5}huNp|K(UIffPe45PU} zjAf2VpzIve!_&AL!=>S|%rM1o*O#x2@rB55cW|@VkPXZ0*V~fq=Am0_&x!t%gwRE0 z(ttXv`Y_0I;k(@SAKKCo{toYzQDIe~wk-x*FIs-1KA_3@Zm7NUiig-S zehE0Ri?Y|b6WtHoetXqU8_-T2kElg(9{TCne-R*Wk?eGde-MNcQxXl)BC0H==8we{36%nQ<7!P= z=jZ|dW5deMG9eDb&v}N!LpO#Uhh=;;y`Np!tNS&RQ{hK_y{=+^$2cXe>*{QzL0eQ#+c>*X=BPhN})cN|s(w_=O}sx8Xq zz}C&fi>zQI)(J=fqOb1C5Ow3y{J;6kZSBZqrBy+G1n7KD;tlan0dD*?Oo}MpQYG3s zI}l^W+Uz<7MLxX+(oJEtQl3Of5gTHu4{o=8}1KmWvZh z^6<^p3en54F~Z_vV8KmRkx%^#SAa?lNmUNbE@(EgQr9dgDXZ+Os((O$?Rcegsdc;V zXuW+D;pvTxs9OztIdJV?W!fJ{pX6X}ls)}Zjc`hLO=!1}>i$89%zMm!0l+{G7|&rt zh4S^%6yfVA5X1`VbT3p3mY3k@lc@PE9-#~{!R2csl|Seuetn(ji3n$kq|8nh>T6T% zgsU*G@AJknypye})N-&*y_GfBk^lXwpWmVxuxz&fF56spn&PG?r8-ZlPhnFt7=WRY zyz0xWpKe~Fb%=o&6=deOzBal)4Ctre(?Px(v$0Ea?B}OKeT_BRX-QT0;@+sC$09Uh zk_`VD-OJWD0}Bnau;^Qw3+o8gB_$`@4(90So0&nN|UDP7GEKWp#cYnICC#o!FFQ#$!Bz*%tJI~}{2$(o4u%ZUj$I{o#n{x3 zwM!)1Elw6rl}r}KbeV-~p`%%r>~{u!S8M?FxM`CjnQwtG@)8e}b>^zUSPW!3#@?EQ 
zYxc&VwiGlvL?oWa#u(d$N;tjMijG*q#dv5LzLwSqeOKspTSKVuw+&poG>grk3ku-* z1e?XDz4zFTrMG9pA9Z~EFmYM%ACcINQhjEK^zg7yTz+<8q4vh+CZD1r>_fGr z5#ZKA1#LmBuyNOBpR%}~!usoj71y9o%ExTCAYY0}_`;m#3;}8IsLoNo?`+E>r922~Vvxg-inVN=y4a zF>z0ATyzW5vGc5jAA=gcznwhUK~XN0r@m{>_9=`heSb?v)cCj5hQVV)4L9d9n|MqT z{+(uLoC%ASH*7JrXjrJfW#*}^wSLb2$YoQuSdqS7%bR`mbq|c*w4?V&?r1!jj{9VJ zv-ROaeIu|)HXq-gg)fms1cH->u5MgH!qw65&qE$Oc#yyw3Tl&-l+^a@NtMT*jb|(* z6B{iN$7CG3IOq_%*D8*<;WOzKf3Oy&{F#W@soeQzF1tYpex|W{)X{zdTf~<}HI{k*->}12UIgbpNUk;TsXgZT+ z@`xk+=2Z;nsWV^gL=d)5a}Dz5Gct;>6Jdn~L1FZS5nlPQolXlDlQ7Qve-aCKb6jWZ zajPTu`&LJg)0aF}5|p@K@USb~0XZUj#tK@{uCfK~CCS-o<6hD08%rCHrKo1o{{r-N zlK%=QHweg?f}AVKj1dd+*AND7I(B=g1`SOR1MB*v+D55PWjN!mF(bvb{n7B(jn2x=(a^NYyw%pO;pF+*>BIT>@BNRN&C&7Vvdr}4{rzsn`1~M`iN{R6 z{qkTYg|(M}N`gKHtX8Wd_RyAHdO`b7_kKkoZsEvNJG1h*y~x02b6kx4DoRypuT!sU z*x1rJc)27;)=#Yd@zNJ$hgkp0;qj8>UVjE_&66geGsqACyx z`-UyWzJ6mP5bEja*)mdUY(CCPxpA0=2XO%xSezC!*nvW!%dshf8o=@Tm#uNHmqigD zK7B_Suvi&RExyr-Nq~qv$s7HnS6`cbMzrX={-63vl{Cb4~DF?z!mZSyPhVN7D8U_?ZO{?n({$n}9*OMjIN?u6$& z0-m*q<#F6mB`vZza87swurn~SW=UwT*t|gmJ-lO9Sv}5XF73FuTbGY7ZFR*e<5cKaW0~j4u&St#7||LvBh4$EjViTLe0uM zV}9gWLz@0o-fIRT34(9r`1mUa=!9%n+Rj$|HvE|V?HDP+J6nccnyrY<*yz@}V0#oJ z4II}x1`C6vE&mT?Ul|r<_w_prJu{TlptK@gf+7e~f&ohR&>`I+Js=GtAT1qA4vj;@ z0MZ=;D5Z2rcb(hkdEfVcuJh%bvp;gFc;9>Pwbn1zy5mL5m193?C;-WVQJg)k~jf6+HPmh&7Kc9_z5jKVeU9Ij?lA@V;yD> zLJ9kwFcB*TO13q2Ld7mdAEUM;?;Vxu-^xg4;}+@YD+1DUA5Uz5B>|ns0 zb<1%o_3L36g&p_x&6Y#ow!Cv(PnNd4KEui9oVvADZv%-Z41^$?1 z^gU|5>`IG?SJZk6=CRSGNrs7@2P-LPFP$rWrn}>J=Q4edr-?njrF9WIv|PELo%r8) zxTNh?iTJg_cDniIc@hSEo2N#*hzZ^QB+TBu$j@^AR08`b(pJNFUn6G>qG*{=ruiaA$%U^yO0pVppoWIL{>W4Ox3%lH0&fnjtbmKA_ zeGNn!+E)nO<`QBZhro^eBm+RnVZ6B>^1oKQ25v$3{LQt;*{0#mh6rTNb34(+LxG8X zyg@JX=5lr5&!ihxpS`7N$Cx;>FCJ6xz7r&)EyiTP<3J@ zVElrHWgs`pSH)oM!098%hu5DzaC+WHxw-~-zD-?^JfCNcbJ5RL*m1Voo;MEU7QFk> zkp94Sb*_B&G(1IFG`3@*)x4q8T~JvD0TgTh^lCLwR3Y_2EZlL zKF|1JM*Om}SwM1YzFg+NK+T@7d~0Fn=QFT#&+Wba4d8+&izdFAmce$Oa2(l5tfcZX z1l+k*Ie|^&4i{Zs0fc9CjY`k09Cn@}^2q%^7V9;SxH>zMNncz!ocqqY*8#n*|6GrY 
zh5zUI8ebF~Np@x(0$b@2UWeX6kbJj0l!^lRD@iHHQNReMW#L|NI1qhQV{jZVBjK9U z8l?T{?W=53Axixk3oYBGh(uZ%RD4omN$gR@`mIlzMnLQD@Y*v^`#EUy@aPlW^V6(m zgFfEcL*H*{&xyBfz5$KWkAzZoQ*BVOH}z+lXPc6lLeIemHnlG_Y+4+QTE+hmjPQ8PL*T9Ke%p;M_I#%7k3Iy~B75MIY6HF2?iGV!rER{^Ljs^bGU znA>XfNtyigZ;iPS)Bf`w+8553Z6iU$37GD2hiZJ1BK?#Q|pcrB1nG^Uia4l}nzf0@GsT z5TAx%(mkb{O*zlC#n1sqn8XLioTxBQNZ4HJ}nH`MlHH_MXrt}h;)&0c-%zRYKd zi!-#7Z`-u_b~xHLG1(yNPP&s2GD+%ksS>KLtMWngHky@Z5Yd-^w>LwzzyoD6;lEkN z-;_55Tq_J{-laY8+XRA1v5Zs6D z&Ad5JcDS9*7?WK#4T<9Qb@~0-zP<-hb;Oi;S?%*%Z$5TjVz+H_oO1)nFOSYv-RPPM|N6A8lW67Cpgmul9k%W!6zj8b@<(U6L$UtB#rsN>lMZ6$7) z(hDILnop@VfBN!003ALV*bG9`HS)2xd_t=dwe0+zzN3vwavj=q6V&ff;D<_iMcpIV z*Xw(GP_2?rU70@eez_Rn;?50>#=32me(QZJd7*PQ{#EjB=EWuLa-ZQ<{pH%*mc*{i zbO%)F=-q{*!9+&fviAyQUOVv9>+JM6Lj(2^u8hN`ng@N4UaA@dZ^mOzZCB$lu0jL` zp=sCOl#e_UOFza=c)iL#PaemlX0<)^Iu-b*fevYP3t^~UF>$fHSMe}w?)buH=-Uc< z%XuS3Lxu&BR%|y5BFXAt*76+n)gC~4?(-?R9n9nKOTC!d>wkj>T$Ktcy_Ji$I{}zpdtkSonXGHepoQf zUGnhvyqm3t@KNd=hR@y(c62dw+jlKsKDimMv{&e2jLEu&{Wna51{dMysX!|ki(uic zUnCV-Y7olG`^l;={`MP#jOy|Jzec}+0~N^HhMyB2y^g+nOO=hSY+MLWwlt_;2H-15 zIT_U5U8&VYQl^}NW61rr(ZKoDree%G`Pfd9UPaGVQ0d2)v7I*U=GhU2F$HEW>FN=X zq-xAs|DNh68vGI3fU)HU(0N5t_{3Pd`)Fr6Aq(Tp0>tnpgQSw@m z)ppWcwnU|iSqs3s7Y{W}KTG$sU3JqEs~?^CarpOlHy8@i$*r6kJ4+|5*7MdpD(J41 z<$1t11pD!CC$oU>d!U!=|1!M>aCMxAEzE>H4hM)P-n|t3`#FF)?S)xmifTcqY2K0- zYH@UtIwhWSGl!CmRd5#82Y5r0uD$EeA2T4tYJri1U28Xke1!>SM{uCO&wMYHNQaJ* zeZ&f%tDb9sFDaT;BA&IOWJHm&ztlFdv52OpOfx{MN)BT4BfkJ88?pK1=4PvjFy5S4 zV`p1;e)T{No+BWD_mY+Cl>CT}+?UIEq44zCvnAqJG3>Ztr6s}EXYl(|Fg27N|-h zczZ%OT(b-!5po^adMXq8G1GWKOit|y>F(A29*V*45}IQhxw9JHyDZEw4V1R$IE@87 z5i?K=nTs=JRx^1>f9u6Fx8iG22m*q*{qOol>*%{iQdYolHYPHp)>P&_lwKp!ydTY$ zd2rEk(@>+|;BG)&tE;DHU3S)~H9J{eQxvdyIPKzk7JQRTh$zrKGtq(Ehf)nDa_TS& z@C0kLtFCp{xrYA=_WPka^@mvSapy8UGoF|dFrdo9ocl8Q|YfDMd?k}M%MS534NR%&F|1J#i zgp@wFz3a_bqW(pp*duOLC{qi;3Tj4NaD((qzB9fRK_2aF8&wS&GrssijYH%uXV5+a zHIHnbpy-e)hX61(;w|S-?^F1VpJ7%3I0>8tjL`trcV1!ylf{(S`BxW>((scOl_LE< ziB*TEd)OMGw}U)TU0Oawf$vfA$o_tE8eo`h)GzjbZdfS 
z#wT|fZHP)8rozlKOw5;}&^cQ}V367TrH~Ql7V*7q%7JdT<&lB-h37VzNkVBf65hs+ zx_P&!CMKrVsco2Njz9)FAvyWQh=zipAXN6ih2>^*uIuia)lA)u&IUs|&3ls^G3z0# z8gEmu7Wx}LViA*N%xOm6E>34rqiiheZR)}NdSH^t(V|!glMHO`7!P|T6%C+(N z$^i-i3bOyK-;?+#N&hi!o37VWVta2})Gw|Z4Ad?r;2Rtqu_(LA5)Qn2$pv9ml}5J`d$+7E%pPoBL3^ zfCBZlM?|6_6OgbjQbSBekBr`3B((k=9D-w2&g4Z|PW1-31Ii(f+z1QOSq9$!R?vn} zqp8rs|1SI23+K=fUMU(Q?wbaOf~!?*v+eZe2+f*PgQh^)!*@T1Y&adA#M@dY9+RPu z`*Bh4nw4g7uX-9mG~>G_ zeE3-EPs7%JUBL|ZMq@xpk_K6tZ(m6Dcg9d-#I>w~5U?kug94(UXCr~#4|;R;o?$2k zj);fb0>iMI=}RXMYBg3`;$YIyWw=g=KsA?4v~dmi_C%TV{?{U^{=;czO$W}Pf_8_% zWOGiusB8@W3%y>|g4X=j{Oo_0(EQ3SP0jDUUWsk_z3os; z0y{LMuAR{ZF1YVNu@-}Esr`c3fw+G2C5GN{U@Nb;n@99=L`)%Duzli5b1*m@jH@D} z7;Guh;WRzesho(4CW0~gD3>vJ^?lZ5W6Y_JT63MN!Xq#CuD+_aZ9PKVuO)P(!L5}F?y*tkE>hnd_C|H)p zwe3C;8e`gwA|XfOKkl0hNY;jM=Z;2%t&ZzKaCGzo?n$mB)lmJB@QLTsZ&n;}RcVv^ zpYu%vLMgQ@Yt*R0^cZ%0^|$TH>VFquO;h$VB)k*xO96Tpd%bMf+{h(uGl7fcBtR*^ z5T<&^$)!P0=%T+Aj8Wclk`I$^E2py0cQp?a`^gfLlPvg2<90k9Uow$C%t+s_-5_}< zle`@4XkL^HdxJY_VY$6p*&iYoQl5ivVyc(lo4|4B!H0XYlq>bU0$>^Iw({U(8HdIf zd%w!Vgd4#o6ck1vAIzB_4qr7)S0Nk+0lHsJDYF+Hp#jCc&81!r(;bDTwMS_ZVIR0o z^aGYGpncfK)`NY+?*yy}3M-e3Iadw91-?duy}s9q=; z6Y6R`^Af8;q6a}hkx+#Xf{|>A7{!#)31b`-Fx5yK;_Fnso4}zW%o7}=->SL>W6OIj zV5JLTnOGz4$k$zgd|mRU56vgPXdHf%Y!=2X=!1jCtCxQy6g1F#*QglmXgvH4dbGjA<$#)+U+!7)wJ&h z@$`Q%mhTJY(sZ-UpvPTJ5~AXAX7%oWe%6^A1KeZvu}#Z4N`vg5r?rWpWQA3O6GPuVTPF3C zXKL5Drx)v4=r|c;x&W7gB+3|y^C!xH;H_MmYm^GvLhTrpD5{WZ3~*PSJ|klj$}ZO0 zPs)}&J$%;|Pq30ZsE^{88k;}UnXV@l3~ok3qi2ssLe~e$=1wx0x-J?w|AbmNNj9Rc zi?8r*82|X~@}7Qcm|Oile+~S_)00z=uK7ugvMx6d$x;q#s9=+Se75Z?GX7GWS%#Y?9f3m>DIF~+z&X@+7_Cg}x;OPBA+?GM+&lAHA^Yo~Dtu7A zkh&E4M?mxxk%mJogh**77NmAR`{m;s@Bk(O;=4CKf0tzax_K zH{Lmog@Q(B$xxJ8IxOQ;ppI~|Z(4yr+fT5vQy<TaGC+Wa(}dhF^e`z-hQxvdrL?U&kiscr zn88w$7M>R&DdbfKkCTd`l)v#?=t7|JC9pe*7(6K+MTur^iqSYn(0mDjd-f0q62hFV!8zh%x-9;VC5kf zw}njb1B6>AYyK<8*;kOwFnR9ol#+cNt@lXS(7YlvyxQh;#0kN7RGyV#gS0l`xlWhk zTXxMee8qnrDP42LgbPnyni0#w*bEQ~{g%Xg(FvQ!Ug-}tXacuxGxZ>s>FUMyXug6% zAdS@4o%z#q>YJ7tQM5kP^*FA3Qy&R%o;s9Q4 
z$fqy)ln8z=;Wx-|U!vABpwNd5xWiJRBFmlryxBAR6KM_nUN z(c~R@b|cpxami4{7Z;~f7*l75DzT7*^A=YUeAH&&x8~)6n)ifce8hUmtLOdm{$5UW zU2kus6@vqiEyoofXZ5|$G=vWzh;O)#rEeEDfK!!a2aFHtVHye-zMHy3z3@sa#QZ)~ zc`4jH&lx|g_kpc6^~YmrB)mUJBi0mN8loK$W#eQBq8%j=4B_|TeehvX=T+o{qjrB# zNWP;`ev#?u=xW5$Gm1>3tiVQasXtIi9Pk9sC8t&8(G^6wol}lub_w#m^8_sQAQeBL zptl+o8@$0ZB|yS)e4RE=_>6%jjPsv2CQr?$PLWt3tZihJZn)JhI{0HE`rJj;Zo!*B zx%0sc-KOpcO?3iq<9rVJ+t~)xhDCp_{+$jUAO3f*&vu(ZsvG{Ko3fV)l3lV@TEXr3 zEtEEM1F5-1Ht82rszYTC_z&lH5+uJN!zSXoZU!~wikfHNSnSi(o=z+~Pq1tnKv$18 z+Pb}(eQk{|nEhxrj|NU2lMT{bHt3b6 zfHe3ge>)V0IKq;Vkzuble)|`;)^H&=$LZ?d?Tv~rVESo-XouUn&IE00sb~n;3qMIK zF2UF1ND%1S0fso5XG%N@6+SIeRL9oFZBC)xf~q0(??2Lpe8I=0WmDnPtqoffEnxUi zzlLGU1oxLKY*Qf$^PTfbkmGCcE1;f z4&jeBjXnyCfak$uOhMhuW97$A;q-UY*xm{|Y#kKgaznoZDddmELVatCAwC~I-v>P? zn&u*=aV>PSesjgQ`w{mo=#W0ny?0IZlO3L`8&BcC{h$lKd9{2Bz8PU`Rf-)%^*CK8 zTB4;Qr6Pz!k?+Oc>29qS_}gFA730i_E0@@meYzMQ;N=ZMd2tBkv0eV8ZZ<5wrA~&} z-_g`E#|ZwG*p(;)bGhxFB^IX_6JD?tm$>nkDro%NxVcWe5c$5zr)fSmth}J>>9;@z zOtf7Lrlkokt$I5&`SdVbQqMr71%xi)p@Gz#+_dd*I?hJ#J;v3-#l`l>A)%vD1}@T0 z=B|^;6M^}TCZD&)WhS^Zw`f7_ZgtmA8~a_|ear+guc!#|(T@5J#3pmvZ*+-9-f%d> z-XdqxqytdTtX>>DnV)|*|EQHD$!8ZFW3KIWinxg{ScJY83dtYtSDep7gmzt)s|5Fw zz7gs^idX_ubKCR?u2h5cNY$b#dXInnO*S()CNQmLbsgYN7~!<0dIy5d=JJgBJDLJ7 zfBwm`;_N5mv*J`=P+b^BKMd$QMcNPa^MDA{%{HErmo2sLCX|_iLauJZ2qy_Gpsh56 zF#{KlO$)MDsuNJCCMx+rr%2UokZNMyKV!;RTJFpBsz64cWat?JDyDVL;-9gEvyObmd1ob$7tqXUiH{6?f7CH{uh2t2IeeHyL<$^QhE%Lv`n^ zjv(R9)@j|A|7Y54b2ZuD?)ZF3eRXDTn>$aBzG=W7T_atSg9dsZ#Ht!f$YLz+L?fIsU@D+^9 zj5)U;d2&lQ&pMU?bUXC49l>g}yQqjK&awN#-s+VCOH>24KWfRa(5n}u=6_J*JrD=!THhA#|gaZ1_bDm zpS5;B?3$Q$0u+!oU+;10&S{6x!xZe*qK$PRES+IXUilK_%H}$diehLCK^Ddd37^3m z(M&dhD`?5XJHC7f?)LrOPm4$2{7f1}vG7VNnxc|7IM$~HM4LzVde+a{<$1xAm>dqS zS1XE|Ns?CGg5Z}CLdiJw&!XE5bIp~P;1*Dm5%u<5a0p(Q4hGh5lC6rr7lLmB;zlO2 zbaBAz->J_$Hs-WFdDcTc0CnUw|F1La7GN_+I9X3tq^V)f#xz(4K}2%2CgB!zTA&kc zl=NyVzw}2hpT1vE<)_*)Fk68R zI?oL(+Or}IsnDd1*$0;wBwryAK7c0s#_Bi?z_IeJ>5_V~UJ*P*pU;8;6`CfNUArq0 
z%;v4R&ed9@jc+69s7HT{A|(}pAT;mHv9=y1cJ925P}0;TX;fJ_PpS75zVv-DOF~ymH6aLvD@K~@ryDn>0U^K(?A>4gnt`B-#w-p;>{O^ORPHc9CgvQmn^b6f z--Z2t9IOQaYCSy;lxJ@kf4SGF4IH_(^eNVhZ$_L!pmy4!o;Xt}s%VeBBd|$9n8x#^ z$H&gnD4a0JTdt7XbJbLdrwEk4OmxC)1(v8_I=yJYH^JF-6c%Q57L9v>>h+f}G0VF_ zG*9E#6S&mKba4PqX&C&>^B0@7GoHC5EpU)!!kY9f?{<`ER*l7&1|7U-9h1uWK%Vf> zA%|l-T(i%_$u&|PQ&RiPyTbVY^%XwV z>j_vN`5A{GrI+Kqt_FzPfd0!KueX)gg~9FgX=yfsQm{o0$OI-5#o2=`jCAN{mR^W`bt5|MzpG}%5QSL6OZruY-4qO zGM{_il(G+*XwZpGW8W*Mr87LMLZPiSu>0?Xww=a?(bUM|m?6V& zJiq0U&^j9|Y6kEM8Q`Pz=+JM{9?Pu`83JVP`>`ix778<<0k-c*0V%C7Vlpp*K~!Wa zPL`@POt{?wpOzKii85(jE~akG1m%J1vUzkc8(s;&4M)=yL5ng1^AKocw2Ha4i=3re zlHp3#zU*f!YphH4#q=lFr4BkV$0a&QNAxthDLFX1`G8guVQ8i>bi%7zahM@MWHdY_&@Nww&(XCE0u`}QI?h* zj=Z1O)vZX*Y$TJI^b-6*r z8{2}#w;jSnrJi*~{94mrl!PDih zXJH?FNL>(Ut+t@n^Pg@DqW>0`93^Dk3L4=w#jL#8h!Yc#*45s*i*u%29#fx&)XL!w z*?=z7R%yC+r1Y{<9>(122OTTkVM#h7atd0SS;kn8$CEe}USqOQRBX{ZYE>|CCaR^e ziM`<)Xi4czlsCjtVQSowVEragthb-Wmc_K%Z!>Swb}0V1U9kC!-6Tb;U-X*C#v_TF;~k4RF+*0GJiCIQaU01$`zXBpNm4nbj(-V>Uz zp0fZeoCnYjW$O3l05wT1F1+`cP#YJC(}5UCtL6pQ#uUWkd&C8l>U7&k>t2bfQQEge6Vpr*?t5vMOYVMVLT#tqD{iXz z(?pa!&P#-9xtY)|kt#oM-XeQt*zh-j zuVP!SYvfqgb{E@gu+1wIA~3{bGVVo*6Q6wd>&rT}?9J1?-`|8?IUOf(Xm#;exciFE-9_FLidwfZ)Wk#d^7Vpi9!Hubc zrX^}`Rhzg_sHl8$I zV1c~Ou;4tlN6Ah{G&qFIr(OWk3!%?d1i&VUmP_9b1**eXJ=G%D7e_)55ZgY8CHNv0 zTkm_Ywsz`PxH(buc)6rDcbRWTp?XY3-9=XHTES#3$$gRthfXw3A} zy1`nil~tbL0hqmVd#=WCB~^&?_YxXdNODW1*=?}Nmz7!SK7|@?N#SoibVm1`qr_rl z8d&c!peMl(#Y0msQTRer_}ez{*;d{G*=$|NTn(+qV%^$a3KU}^&wxz5H4C>h!bjp z&VUB}CbQv1lEEq2?fH7GAi8YZvg&i=78=(-Af{^koaG_oL9;epq5>I>qE5P7I3enF zss+xkyN|Rpc!YF(NTHut#>%AtpdS#=BmbQ~`QhD=RX)Ohv+(WP{|Kbt2pz9}uRd+q z@|oDV`m=L6g@m4c7inys#9|wkY>qXHYYcskxT(?h)(g6QtrKmnZjGeqFYZ&K0yVSA zKYndx{^Y~J%uxOazz&CqlF%D{^jMhpK-*HXgI=>z+!j}>;YmMpVyw$4qcT2+4S-fD zS)MaXS71QYr;P7{(j9dcyyoywyxwvoneE~<{Q)pTc@66-DHXJUWuKtxiXu&siU5_a zGpw2sTw*3ki%ZL8GTfqSa9?wGk8p`_ACPU`c1P{gX{R9 z9lKMMgHzOk$lFppz1UXeFzXcVRCfON+D-Z8Jrv~R?V_z||H`-DKY)_zc5Lw@Tu4Yh zeCD5QIb;xNRKBnv44Jl&Z?I*+BbMxO 
z;9>~~P^Mi<0_17A6a*-oLfdVl*3@i!{~$4OX=foS<1!ixv=M&?#?<4UoF4&E_rs1C z-3uCwDcR!wh7XqE1wJgs_xeE{=P>g2G#V6ckTEGt$i%8JaX>&^qp+gy8;@gp0WM$l zt=f9b_%GP2WiCznT4^XX$J@sW*RFEzZ*`dO3p^6OYZU-z#8I3Y!)I(4c>j3%$ZIL08*Q4ac9$Ubsj@ZSeDC8er&eD?SHk2e1Yl1&MJdF0tIg0jB*;Md1 zuRY7(x!gFyU`s(HQ%!C)=AF+!GM3JI?|4|3`On%oz|k=deq(p6)zm&kKE3%_=TZa2 zCMGV&S?gAF09vUAU>Y?at5Jl>aI-{vcwwALBcpJX-@bMQX|8SMvb>wi`}u zRt8xz^Ot|6O(zCpZG!nqfwI2xg6!r(#)E?1V13{Nz@_rtqv zc05nY=E)sd=8w2{5TgcY?-Rb1Gu4Iqb!2gPKwk}qBTj@*cv`={bTmb1z9Vc`m@84n zH$W{8}RV`a$ly4}EE%a;Jmy=@~+?xoeBI zgEr%x^g=(ypmK>g54C_)5{?O_`-IJ)>SNx%Xv(xcz5h+C)?{(iC-j+p1rGCW-si80 zq~%8uH5$BI44!HBy|5=j$M~OKOBSR{c)O2W96qLpAVe4(ON|$-4e8M_o!Okd#~*v_ zdA52mX*5wmAi-)d!D~?hE<}Nm8(J1@@|YGUOi*2}N~3}bj8?w7WkoO#<&B7zN~>@D zZerf=j#JOPs9llo*k?dtA@vT&5ff;d0yMzK;&(G#PiL3q_Y2=Po#QDU{8*ElT1pH<$QL`$4I_8kl+v1?p7e$I-Sy? zb5toJrBekTvx%K8^8_a^$^`d;`4pl9+@-}=2J$=0iDceU^qvSQlBiT-1)w<($;l@f zUJ-`@3V$1fd150x?A9FJK&*()Em3)iu*EbHm0MC(77a#^wy30vA;^Im*1?*4f2K}& z(nT-CgW>Abi2t@7Xi7NpBFhq!nh&m5k{UN90Fr@a{&>=`=;-h<>JA#1KdF4NBGPQ& zy~^KW+F3R|Ia!~Van0c1Ukt{+CPRE8u$7KpjL0*41e8wXMj$juOBo}JWHE^%zAKoE zPt;CNvr?fxqkQggqhIX4IYkWtJSSy0gTW zI_23hIB*diF5I5F7)UxtCMtiA`y-Mal$1~)k5lb@KMbeWWSL^5>9-2a?Y^g_F9G~l zO?jJnl6`_#y?h1^8S%3WfCJR*tFTvaUCJkoK74VX=*qkVL+{CAqkTjFk(4g$&1_r%ptXM$f+-HGD#2`1Sls0UT&cJT#=w!@D@J7lN*Yz zZ6VUV7SNk`!Z*S3oJ01{GnM9VSqc_qd9NNykPU)VkOI&S9))aveVQrfF6d}#n9DoA zx6GUZAuh-oZBN&^E1oqVz%(H@T{16uW_NG0Ev^4qe5w2`!2!_lbZ!5G@D@};MCToA zp$=PZzR#tzE<;+0L1mKHzsbe{<+Xb4;N~Bp!p#MQy-&cmD!vf6{2hjv%A6Q%4L&O} zDtbYo4WY{cxLIqLbMxa~>Q}uezG_l%tr73m@$-DHM8ReF3vqT<<%cFVi+~iV-fqN= zXRDKlp?MC}6=FJsh(DGFNT{8!M0U01FQKbXO9}~{%3)i`e1Um~_cUDE$vYSrtu8E| znu2*!(i{nw)a{BXaLx7#u3iGjGkm({pL;pgG&!t^y2|=hs(*b&YX2-3H;|U^^`OIz zW@&|SF2^qis^-yk*KGyEOPOsFk0$#$8}Y00PYT*l53Iq(jG;QPv&hZv3R4)~TdQ{# zq-f<7$QK2$o-&3#@;w+K0KurPg+37q%^3a9N2ikt40?DGSSwCl|#U% zAd~e79FrBrL}UK6_&OeFhmm;4Pp!!yK4g5iARJxkrXkp;8g`|Z*ybp>Fw=Y@kp9#q zG4OA^O8s{g1B|fGQLzCF^;gS#m1c6*`JUI8XSzJMOMUmCnMupN9uL(2ez9%DhRJKq z4Zv$uoCywBt;}@<{$JEnd0GEZnPK*ht4I?tA$u 
z+%AM5>~&Z&Zk@Utu#^AUOWdS{o2#^? z;8WIW7t0=vuLCw$aQUW)KJbtzseMV3wse%?G7z98uk4RlP9J5M&`5#8QS86 zKB2QKiLYH19m-;-S@fpxTvO@*o&`! zSov$@#~ylsUN5i*s22;_ZlT7W2vG9hy79|DYvpLQ&)#Jr70y)SV(r{mYqLr^m|n?z zCcj`)xMk^}^|w&y3o6}MDuiWRc(8aWy5WLF3+h!7k4b9{!RI;DEbb>Q@wpZ zF+XM~oaj9cPg?1|Y3ACJifB9QGO33)PcY*Pb`0Id;rD`y_CE5&ymwadMU)MeWFV|voeTz#f5yS{lu@5gOYj$;^-8>TafW)S}DCXAvjXA+HczZ%)Gyx zDp4LUu0yq3iU1@#0af^9?m#Ok`Mq^&C46wK^LGYu1=V+$LN*|WMaI~1I2PtB|LgeU z4K7xI`Zu+jo13;1SUtHsHM|a9o6B~;al)Z3vTV69^wSI2{7Nn9^JQY~MGGaiDmgjbrlZ zQP`p`k&qGlGcj2!Ys{Zg8g^WRi2e6fTF6P6wHeQ~d+Co|0!SG|-xQiK)j+A(IQp~V z&0E3bI2kj(iS(u&lkGp4Ew~o#$c1==lPCTjoT?e}kwn1cXUv%vvjw}i2)|7m3H)VO6J0ksle4oL zPdxXRNMyAH;L3l4k4*t7`qldonz?oXiWf9^d0eBck6)BZSmH-P!sJ{u3bpg$jgE?m z^r33|5>)$0UZRIG>y;Hh-&q2M$#zJAUuUF`WD-ly+HOa_82;4lIcH?tvf%zR$oNW_ za;T8>ALZD%1mfF2eWY4#F4`zz`m2`nZSCsxaCV1ABOru_4Bwhs+a3Fo!Cd40`+J~i z2)i~hFZm{^x!E;jJ3w=pJB^8oIK-_VEL=gAxGDvs`{gY&7%=&~g6jD8#6#1weZ;?3 zslU%&;a}vAz-uHX945z?d*_PcYG+6M)rsHE%leaMb}Ku;1Z>wJ zh0S=YG`e23x<2gE#fg2w1^xh&r6^J%H-ev^q)id2L8VFt?g-`6eFE!6J0|M>RM~*# zaonrYS38h`*@Y#37@isdV2smKyTD`;gj=2O$NzEqe&@nNQOitPxS#6zaC_l%+p8xqK+CrS6QywI67|8>@ELr<6Ef)Ac%C{JM5A?;qcMwaqjP5O5&h>w_D4|C4q9FQH5}S(sg# z)EVEP=ro^kp!{il!wUd(<`7X- z5V4vB%pX)i>K_N5etM@~nfO4cn{J{s+|$LRPHbVOc9l6@+$|NnR}vWcUk>Q++aw4q zj!rQhI;O|$SL%)%c5^5GB3plVYr0Ac9|fi58~0mIZz*X#`t?`9Pcj9_4mkS$3KANE zfy+|k(OKe_L`%n1n3HzE3p!N8==l8NBjD`1L{&g8J9acC?H?dt{;aL8FgfVPUj%?x@+u>R~Yd5!KdfrUz-&@ z{0~1JDw>KMz4?yb$p85}*yM}UCoDYra%|5ByC0mG^D z28ION8b@=d6{|K*@l&%Q;kSm()jMG1+(KcC;_mj+7tzmwQwb{#v4cI>1f;DQV%*q! zHMQ=;-#-i-_6yxvujrZ_l_isq1G>3$4@(^+6b&zCt`3124WF;qK3?CsI-0+x41Rzs zT(e2U@FbWE%NFvvUH$enPpkP9Q7B&J@O9EdX&x2Ed~GLfq@q;+c&|65oU=?c3$sPH zVEHbbnjc`T&%ll>PK_fJ!X5OBX^7(gxP`NU=K^c_HlMwADDYiZFf8c&?KC~6aU-TY z)beK#%i-9x);fV{J^Go@ny_h_wYK-Eoi<@ZT|nd3f(+DHWC;#48FY-3wpET!3i%56 z<68Q#nm4LJwU!#&3tyr`3fwa)HiHQLjODSjbs=_KCa@^*_Y$dVl(x3F7YZm22(U?? 
z);xt@0j&qf>mx&^EPns9hQl_p>xWk>4%aMpmPf-2TYh3bTQCX#Kj3Zui zu5E!t%KzDMz`$~D_4m=&KX^EWZ=;d-~vYlUAQ4S)2j4q^1!eR=-J zW0ea5LXr8dQ66r#Tt7M7{Y|1yH~*3_I^#>#DnAezmBv6f^q{;C`UW%Ol;mrH^tds2 z4&+z_RJ%}#vO2N+^D60F7lHf|Ae zpuH#4Qn&OjasP@EW@5bmY;-Ad)%-iOmAXe=j&?^t8g~1gdUaNygVSlD4l}R36G4SI z)ARh2zRawn(lm{Y9Z+5(}7i-tgCsjRd3AaT8HctzygdaD_N)a7s#F(}|wFF_&Q_ws0zP0(o zpM~@2kfkW)RNdDK#={67TaAv(!XnEBxoG3SdIG4>eyOeb z-e`N(g+ON91k~toJdZ%FMa?a+NfXuTbENikHI@5?30MzsBT2os4N8dsCFxW29rb|C z{41q^tLC?uEy5D-ww?5EHbZ!u7p-sY$*P+bYexe-}wOQ%59ez~!zu)fs)*48_ z=Hk~!8MY+1P(JEsnCW{dX8X3T+#UG+fSP0JHy({2kIYDnE&*c|82sKXA3CvWTU{K6yNj9 z)fPFX>s|J>*_~_t!{+&OklhAsczES1C#1!39bfG9C-DZu59{JZmI0|qW-gVK0L}nz z4<5jZ?U7D@7Xi>rb_{tW8aTcY-Q6_oPDI}K zp8^P|{{C{JI<0PN({JgB-VEaMz2FyXGH!THu2CAt*#$xxV>LPS6x;9l<<@)DRE_K!uY3Q$+ z$dn5Xek3IuD99-liU)5~)h07}*P&|WQNjUm>2LGBx;QbdlN;eP&M+kP3|Z{sxiuo9 zFq~$`QhdZwojKM|%e7X#o>8Be;GF_#c#Q21A6Mm8Rn2p!JFj>y1%joz}T2@9(Lf>0_4j7lxu)B1^{bdtVRITXq-|p z**Pk$-=ycd02I8C!0Y|9((#pu(WVng>CBr|hfzcSeWvvm|6f&yg+o5! z&DYz6XXmh^*W1~=7tt@>Hka0`gvIMp3_Xyl6M|NBwrhujpPOE(chMyioL)SBC<=y| z5yzT>P$13ZgSZXNT0?h_fq-OV&M0g^vK^;l{E}!}nlUHU`^CV2jD-Ib8bI0stmMJ1 zN94Fzq1P}(=q3_y+o!K@g!MbIDT)%z5a6gt1U&Z4aRDAtOIYt=kpb*YhUH*66{$eS z?%sU@VWjp~elF_&Mb>-9HMuq2!_-g`5D-G>2}LC&p@~vNAp}G~q?Zs7LXmEyD54@Q z7@8Cz6cs7b0)%2h6^x-56;Qebq=*6*5D{$eb9dgH4YzSO?*rq+To}+_cAxC zZItO>6gS_Qg+n!sA^kqq!#tX+OJf%)-uc`QjI9111pTMoH2G_e5C>*Oe*WwQir??d zA&G-!p5>@Q{)~)_&^DR!v(Q9&eS+e-$|FfWNll$)OOOM;1&v3G@=~0~n#761kawl7 zDH<-u=zT78FP5{U8m-T`uuzY8axI)|Ql$;9l@j?P(nC)iQ$Kh~uH-J?nej3EcRd!Z z*FJe6zn&Q)K&g`6F@Qkqqkj=0OZL&d(pSe}0}fI>@h{>EP(WU(g7+KE%^yf=IB>&O zG(1_>8@IyJzb3%hcm#A<;qYdZP!iIp(7*}4Z>9ZvyVa{ZF4sa|w`JUvJ7FC2*E;{7 zG05{b^GK3o%I_41lZLzEC0%rUnk^U6-+%t> z?0rMNjM3E*YYS?UY9`Hw;b|m+JTstdB5A}Q?lpPAp$y)~k;d8FunK!OYE6+NY#g9m zzQ3u1eGDu|RYCzSvx{JOQyzFDIbUdQbU`$=L@m%^=wG$(uYXW^FqS`x&K1WW=9Z`m zJ)fvJL*o#KgBz9v%LgQf2O&@%7zf5kigp&1b{d@K1qJY?&#F|hkFPl(9y$kzhXgAJ zy5UK@Lk+OEchqf*(?de>b3LqyZkA3Ti@DKoSG(h0*<$6rO0cB*^MMgzLEq@=S8^qO 
zAVrj2GlyVZ%tG*qU8_Yn%`O1k^F4@VYX}V8HC(e&ayf}fmdzxP6z9AGa^Jp8S6+o^;&WU}R;{>mF9*01rJ`Z=UNIQ}?0bQx2EMMgWr- zq%`8g1m@}KT9j=;BY=CIU4c3402jhAF=E- zg3Qqzw%?^~Bt~G`u1=N@OV-(`XZ2ExQ>W%wDpV}v$Cu98mp^A$YN4LP zRaU#cH^JxxF5rlMcSAFalS^_DV0J|^w8)v)( z1Ch4v9(134YGVgUDzP$!BlD5A`Y zaRlg&KZpa@)Vo-&;cmTYs;3OnS9L(M&q`$N);oahSRi0Bp=Li-bZBZKZkqjnGBq&O z=V}$KvT;n}eaPYFf=nCvdQ$-I*EPgvJ3$mdp<#|B03ghL4L-oIUz^>qMe}hl9O4CDtqUbTxW+|z-z9L00qukJa5$XW zii*$hiW!y|+zPjVSo|6nN^A~PQZhsAtH;ui>qo}IYzXGucuf%iT+z38;7|LV4Y zfRStU6Po`ZQ%(&9Hnmn%fU4(O2P{4Onq~=zfz+r!@XfQAH2RrMjQb4Gw;uioJMZ zb>@|5%srKWgI|Xs&arU=LDS&pj+|(UHiv_u%jg}{%tl;x=OR~NtJ&erYq!P^z(&3f z{)8q<`ztYcq}}5HUKfYkQxcWU`|n~1KvLh>8eoVJ5M=?>J*{BQLJk|3;Itv|^COe=?v|JZGA zdL0DMw}B(q$hZCkwz;kPhPH9t9I-WtzQY0BJ+j7}n4s&_F~!x&?GTJ>Eu_1c*Y&$J z9Fzq@x~M+)ION!vVypAO2$FmR$2e$VYK0<~eQVphGeiIX2S;50J-uxo6PXPIQf%D( zq3!gLilKp8WvGX6ao+lD1rfS?ws?;rw;ccHHI1{Rv;-h+_{XIViL@-rUuvzF7AR#4=k{Hyy~u@W`a2M6FQOZ~CHvTct{%Rk{RDiZ!> z4JXLGWrPa-xIyCsC|YZ6N+$;;ZT}q{_+^%FK0ha+K?J*Vljbx=^|k_E^}<+EWwxgB zH?TUx2PYbi@!m4MEKGBZI=;ecXc?BXo?|VGS(aELi2y}F){j;uZca^;q65fO7(@pa zuxP%GJG04xQ?evqjuTZj5{5D97`Z1!O%oJHdT?M^;$JtK5^Ah(a;{o9-zc{r*I(Gjk}w*P98N75&Gb&QjD$!u zhF-T?X^4jq0zeqlDUYdflQUDCQzAeRaL^YkA(jiHFOC%a*;El+5+mL^(TShARem5 zb@C#xQ>_yLXJ~%nod;MK2aDAl<*DCrS3_R$4d3d79FN)GQ%vrk%CC=(wwDYEZ5D!4(^Y!Sh%*;zg?q$S zAd*Or*F4`YN@=^c?u+=$(TKTj7qOYe7BPjjrFq@g3`t@_+YUQ-L6Q zzs*|)#0sX-TLN6h@1IV`dWxkohTBLFrDU<<-Y7bC|6 zULyM(sF_o!gk8^AolYhrePb^!*Eg&wC2Q4@?I8*x<@Ue;g{Kf1@izKm)X#OqYY%Tt( zu%zr}Sre$#RwO`T^^t-*Wx9Ke5!7`IeufF>YC1gT@Twy5-cp(A^#zCzArz5whcE7} zz6Xvs6h!v|(Vy_G^N{{#Y0dKcI_Do`*WVVo0Ez{drlp5qY`abFa5jRdaPC5B{bPMr zPM3^UI&9u!c_9<%s@IJ@x_1;rIDj1G*(}my!wXDxR6va$CYl)qS_c2ioB(8ve`9kF zphlMn>KyFP&mI!zKn)0Nca&ew&^9l5B0|NkFR%ejjC4zv3k^JJIwHAW)hy}LjUm`N zxeCkd=)SnE<|4|5u%JW=-DQm<#%|;eAPs^s1S)4bbnBptBv%c<5uh zEON7v1I-hPOGO|Fz%QneoBPek+uD*wfY|yQ;(UQk+*UY;-6+6X{wP@f-NTm?b53Kh zVfk?67u&J^GNuSRPVq@YT#&X<=+3MwGOZ zuSD=>a|P#vZ%H-s&g%fW^Zz<+l&~E62=AWWtufr}R(Rp@qtLltdK#b0iafzRwFn_x 
z`L^K=kGe@K(@xrx>oZPL841w2*k0A`?NE%;19kObeX3qQ0LTdZS|e>J<($t%)pa>- zBb03>l$|)BX|~*!FS_>4wAYF8P689lzb&<{_5(Ojws#(ewqk~oQaf?J)Pr%WV) z4#b;_kU!CMUn)pSc&v2#{h7@#ahHk0YL(3&OS_*Pul(0{u5E18{hIvox`Sq983$x( z3o6L>qoBkg|Ao%RS|H2?wC!)}6S#bAlhgq^)e&qypY1}EU(;s(u$cQ#%K8jDvS~j5 z5hzirqa&rsNH3&WI@CY>DUhPMr`BT(4z?(iE^-m_cv?AJW!$d#o79*b-f|(OeQ-K_ z!ssa}NxxI`G6OHP|3JjVGapwebI0;=-&mJ>u`n9pZkH|-0Qgj#Y=U)H!S#JYcRe^Y z8E7mWQAfV`uQ8$2uCv0Q;aS78yLq!}9x?s2#JULqLdLPk4_~&QFwT9-1-5t+p5VIJ zOm7~s%bXa8iiQTDY2(mD=_h%K0K=F;sso^gIR`F@~q$q zR)D4E4_K>w4Ie#ADh{n?+Zhj6%2Bm1omQWq0TfNEM4DE+x>V4!cUL>s&xT{t%=V~zX|RsKJe1XWU<^!y%zd#e4wZ09?Rc_bvwJ|DrKa!G;a z2r(hYm=todwD8;~Ro~pus}ned(xp%Jmf26G1y)vG2Sc%KM3zv0l z?BVL$Zhzh-@5d3Q5VEvSkMO*}G^K zRgIc=#j_cli-~pqWtHzJ@UM;8xWjzuhtCZ6nIVT8)osH{6ROiZ2sR6n$8-aK2H+IU zXY~!DkHfQk$fWdI%Mwgip6LJhCv=E0yVs11E%`JfyRDaBWi5;?#aCsF7;^hQw&# z@{Teb`t+$|m;OT{vS;_BQA&KlalISv9zlg20o>oxzUZM&bcwZ^Nwr@5U#f_IEgXn^ z%ReW0Z_P=!#soP2GQNHf z9zk*6S?;J!oL19~VXC)75--C>f;-rk_hlbXlg1jE25dX4hbRmr@g1PNi;Gk1XH7gS z_Few|Eg^C|4Q;3D_GVpU33k_GQ_H;Mt;CQ+^<3WkjzZQEEXxL>?c&c3*m>IoxjoHo{aq}A7Q#nF}SjC#XCE?2ZSl)R-P1%s(wG!(y z(>#r}9Fj%>Wg|YC6upOlN<={tAcLZ}+@|v+j_v?^V{48=tDg`lLG4keB-nYc#13()3X>$#T0d#SI~L zD)#!n7Z5P#%ubU+areL!WYH=l5*oNVzvEA=6s>CF*z*MDSd`$zNg{2XsAF_lqQ8 zJ>K>HhPt@tefO5@N27AG3W|dL)Q>nYfgmH&Q;RbcAy1%3HC9e*mKkr@zqh9<8h7 zNmz#t|Gd8YX6KI!a3MeMuZ&?-T(F(TS|Vr;cZ)>YOjN6ljBj$zTcmb%KGTp$k?l5z z36v{Rs|O%3=#M*A*6-0|b~QW-K0*!?2BLcc;b=-?;qTe?ZwO`4ciW5P zwxI#67%v<1ffy^expTqcc8bYW`p~GOR?g>G1cM(d`3$}SP!Ps)&OromOl>P>yO7A5 zz`1hv@+oms@SEv?_c7b6cHeC0)_hdo?bvrB=0lc_kTaL_pH>F{k>-x7=Q31R;O+s6 zOr=EGvauZRV!;DYY?nMS?TB-xF-o3?MOUi3kK2Y(iKw z?UVA+@o_!|_$tUMqqP zfJcP}V9UU9N@xREM1;gQrCRziHyI^Q5b(*D8=|kS&<^)BOsF+?A30})^mbBHoEJn> zIBH?tQZ!=Nop6)ew~D4mks7z&-!6SU=zga|(pnPa22GrDWc|;!rhpO@WmnbG(*7z&nO_ssW z{!yL3H}=;5eRP$hVZ+YNG2s)xdCzhZ5&*uOnY0<3pW9a1Ik*173enr4iG#Qu25}KQ ziih{TYrF;Ca*TR8I(s^O!`ejh-l2i3k}RPqK)hk8Mr>Am`|>TzS2-4p|DKwM0h3Cy zCl-V)FYXc*pRrJR-SSMf4oM0^|vSlp*>!{LIlzQ&SB 
zFOEil@2QO!w&Wrn2es{mn+XG!MOV%KaVq+YH}CyvyXW4m@cPer-h4OYABAwgwcMJ$ z`kjt_hQJmn>*D&$UL#UwTo6&Xb+7M`t#mP|0_CXc&=Pjd1s}Zr44dkC-}+|UNpE@Z zm5%JDdN6A_bs2Ya?7gr6Wfw5Z5FVP6DeQoVtbI#@<=3?dn4 zi<;*S4DW2KZ2pX?`5D8vPGui)&e!kcU_Eqb^*P~enc>hn3+rj;GV>MGcGLZBbxZRW zAOZA~;%__`wEt1RQy8l{l@1{v(P_Q@Phq*-wlCb zFAOiOc^3L)M}UOjB+7%1=L;f6DO!gf6Q);$Q>#V#jL#{n+3>n8M1#;2eLU%A92idC zs>(^0EqkRx6v`LwE=@fH?1g8fd_0Fy0{`sn|B_8QNar_5e9{D!r-!^Ess@Mi`V4B$ z3UE4(K0F%PCM4qBYT=j!qc@n;C`urQ9rEeY@sO+PK;6=YHQTLp)nU%yknT@Cv$+Pv z-du@|ju(h-Z}bNA3elTsLWJo~UK500|LpEHuY-~oFBsp6yAs`SFBM_Q_*`XGFuJ_5 zC|@wBHOupblH#1It!zUk4^&0>2>+|+}K87-e3f#E= zk6BkzACixLQmXaH=r=h7DSlxMQu-V#_s}=(p05M)7^5tMY%!a7h_^|QWC#~!`+IpH z`6ByJx6|$CXHH*zI9y9vh}PU!A!Q>*)|;n6z?r5Gk`8kxU&J!trCiOx+af#|N$#nh zTnW1V{0AR@x92^h&p(LhMr{6w3zL^nkDWH_I;rN#f29f5&V~>*#M{gCHZ|eo?=XPE zgfSAwgI+}fx)oyw_KFb-4L`t|c(3D`Y7E%EwW)JVv1RYR#{Sq*fM3#}&jWUN|1tk8 zDQU#Ny?)~RYPG^K9X^67C&t?v3{JP52m>4fY-m2~&n)t+ED;H?gxrJK(0TS^5)}i& zv^2J;6-rDP__6R(BS$pzSx)|F(>}c#iBT>L3I)n6A)kq@d16}qfyc4$(zIUFw%4qQwa4D;=}2dHa=@l!8VMwj`O%j-sE)IT$0M8g?hdG3$%u3`;}^NdG& z;UH0vky>y(Y#uC<6zB{OVeH4ZN5#6_K4@|G>U9Pj!%O(_kJa?y!%i|JLpF;UBiyR- zXd(!OqL%kgm*m!1TtVPLzF#g}rrgQzj|c2mLFh!)B;{#7-kz6Uphg(Ci&?RtLE8~V z1egR+MubGvzS_s$n|iS<5dgxAcL#8{Ya}q~y^Sc6Tnk`nL5Oq6nsrCC@gajoUzD*Y zHZs7J1s(kEu~@YMqZiBRV}*Ufc<&)i_zV8~L*oagU+x#5p6f9JeQqiq?tk~CbB>S` z90%+AR@fdz;N~}B{)-b~0`%%r+z`SUj1a+}^J{;&-Gqg4ysZvYYB6ZU*L3%xO25&B zU@<^mG04%PO67qDhB;nvR1sI-10pl06&AuU>m# zj)HPceO@sx>|O*O$DHBM4bv<{c0X52*O@( z!T}>Yhc8hZ+acD0M=wQ`M2cL*qP&x|AMtrL6F_elQ-f0C$;H{`)B0uilPZ z1LmF~bC)zk)+@1pU8ZlS)xTM;DMWk;d0GrK+Fcj|_)G6CnYq=*NC=_M8rXhTA8UT~ zv}DFm@rmyBFWDTD9AJy6qujYCY#DLjj#dt$ziVQcd;{6L%8%K>)|%q25{tEP9+Y}& zaT|eyJz3E+*O0v$6Ozgac;Y+`U0t4P{C3e;CBv2e(PkFZNY>M07JU0ScLrc|{12x? 
zD1Cy5aKdLEe@TVpc(O+qU@j@cSw zvTQ6$nvW4pl6Po<44OImwYS?!fDZt4yt*(Rv9As$KblyPOFaSHt!C$73@t`=w~AxU zWKk>!w0K`Q!6km3=-CttuE6@hU4b|2T?7*X7IlkjUdb`fHF4PZ?~gFsvJ2?WUh>NJG@*>bv$Q#!(k~u6LTFd0dAU&gh^?sgoQIt=4U2i zrfT5og!fwE{d9F#Deuu)zjveSp&jgrVPIJMSSKBTLJi$$bh?IIJ3Ao2xkt=*TJTll z9hQU`E$ZAMrQu9&7d#{YRGj(6p z1Q(2Ah?XLG4N90JfUqxiKg~n;=1G>~D9%of!=Z)DroF+HDX88?ZnUkS` zdA_|IK>aZbLLmLl9BqCMJgq%1T|PeGl!R8g;dgO{<>)&f&(AG^zTR|bLVF|>ki2e2 z%5<*&}SX9A)IxNq1og50Dl^i0;054 z8)P=k330kTHK%GHRcM7SeNF_-rTFj}ZpF;3kJpJxhOAw~e})-1{f1QQn|vIAt>5vW+j5Mq2^BP%%N^nZFoh$x-d%I%tm7;4FXk%>&J2?HuT45o zdxZ%*nj}M~eFUY8bDp~P6+-?yYhm_z2T_21Vy(^g?C7}Z8V6Ah)3L|RRexH?5qx6b!fUzY?;>xGa9Y2Y=Fm=z{*`v2kuPS@6UIZW7`t|5tr|Bg#?eVH<>c+cEPp>FH z0eltC9YbsxeEMOifJhX<@_)IVIr@1~rcCU`rZ0*;D>rSNvxCTy!k?E4(*0H)EGkyed)n_oj7+OTEtq=T=se^CroXNr5mr|sZ zRR?8UzFKcFg0gcHV~~7vt5!B{I6Qm&W`?PS|u_>5nt7m z6w*Yk!96-q`z)T69P$2#hKI}R7b-1N`7M$T%z^I+97KtnsgoG&V!EK;jZ>TK@HZ_6 z-uN=Rh9ai|M~X=Nu1;8El0vq{>A(DR`(mW{f*(Nh!yEosgzZRE;Z~%Un?lwez@2pun*D-%WtK7?in%H!g~H4@_iG8hphkUSle5@I zGai+_{xh#6iixNdL`pk&T<%2Ttgbyj(pFbh;aa{VE>j{~?ntvql}iMVeYp_Ow)c{} zcX@c}!gLu}M!C|TGu3)9i!Pz>DuS+^i1(M6Xs&+SF8n@t;fQ5lTI+YrDSZYG@}B&3 z_RNBGcj+=r<|_i9Rn;S_M`fD=-zI|n>DLCO3%M%fFEHec;Q66&N%a{kECe4Op8^1wh@qyK)!dKobR4~!> z)7BKo!t9^>?jjhq@`fFj?R%Hr%6EZZ2C1T0bIBz^A~}2^rgrYbExO$11F=&H{Qdr~ zFl7n+?13>ZPzn1PyAbNy;y62sP)!_c*GfT&uH{$^soFLI7rRR=*TbcHxz41WU?+Et znZb@UOPyTFxym$kj>1C!8;sAi(1a4i5C*O>z32>_CY;YFRn|8WZUw=NuW-l6V1*JbpmW-i%+g0`ZDY4Mm&P z0V(IyWI%3e#71Ukt4*fH%OoaVMDxDHisKP!#iY|YHE!!1={Y~c(C)p8#Dd<5kL=|` zf>ZBB(dWlLnn>1hF;$pGA*azfi-!n*6%Rzu^#y##m#EbhmwNU@^R@akKBs`kw7{1Y z%K?>%=ZwQWeAJ)e6A zmlCGB+@Ge0tK1J-NO{iAEg{#qfv36-uFqcS5uxwA{=-@)F)6|2k~!7u*~pnGFq_u?7E-B@CJVnP<C`!>P;1lxy8e3*rE zsZpU>;~+#wyXV1_`~o|FvbPaKY&_f%i>Ie?;e%R7$tjjk>~FsvcerX+61e)hQh&!V zbn3OznVO@&_o&f-6814ka-tVZoYl^rIA^TFrO`YXvg%)s?b);uKTCU_}n$0kTjVUy3hb&-E3a&8~i=~IcT zSEY6~#5FAkj4Zs`(s)bld2QM99=4}kkCTITydSj~5Db_k1V49%iL<4h;FCx^kh62x zvNH#3tg)IJ82$?uBN*zobHH?mWn$3np&EY&P5ke-<6EXZqiyUM!Xep=n=Z9b93`Lm z4VT 
z71SEdjEXeh5qS6bwTsxrl4rU0cIC|n{Hg|~52H_zX>jVFY}+un>L7K+FLKzdcQwLGd{kPY=ZssAAkwiJsxtsJ z!YmL`=Q14bDwT`GJ336jOL+p=hdi@^dkNfK)qZaujShZe z86m~szQS6MtcF5r==*F}PO0z7zdAoh)&7ZBF4-dcJYHRrwQm&+UT6|zZ_}g-8^iy2 zkE^Dheqz_CT7cA4J1gO&Q*D7ZDaWUT4SjGlD}m~xT~^H;&9zEIqZU+&hxEFt@V)_3 z4{(sRb^v8)uJ*lV>N2F*Kn%i4^(KcJ7-jGiaymQTTog z;UpojxzVGhm@w?vNv#XXmTpziGzqaVcOI*5+xuoV;|yM7jxJ!EijRtRzftsla!rxy zJfp0bM2BIIkmO|(s=njU?wRO2&qU}OH?H%8^b1cwq)8-BHby`dGtKFn_lZ}KvGduq9_va z4I1$jy7Prx(PuXYqLe|>B~k`Hi;H*c(3<4sal`IC^`KvlwNN@gn$U=E{lFka`i}u; z=Ucxp&UU=70&2>oEg#f*gl%^a!Th{=;nDXItm~~@cl<$jRsNaDrC_G2N2-ejeW>pX zo^9jOpIhBKq)G(-YtOGFO5~g(wcZp$%yT6uWuSvz9#BoP6aF$AzRFJyYDm#{EzLc_ zHAWL3vlytcOTIi2Ci~ydR41sJWYniqn#eV;5CpT$`HP}-X|&1YaaaDq%+@Hh)x?uI zM(~D+W_RNZSz>%!>)Si6($|e%TKYs0HGb%~CHBT@bcr_kOHa-^cBTw`7ZWjBY9Ho+ zvr2*1Uv+E=Y56dHt%vT8Z}s>y{3nI9F}+ho3w%G>WUuZWn|Uyz0B$A%Py6ZhQeCB< zfP=8f#gK?D5rKW82^t6+Zg&(v42QBfaos67%Y#Cfj6c$35u~EQ{f=ajH=(1gxL8M@ zzSuGwp2?1!HB}4o97~ZgVq|YdO1!9=9?|+QmPsNRez^~gf9LSyTW52yF>SOi9^xyJ zlj>R0!P`cg$WC~KAnMWwU!DMuea@Y3v3zr`9QGD=UmV-KITZ9{9^&2U{E%7)3I9`D z`oT;}vnQPRe367EXySo{MFM<$nrJ(p?X~Qp-gfRd?i88OtcI;GgmfTj0ngt)M>O#Y z0BZc8CWZ}yquX3;U98b!0OgSE+KY5dh2ezQWN$=tmTI10*;+FAu38V%TU^8oCbe6RR2fz!8kj!R zh^E*Yr}4dY;qjioNO3R)@Opi%xQXn!K_@!3eq2AIwcXd=SCsA##N}mLvv*$Lo9%q} z&KpH`>W_X)Za?k#RQ>dI!uYj#T@_!+nvv|lEajk|^|;iWpYG==E^VsBOl69T->LMN zt2MG`Z6*}uG{yCat5+qGZFFFY2UN-B$j%0fC82joj1r}09aYP0AFtjKG&Twel*D@((G``|bW;G5q{j3ax#PHLevAxaaOd-?XUlwGt5{J3LN&5qwG` zF$%aoa%uxk(V@u}PmwmRrb!ej1k3>U8t&$}qK{s?XzqO3P zL!;7}qX+L7$W>V>nU0Fr9Y&`p5;H3ckGxMxrUY=B>=jRlKER>)6rCK(eQVA1y;Qd6 zfL`g2n0?O>?nO$VTe$|6l7qbud#f)c7M#va@8+T#>D-lq@UOnq&+DWTA zpSO+AP2WI@{;>8s?Q|*^O_;s~4r&t9-g3ZuG$QGJ^6rL;jschPJxyE7QR{=gi2yVr z*SLdSE87AZZ8cT(@U7WQ(lV{^o5#fDkE*}UFk(L@;%>9D-_U`a~YW zQI2eFk^v5~m;eL!6ye83!zr+MEx5OnT?xg-KCkwb#groTrcru|nf>&v>B?JeL0pf> zID$R#;?zEVo)3aX|fbY%i{uBLoi!Abp zytcY{#vc*o^zBX45jARhstTkGKrt<~L%q=`cbIn03uE-Da^@37H~xN>er^1az9>dn zig>TKr}kKGw%h%bpfo040?!y{d{y>c9$x9sO`ltedg00Tty_pEA!NCmax-0PRcKa| zKE|zZA 
zAQa^@Zlrk$!Jc}*x*;M|b8+LkgW<1{2ec(wG5VNf-LXq!7Te-=#80KU8>)E6->Fp0 z%NtuJ&3^3EMU~)o{xw?D&h;X!+FEI;Fa#Qlu5%ENYLCAZQ|$w7mMpJ6qdyJo^Dy z#Z3N6SZ;w5*~|7+(?wc)NM+!w%hU9`d`wcO7r*ljC`Rc+r?IcZ6s>k#-}Sjli{oK) z<*QgKf;epgX6NE<5Nz`5?)`N!*IQ&y$Q>o5-mr+~wG+Tk^|E%$`-Kh8N+F0R{A*1+ke;nedFs@{vdCsc&xFu%P|rBNJ&h7jn$NbbSP0J6!0zw+6U9lFX(@6jqa6IU7v0{+in3{ zHfa%XSFLgLP0gn&Tjr45&Yr9_2Y?Neq;S*tyJ>J>QQLuGm?(}5F&SXmQ(@i@X_0v> zJLeGN`(S;J9O+FK>uP+0RvD8%bxblb$Yv}>&V`|PE!k;kz*KGQ%$UU#d5%u{{nvF=^X{Vm)0p5E@O9*%TOFhVIh48M)j+Dn6O4^sueoY1)B8 zQ?YK^aWHo4 z85$L>5glyOC-M&(okYDBZ)C8Mmr;t#2;$ftzM(Qtg}6%f`US-W+hYrBRn%%Pe2Q^p z2K`xh;M59&qWhDVJ)g>7Z zq&{E4(0A*v9Qd8{hYA0?{+Rgv-kj)n4xPS#Q`Vq@JTHYNbl{q6vmW~ZUgSvU8I@e1 zY66gudAO&-r#dHCDP1Ip0e5g2mbgbP>*Y~&f|tc*V(#$0(zq{!?P*ZY1Hy$X=|`r6 z%goDar1Y3hV2*XR^TMnbe_o>Bov1TM0?m#h#l{naQn$aDDSFjVU>_53#@U-pn^QYA zbQ7MD>cR zi|I#Wvi_9+=LaZLB%s}sNM5I*ecX-0;Jaw?KEAO!?8_@eKl;!il)Y=OM7ACO0yfqx z4XWA0E(>?Gu~k=!7peQs2$r~;PqzzC!HtlPne7|?FzX=Dj-~id@p58AP7T<|v>X^H zq|vAww97w?JDRWXJ6nWS1GiVl6=+vZL))EJbxr%qX{Rp)gcU(tYbKD3397rx_#QzA zd>mBsff8szOtj`3fK%_2JZjg5Fg1BuSlNVk-76!>Gb-Z}!)oUlQxP*k9T9g=vvu&L z%m9(G6fIM=Js{4j5qOv9n3L4?rb^|6_$!`@_E1YQ=R8ObKC+F+AZ>` zum63MRt@XcZlE>y@2A?Ull%C^hrnHf^QrQ+f@^Zo#i#SEmFO-dAc?LAdLin)9O$pX5LQ-`-~#m2`9=dU^X1ZU3h(bvC&l zkt}i-6~Snz7!spD5s(rVtr0I^Pb0pq$ETGTkmnHiL;Q~M$Ilc#P^usUaTc^C-uN&D zbQyThq))$bGKTGlWJj*1Nz!kr;dzxL>#i!4I<8}YYIS;VTKd;RUZjr+qp&k7x3D9E z39ZwRXwxz4?dhRi-dd^d_}Kp6zRs)_@NDfjJ#IXa6zL_ zKs%M@gCliQqeM%GR^%sMgtO>HzxrP$g2fVr9h?H{YFn~xjnU>sid3%@mq3rUG?ZeP zyrN&a%-hRjuD5_9ag~zW|3z1_bgA(j6lY~7^yDjcX3Fvp=K7SBn9~QRx%*Fj7B*Gz zuS;K@$@N#6E_K|%;K?6Lb5{^}-2?iaxVjX|{;|R`)1bi*LddJ(3=1m@C*XMy$49$Q z&cAqe{daiekBt`l|A)lP$wA9sB-WH(c%RG&L`2P=Zq?AI*s`W`An$QrJ`EL2%XU{0 zjWj8$Kf)y9SZD%i!W`>WDJPYI%~R{BvO`w(0EHc2mxe8S&hsS>Rvxf`RrGxL+Yp5u z6QScQTffasJor=<{eI%8@BN05A0DEp+D`Uy7Z(%X$kN(K0+nJ+TRL(LRb=w*-MAQ8 zIB&huk$}GNd8}Hm=>RaATGPa8GjKgHMbKsK_Ol_Ici!{nYSAfMc+JfsX80##V5WPb zE~Q5LsrlHfhS*rxT&?c2M%P9ayy=B%$oh;T~&)sySAn49r5KB6{$ 
zq&fEb3#6X1mK5zi?j}-KNVP_brz7hbW|O|g50@@xVStX)&QjKAIJ=}ZjqB8&Nu>ar z`?0YYJWH@ya9jS62{(^V4i{CZwmoEQlNmRpuWjvj^@S6&+Ts&%4GP z-5vtk`zh~~{46tne)!W2q3p7+;mUZ$iVXBhWqfCr%q z=sW!x3fI5#o%sG(@V8|A-_S12s+CIDm2bkltRVpbd976gL|Ogza8p_olg8ht>|4dS zgUv1f)UA(6lTqB16@%W*uMlHUB!;{2cNJY>NpC8e3>up~=e+Y;DU{9Y)m`MCwGvgTVzFvfawX|2mI5oPJkulXh{4s4 zyZ3~h7Bw+4qpOCtVrth?B2)5XjtQ2wsHj?gJlO)73N;;tD$t4dTe(~)e<8jx3G}rL?;Af z>?>^A{?e1Y|Br?jJCTTXJNo6(|Hs=~hDF&vZ@`a%(z0}jv`8$aNJvP7NcU3Gu?UNR zG$;~FhjfRCvap22(z29*q`*qMG%SL+biTKb`g{KGhxhCIg=3F{d-uL#u945KnUo|5w&$nqWEO_ z(^%;y;$52}%29nq&%!9Un$9`_4ZCpyQ(2yT4`I@4Q!7x+o|aveLej1~yo=$v1F>%CAY(TTm6YORP6`otOVf;EnH(qS zVnjn6>;@-uA4CXjN-s){6~TB)bU}gMBuMJ*LK=^2QHD+*KtPZDHXTa@7lcH9;{7<2 znA1p~wuy;}^I~`GS@(6XGrQBnotFAJKb%6$OYVQCT*PgrUIQy9(~G|)Q5g2^0D^js z3VSkk&$|zT+_(+RNv#qffz}iELAkU$rc6mdKh-jPdBGKm{l*Q1Mq#u9F19y8Is8tc z35CgK3Xc=mQC84GNE?slRrLwE0{olhYaD(BSvJeU%dzH=n*FKqmIL8m0X`k>InhuO zH1+G$66-H`eR;zI)x^#AVy-(PgKXA74(XY{&*LPd?Rt>Oi9^AU2fAdhtM^1R*$r*C zCc{QL0z9bfO5kjeTdUpY=GE)fTxM}oRVOd}HoV(d6wspGihK@Bg<(BPx0l{^LD|R4 zl)5b-6y=@Co+(*b2y1KWu`-hn$FVmw(}l3YUF?r<+{_}tqHlMt zS+l_}>n8Iv7fOPvDjng*Wk6rz;o^-42UFvK8Gk>%xo(eX3g^bg`oht;A?PqEIMUtPVchO45~9vM~j zGVroV8@=h@c#b{TouN!YEEBT>xe()d0-}!jH5V%mn<5(nL7QMAh9eO)_u{ML`*5Q- zU~389E^6evrJJBC8SKVbVQmj`a5lbFw(U6F@1(Pd-7EpzS;yhoK`8*e2O4@cOyZdb z114t85Befv=T$0{3CFcJ3Yokvj_46-%E*h z^42>dZuI+sp`oya&d5B+Nk5H#!zSAD>xY*A{9cTtFzBz$=@#0qyP-&dVDeEvST}>9 z_$`b9+E_@bXW(hw2xKFYe0)G)bMf1v*X5XKAP=2^+|5AWXq$t?c{pJ~!X{l_Zb>oM zJdjF=m_mull}E=U>L_g5{EE1l=QQ1z1`tWDI@fAyuTOrWbGlvpZAjve1Y!7Vu(d%T zyS-Ein>Ffw441VAZKse@o|uvKJQ+xVHDY`jeFT$QSP)x-c1_)br*U#l)O=;)$$O5K zG$>5DU+LIIb{j$fAO+`0!^kE_s{o;fe9hb&g)}ve%OxLZk%zFLPg6HI>bpw|3$4dT zM+^P@{iVZ57GH61wH(ZK)XzNYmj|1s?F66wt09)&P&GRqxo@g?9#-|#P@HOt)*PhU zylR9r8c5pmwJDAgR=pRNG&63ZNl7T}Dplf<=nZ)AVppA6v3|X5W~#)Ux#nxBY_?ahG?RA94#YY;Ohrjc z4P2M!sD)?T2(4NelC0qL=XHRsbo^xU3Qo)ZZPD)Pi1B=w{EuthH%5dz0)1Kx|A{b% zeDr(%T~AA9?}0&?-Ch}8`QuO_Q^w8 z^_9=@nR^wTg`GsC;xgE?N+6KVwa`*sWv+aEWcg9wC%tvbIGx*oY9$S|$-f&W{E(qv 
z9do&m>-sET%yndR%(!(jt9-J7E+6~tqVNoYBv!{r`o>3ulg4x)Ftn{Q&){i~KvP`? zdXHS0xE5BRPWxU-ke5eKnd-F$HiT2Ib2u=>$0Ud6lfJ%g9U-Ibz|;jv3Lm90?zT0h zK?RFLn9VoI^2ME-#=+Mn<&8+u2H$wh#!IwO;XDvf5htXo&N1ag5Ry=KNs**}F&h!9 z5iLt%M)5P~W44mk`7u_7-L1vr)N8^)0w03YUlA2Ul$i!0f8I(jc(hWXy}Z0U2|&)+ z!}sAA(BJl?Bzux1joGrgR0~Bx5Kk6U9-^9pseSkJ*f3Ghq=PGqzP?mGGTJSHEVhc_ znW0*3YL@M~NI_9@g3cW@O-WZjanB35Ts7-$!Vf}<{K_G`Lg)xDZIqxCuhTzR$u|~3 zXBu8W2)DR{Ku(@sB_@?ShTHlo1v+%~rOQ)|&q9iiLlK6Zgx2)PVsiHI#cD3t6=>5> zAon#8lD?dA5?0Vpu->=FW^EjAuQ<)b#`N5P$!j#Mp9c~rU2{rR?joy%_>mkTJDVP^yO@%EFiSf==__KvDM#Sh-Qetw4Q}cD?G0R4&ss z+u~JP1mGm5B&AdUX%*XYTKT)Az0Nwtjex(TBsF{_m&p=(TTY$ys-kOu2P1+SY@IP` zQmstJW@#uCUIGu>n8t*wAU>t*8<G@mZ6j- z9lDPzuQ4#hj4W_7-vL___E3Pn48@aAO@xwyYCFs18#Le0gs)GFxr**@p>LT^9KoJa zsFiI$ihyVMhnp0z`p9sMRQ8&qK5uNzB659x@|avae|DcwhPlu{lD9Brivr9+(eYKC zPv7#QIYHa#TL?|CzwO`xU9hN=-?`QWF4r_5F!gU{aFYmgYS%+o;Yn;%pD6y0q$t!F zLGwDhAW7t>U4S+vVXaHCEuFJT``0H(cue9|uptZhW75U%&9MhWkFSsZSweM zZl757o}0ae+cB_*d+FfVt!rRPPS{5y@|d^IQ_rWy=lr$L)|WA3)7#C6OiBOZ2g?B# zr%%J4(L<}9m!@pZKje00D<~l-k}ZZlqH{__mCiNd zi4`?v@S;@rxn*FMm?;j;YE=X_D-wkG0y=Yp2hugC#5b*1QyR_mQ! 
znrzPZmG3wimemE{QSYF|Z*}hp=@JrEE0T(Yv=!r)DTE2>CuF;TNG`N-7z-&1BjJcr zf}l!k9Zi5KKP186-om~opH){HLZ2$u5WXJiEv1S?gV`exhJNPu(ej+d1}zII_tN^H|g4OsQzOih`BK%S#)*BFw)(U`7M{ zfvHjgrySaQhzBX@CY-Cv;@3wht7u7?7)zE@GS=ZE^>ZIVyhyb-BMwSxXXxh>f-$p zYR8c%?Uy9-GUO17pJ`6*(f+{_PE(x0N`K=B-IXx#2oM`)mI^Qq%}3ViMH@yiC%p7B zTlOli=1!}9?{49n^X78joIV6*o2^MgY}=W@1lMW=8;$fzIBO^-huW}8U14U2(<<^y za;ZzI7)JC{R>l4z{XM0@%J*3s;GTZ~z$Z_b#`N9PB-;P}r70060w)|>K#iP!6O#?+bwJ z7cY!@r0=HaNEAc0<9L+gWEKsbs1Hr~;|Ry6<3kZGH9#EvNDVmR7fck*KHC)~gM{ZJ z@mr0a;(2y|^K<@aQ6mx~)jZgE9itV)9YWe}Vs+VBPi%6Q$ougUzi zJonXa(;{k2ilheegcb0zgd^0;b_o-IVT3-OOyyDXNmQ zbwRh+;ZBLuH`8v1*by2Pn;l#O14)%NDq))$P!cWD@`~c;4ft{soEnng5Bh$JIe|&w%!y;~PLnM)s?s1Jn9;3Qg4Ag!O$s zPa5A4u=`xreVGuI!bCcWKFr}_N_ao4Aq+{rAAN_*{=lnK_vfQ!`j~YIrO!!1$uzjQ zd4ModYQO}XVU6VEwy3wRO@MP>D%&!{ZT5AH{xYFjha-E^4L0LDg!GcmK0ykd$C6JN z48?)_IRIB@l@g>rC_JcVY!~YGZLjAoBXgC$LZ>!p1tQdJ;W;Wq?i9l|(PXf?Z@%$$ zF&-LOj#sZ~R%*)Q9Rb!IF=bvuJPYL1AC5SWq792JZKA-COYPAXJ}_tPx%h3=qO4Z8 zL&hI=OLs18z{Dq@z1m>oqa2XidJu-tK#L?*6UV+F1)Hj}h2ATms-4=Dk?Z8gqK6D1 zofJdlJ1?`1*~4wBT!}~RW7!~V)Z(`X6gr1BAPrU@2$|)%`?4B0o!zOBw;1rIOdu|Q zTecTh*lvl7DO2xcQ&efg(;FD>vM-beHjNVZ%@%D$pD6NwQRt+IdpT%imMMJfNJKaH z>Rl)Q6S6OPDaW-w0yg#Y%ENvBW{X0K3vCrVw7@O~O_K{y# z4oN2!CGK+>Nt)5~bxsK&Sf8W<+~;A-KYX&ir-qwKH47>RHJQuqOnbEoHI+{8-_Qw0 zmqX|#&JXvSwxnqOlVg80jnu{AX))jU%PZk91F?DpfU;8%#g`~)(lQZ+6p{s9m!TQ_ zJMs(#3|)0_xk6UR+VR66R+;zoa$^g%hSBs(#7Vc&#cmO049rz`K>Rat_;1+&?6Qre zm$W0yxdYv-K@|JPAAjL{3#;`~SLsvb`7APF>*U_?6#o7O6-ZrJ=`#hgy!Y6Ty{Op# zw!dRBV__&%UZ{Jc5?pQ1H^O6`geC+bJV{nR20l+4oN4N(5IC5sYr#+;`}{5x`R1t0 zuj(%Un63j_V~%@c%s zNWo_K8W>QLtIhmiA64q?tVclOwZHa_7c9+&L+=f#uG5?JhOY6-byDoOMyEGu_CMlz zEZk%j{)sa>=IJxf&iffy`c!Iw-e@xnGA+oXV!jiqnJnt^0nq@hKh8_DGyuFF`Eek~ zZA82dzhg5t1b<%-z%v9VtxS_@Cs74S;t$XYAv-4-hS4F8mC@gu>-{@w8$}^-n|vr4 zlVtzYJ&)M*7#3QCjH*L}GhIs1aN<)#6|D+Mzs+J;&$GMT(@!=8>p~<(qM)ZPG-^#1w@p?5{m><-!%~anRJtK~RD`lc- zs%3PQDL|4skhTHj%HZrqd~i?~1#&`w0sl&$ZC1O{d2sK^3cL`WZqp{@*}-b+a}!CU zYX>(D5hj1X{R$?>o*0A*@+brHn;J9Z?IFWf+rplE3wyrDqZG4JSX 
z-e66vyaTUoU??G>?G@n-6tmOIRCqbjH2dYS&^BAsI^x+zD+-QNGPRIR&a9%ywM;88 zeJ+Q7G`T$YBiw^c=Z%@-^HnaenOmNAE!dnLf~d{!l@vY`hvIF@xYz$4!N82PRjsnYu`*;$PsHz6NhbdrS;xtk}F7Ryj^zvjv z8<>p?!5x=lsvQmD%m9=zi55bHqN1xZ10luaj__aNM3K=aVP_k5;*SNd9b&NGOSUY06u;oc0fTRodI5q z4#oV1yZB-z`?4#18%%Qdnt++Bl6H~_^^V2?DI84;LFS9Z>PQWaN$Lx?fi+fQ*sWa(=;_h$*q@;oX+WCg#+RN(uu7f;@ zn9LaI&TMu=<8|7&L@VU9JS$u}1divZ?1;$~D}|J{KqF|Uib~%!x{oRGxeAT&!syEN z)oUI#M;oSsAp`7??zdeCS(5Cwg$}27^;|#kC;RD?;J?VY2`#Elf+<{f9-2D?a>}PS zp4mMD61@owgd~!t6n7GZBSIyVTwcp)PTX@M5>_lz?Z!_$4EP+E&SeIFW?5iIG8(3t zs@KW-s2>v^>1oAZRavS;GhXu^;5Pc-kVRSq<1035#V*+W!{tJ)YZMEL!nSA!UrrB4 zK8pf=qMNY_O!?{MG@Ay6a&zupqVRDhyWJa5;&v+B1UH<}xJE4uPZJf1v?=?C47zBC zn7WA;We4gfi~z!x5=qi>i6ho6h7~!ReWy^=!|Ia?pmi@+3!FSeTpQOXn7TLqgAX_3 zuMSAuYKCRdBL5ZPf;+-Z5Q5PvlREn%B-tn#x*Kh5NgzDw67|*a?)za{;dn{_3BN0k z+&?!t-@d)(XZ3~x9i|b@Caxq}UfOCC;dk61`_9PHU*Y>#Z&kZA z{b~O&wzZC_yPp|JLU+|36$F7%t{`Es1L5LBZcX;ZS{RozRvuCS(qXajT%#0O@`DWu zffM-#2>ow5MA9$y`^oTejXPw-;X>KG2a3Qt)idbl55e zbka^95Eh;D?EfwnyZ4_E`vPGHbl5*6Z^#Po5?~?ROtd|Hjr>>59zIE}n7wllRu-+D z=+09IFE9;#LPS(LJi4OM8RKePH4_mV+i??cxH)WeIm@o-h9;uPATg}6s@gLR{%sp` zRi?{nhNX%U!HH6Woji|O96WoE_et(5mR)oY_y~Az3+>bAxpt1(jKMlqd>U?N4^av< z)53qL0jKVFwuw^LDriPgn(C04NzeyMP|o;7t$8RyzwO#~p@GkoB8k5k019z{jg$m< zE`-&!w?y6)69z9nFS9|@e;XZVveTzvqhxo3z=gK1OSGf%OG2&mK|Ll|2A{cM4fp4Y zK%Ez8q!;2^x9`I7O3dn={=;73f`zJh*;v#m)`jsd{#7(C61hDo} z7^wVXSpPgJp~dL#lW&_R-LXsuM|amhJpMziv@t|I|2|hlE_?j0Pq0FVD52c5h*foM zEuWPYMV51ukX(7YlDv;nK$Rktgt@A98n5UlKM@WEO#_8%YhLP|tm1cU0@EiieNf`J z?Aw52=|L=aldsr>bQ2cRR*{A8(icTCJjalngjz|t$ELC)PQpeMq*m?kx(iL(EZRE& zE>UDhgWW({!^CGw0crp;bo^+H(MXRyYb$g9z6rweBA)7)>PjmizBW19`9~R!!$XqyE?ARP0#c)QD-Z3?^!~ z_8EeN3pcj5)i+e{SIG5(i4=(h8uJmz*;PQS%P4=w$7_N1|y zw`OFZTZQo+KcmOauUsE{sSu%WtRiWSkY~0n^)Pjn#?Jd{ zI$Fwq*pyOX*zINL{YO`&8tuf%S6@&Ryt)PgToZ7PsG8Hmbwe12eg z+K971td)oL8#PQlXaYEsX4lDY>BfSYODpAr+k4h-Ha-@)oIY=f9=(=U>BAuQI8~d3 zko*ndXeg4R)85=RX7&hO1u%8#CzRRlbu>ypBDr~A#KLN8I?Vl}42ZAtfYhUVtcWU{ zoR--90iZ4oaeE%2ne6sNX`L7x-sUMI(&Kdu;Yt>?&swq5FoRhbkYBGShO7O0o!R4~ 
zBjvXOcZCgf<|pbSQs)Z|_J9G{9*0m^ct+@bAbZCST$3Es%S&L7!U zE4TYlK_;@ac8XsW$jLx_&n&N}I>c~FhV`5Ojt)H?aW-&=%o$pJSty2%Z2~ngg(Da{rmDvuB# zD>@Xdfa&}Dimp15?-@!_W%9Lk^#IXkF7RYr<_6IRXMeW}XJ?!~O>Jyx{!Na^ux{}M zAx_etlv(!Y+1_&WY!ocu)C`-DC0vo^g>b{jpV?mEu5 zH>%w~2n)bp+#pP$QORR%zeT`qrfNrece_K>)IWGK#k4RJsPB?FeMgIoJ1VL z#k11Gy^b3f&Od8ReD&TA1O+Pp2=T5B2o$mzM914-8@e!7|LOo-)8<|dY12{a;eJ08 z!fr7C_099GsbVgdJd7*7^D1(-^r>lDDbh_R%#L1--#x7|MoLzhl8mTh8(_|5LcKdF z2CEDe0S|eAvM-UFSs2iz#T?GqkdI28oV}8j%l6{RZ+NV`y9XGawbe$XixJ_eg*(R< z8-$$UP?gPQoxsuYGA{j4L{$fMxwH2OuP0r3jhrIiG;R6)2uI+?dRnBmhf+;l>E?{g zNOdEmx3|}MYo_55NnPW1>&gFQRbVR^w&8W-`c-Km-|n)6-Y*X08X2^^3%e%x`3zzI z_Um`;hc2?tj}90VKe#^Wj?J~FJs zvp9j$yuFEYYYJgcWMwLeO*kOS6nAmr>YdqPfvx=5Qs*iAXv1ow5$9dg8j77;xAN?w z4SysVPx-Bb56m#`VcKvGRna z)p>L*zo|qsuhwNFW|Bbf?RDl1(7l85^$)ZGLV^3I@ihr&z>c44y@e8G)r%{ZlSJzi z^*WU{mqL^&{k^wSfhulF=_V^wxr0N6v+MkAj>rbClxhK&ca$q;bSsZ}6iW+ghiFF$ zfvmawH7tb6euiAQgPtQ!h4s3k%VcJ(y^u;SC#QTXt3QRrYWIMlcm$WZ##sbhK;d*Zi`2fbVIRDYZe zHk)bb9pg+h?9HA(XYDPxB->)+)Z(4}77#zs+HOA@+3tRBLx0neQ7t z7A0E1E5M_)OilfMDqPuv6Onxs5DnDKXWdZ%%4dt;r{FlaDzC`hveM}WDv7LOKu1rT=tsfw#ArL7E3HkN|nOuvnQeD=M^)U~qZa;q4 zcz%p?lgg5b_>)t5rs=*Nd~qD{y5zcyxCE=?B5-dPuHy8kh3`H2ieg9#NBK@NeU2*> zD;=Q*Dy__N3%Rmr(tzC9dEq$Q<#3Ipv@q91A<{8`E2A&}tdy4r>_r9ktQC?$1spxw z>$f}~<$>hQdd~+2V+w2C0M1q0Eiu=b)uFl}raxYCHhxCfeIezB7D+R7bT>o2CT9F` z2JPWOO}+$ojc4G!z=@d)l6297*DvxV@;06gnAbcmCn8I8HC9tfadv_lJt$99Tn}$h zr|&Ft=n+`IZ7_EK{fxB-=SW=k5Ytc9LYmJn6BP+*Li>$LQu&HP6ND5;Y~qC!P$J)3 z+Oq9PjMNXl6&j3N0l*qiCEDtS;iX?YZ4GosEo+F0iO0gpn8tvMOqa|`mPhxhkaWN$ zuDPO+8l;q|tm&Vvl5fse`qunj&*2o-x0^vy^7zJQ%=iOevt zo9x?^sWMlIJfhN5!7+^Nv)b~aoG(Yp9v3m1K2StCoB9J3_{8$(Y9eMJT8_f7opu5? z==j@r#wnKCj?t+GTwGkd_wO4LT)QUu03ssuP6;On9HgPf{V8ahe_e3gU)4zZ^1=4m zQJY4tjK}fPi;suBm*g}6W8f$K#Qw+p<(g0eM|f=xmYu(c!)US+-aP8-8?QqO)Izr{9%L8Is}`O&Ijjnd=P$o&ek-?}s{j5? 
z8TuTG4o3-mHb*_*r7FK^-W#^({C$N}6gt9dG#rvJ6tW_i7F&XeaMc$U7Ut#PPzXg7 z6v!#!zO}#F_*!MdZV7NfXs{x{_(ux&3TexW0oq~Hq1my^|G)(iz%7zala&%(i;E~B z0B1^p@3{S}`PlPzF#BwOjN!w%jHu8d)!9i2q+8~TtT|ws^wf>Y40WoAc-!77j!!Ug zM7DC0))sl!JI_v(J2OJ`br_T>qg&L|;}aD_eT{j-_JEvmVXcND-#tl<0h7838q|}c zk}s>^V4LpEqX_$q}<$dCwKQ92MEe67hP*Vz?eph90F1b zdvpHV8qLQWh&Eq=Ao9O5lg;khywfcxbI)s~O(XM`I57H*-q&vl_N1hwe^fvkWpYJ4 z_PK%42imS|4()2CoxcabQvrLuG9tN#ltFECu5}g>I>`}vvX4OK{tFG2DesazQYvd! zGlp11I!|9yljKAbj68CO8xEss@&?RMr?U+BRD;w0wz)$Eoal__7Y!q%W!rh~K$e)` z<42GB`iF*stLy6a&B10YEaI?luQ^oW0!qVhRA961vkjv*=cbiR=$2I4#lRoFMrgLZ zYYTpm>9$7J5mOAOCQ-Tx!fsO(Y|f&UA&jFk&)b-n%@UFR6qfk`771wyDvFBJ(pM_B zD=qq<4m69x0M0+ByZ^Q;(Wg7_OO1TF3;0*e^&<$d~!rBLaAdnq~-fFH0-zsK{< zlAGW&VeUP!&;i!zE=!;fx_l;Hwj9ge-}9$g?;z!pgKeK&)36N%F1YKYg~ zZaU@jhZAtSwdqxU;{{^E%#+C)iRc@SlgHn^Fy`Ku=AHf;?an1x*Oom=u$3P7_0RC} z@q0W^RuGWJ;Y(jG)9y~bdGRpKApSF{p`OEM-NV3VA0LAqIWJxTacdJvUdcG#5t&s==VM^c@ohTFS*bm-A|MMOcWUM zuLJk{o5Xa#(x-o90;$=7!> z`L>88v<3@U+|p#`(|rQBDm)6k0*y}jcTW`jT+gy67I(f{nZF7iVyoDB6Ce}r8*~!L z((U!TT?R46xI)mv?;*~vkZh{&k!7;Xp$na14Zt0x8cz9p>JV$f2dFbQV7Cd-ZK+0}i$I^$p0Dl`Lwchhg2Y*ADyT4&Q9CBQ%|tWzs4S`(e}kQ-wqh z7G3cfkEnOPeM$A0H3j}0N~n~ZadpWH)!6(j?i@Km%)LetnO(fU|NL)V9ENjw-`{%5 zWn?Yij4O(lVbrcG^E*FUHJd+6!`(4kr5+7j!ySva>EGbx^eJ2HzsHGYq{C0u{$!`l zjrV@;x76RL@c0@o`KVEO)$0>JqhiDo`>g32yhx@b&@}?KFtZ<+>SDD!?00tTU4POd z9Yiml*P&;qvdFQp&scTHY_H8V9grw7 zMj`~?U&BRpPMkJP7YOE>i%T4gj<>O;cK^qus`DwQxxm3HCf=LyZ60&8`zx(gL~(3D{LefDlI8ZtfWywT+}Lk_B?w0fWR+bo1`S2jQb{2vO>5l)KVPWFc4=4B=x7r!6cvfGd zYD&E3K&pTWu&7roI$=}UYG<>E^NxWnoZTG2h;8tMaY)Jj);SKlc2B zU^N>Qb@p-P=DZ*U|VB=`AM9i&6vv z@x}YzX_XD>Kt7C2x@#zv5}r;=iilpE!~V2+Gwk73sk3=bs)3pj`IsSj z&3`q#{cVk!@24r)ME#$>Kv+o$FI);RsU?NF#-LLJOl+Fv(w_Up(IQJjuJ6kFB|YLa zev`+)RBnS-N}9WbThVjiw7JW8*jRTq+z_sHyBF49{y^+ue%Hg3M2X@r)6hiC425Rp zqSq?FZE_gY)+;)-JZ)=J(6nM>I;5gu%quYsNxPBwUe9d189(@<+G}?Wgh>-@0gMNh z5q&t%Y;^%2B#(_>`S`aHQi>qru>d$k$w^9kj?x6SVIa! 
zV@i#D&13c|XQ(Qu*XVjrptP~#kR#HG5ebtxz%mNS4Xe~NnZARk?LDnamq?<&K2-q- z+P~AfW5#V+XR()1zUq3>h{MbuCJptF)>-tI0tTX?HSl}=AY`%-mZOQSR$d8+S#IF^ zC)ak8F$&fYov@uK@4(&ft9g*DFh46dMqu?_Q9W5)4jW5u(JR+@CEMnTRakhOPZxlnhZHq-+fY+!HlY9qJ;Bk~pBL}0qB^>EkkRu9~!?L?~h3hslH zF1_7VU6xgb$Zd^p+3}(w583ZO)K?m7R_yS9et0b~@TbLzPvDA;6e4e!%=4^6>f*(A zQ+*E^yZ`UeqP;{m+G&F=5k`5 zYx%WS9QFOfG8gR}CT;!Y{VV$iN7reR*mCEMxLxzt{MG4OVsDb~pQC!eK4DVv_YFMT zm%|r@vzv8DU%YGH{`*EsoERWeE*7V&58AMq9s|Y5XhEsqkBb#8E2hZ6lYMetQ{M&& zWWQk3ZuV>7?PAyvQr5me!ex!z7|o3M*nsQCuer=CK{9RQPCvb#D#)}|9D1v!1xe2} zLSDHVyU0u+YBF-9Ew{fssc;kDE~prKQIC^E%2oTCeLbqnVdHmu7+3qr^p|0QZCq~N+-u?~Bfv#Q7Q(}T%XIp^j z3_2IRxX+R+jE)zn%uMWo^#ji|K&>jHcW%=*i0{Arl@Um?D0ky-J#cM zGS6!>Zq00Z?K*N6na$&^HnZTt|Mxe3Is^=tew+MoO@qCx0^woqev}3rYk@xqwg7fy zD);j4^_23ZH;Nc8I!o1&UuW%)X6m2M6at2Hoh9I#UJa|YdMf|c0SedZQ4XCMw=4mk z`#qHcm@mRD?j#%sfYuLMlW0FA&uebq8;gWKib=#!DAbIKJI>qup6X|*U+Vkc-~J@Q zWoBnrEMtc&zc{nRbP_EFmasTupWO=n@l6gR80`M9dHgdBTwUJG#_?KFd~@H#v|HQv z#*?k4YjIfz6at94gI$zs%d+e*Q1)k$%(1+Jg80c_mkZzX#WdE(rB%hh|>}&^lKYb*uAUF8j7jq-{@XD|0 zp_Sl9TZAV)h%K@#smzk*jX5|$fDI9#*7b~-zkj!|b6km8=b%Nd<_1pu(hOvG%qv>< zGjXX|!M_si+Ne(%{WJ%m+1G`uH~Mz0s3P#FUv9y;??nm~`+YM}Hu)p})^_^uc+h-d z%mL#?YsW=B#AC8o(gLY&xtLJl6hPm$NkNPe;+2&hLJ$_e;fyy%O-(_}=Z{y$k&RPR za>cmE^S_J$uyENp%gWkLGXVHsm$!K|kblwhBW1^`cVcHv%2!5tummb@8P_6f>CpRdEiNDeqsVCSd4Nbh-RtX|_B5GuF}X#^w{@Rg3P zuj`svCfCl89(FO#f^pFm6@t~rBzvljoZM#tohPeL2uEkN;EJK>yn9#Y5^h$EhPmtM z<^C|C%GmOsi@cIEP3flIc#R4c?|~<|Q2-;WI{$9jwyhU3QDz~^i?$p8F5B|-BS=AY z?x5|!{P&Ix24LmMIK|c!NqNndvOxmt>?W6&o;L|!BVPY`Y~oS2^C9m0!A!zky*mDe zXT-9%R`HFjL8sZ2#27g4iw^N$v_oU|-IRG*(zU4zjZx5r~5Z)(9Gl%&e zF&(g06>qgj=wX55!RLo>b*Kc!Fs;Wlm|W?mK$+Do^CmOP)?La!E2fRD?ymo#G!b3- z_25CA$LrEdjl>glPfyfq%NEy}j;i~2mZ$Or!dAuKoJlE|DA*%;swG8SiyUwLl*q<6s_9^5ZB;VpSAdB_4+c!cH6rb%EAluOHuDUcwli?YCc72KK@z zaG4l`oU_OqGBGx0g|_|Dg~W6ci=q8n0Y09L$5+`-)u4(l`Q&M5N|QEHpUI~aka0Y; zYmMR$_~}{js{!O}9ir1V32~zG=Z}o$q4$wp6fb0i-CrZETI1T^^rtET6S*dH$cXW{ zO)ObKhgl73S;5`yc>F`WIqg}HgG%q>yuNMsyqB(pj9~78FB7-tsc9U36GArD^$b3y 
zSg|Uc2dE}s^c5GVoq4-ZSpL2C(`G3QrtSIoJHj+ojDgYdO$SA=MNPuL(SyXhdZA6S0G4=H6SY~Z_x9?pm%(x4Lk>y-mZW>sSVl)F zjjj4dy;sDTkCvZW%5^qyQDon}NO$K>q|&%REBNU`tW9isOI?4)WyjurR-3pEja~fA zEA>$G{lRK^W7132$N?PG3!cT6I@jdY8rm;2r=SQJM6_t5Xz-jF=488VJIyC>cSHp# zd%hD7ymw5?wWVusf7;DoTFXg|=(pu@NPwB-T;huzJ}unfvF)HiVx#&#qL*8~e4!g2 z4%Q!@ZV2=Wa(U@~11}ziUOu0@Az)@VTBJI2sC4ORE4te!=A{4u?{h#Ka0et^P zrKFk6#7T@QDj;TIFRH_ko4EU$gikQzrX8vva?gX2Wq zbNyRhs`2)RJNtn04>$bRNR5srS&?A!&Ud*nqg>JE z^GEx&L9z}2V!@|2>OK!74C+D3tD=$*N^ViwojpK>`G^;8&t4t4DSLBsk967N zAzobbFvSLWw3r~Y~%psq)vThmT`d8cm)1vzit6sk^&3@u z?SOCB?I#guZJs`R8mQy_oTJQ3a!@l&?|Jr#J+{AgT1IC-NE@pq`GDb>=8%skJhbOX zst28Yz%WWvRp2Jb3#JQl4cNS8#B%<|v@f^kUBQmzV-WtQWOkP52?yU`T#D&hXnbti zvT)pp6wYsU|J$=>*Ai5({l{a!K=apn;Vv1XV(R;=9=2|`zR%DNM1N~+F@%0_V(CrE zjf}Vw%dmf9mbLJ`!GQsQO0SK?&U#{zF}wF7;Vkb1ygWTo_5=itt$yPvu0tRIzj4(5 zAON-^_!s^Ij;%1K>D#Lkh~Pd`<7#{~@76&;%(w&ygm=%YpUzqLIzwL375;eSQey^( zi4Wm~&(|*eL8+VWxj`4)5=$7Y%~NAptVrGvUi!zLIO58x)ST&$h7Va# zKt9;LWv3g5ydirE>-;(XW*JdkjmDk1wd|$hrZGXkH5EvH@YcT@dyNvw5zkzQ`#REN zHY9schFrCMwUqb?_{EFwv+7WNhktsk3Ods>JVeF|b)eLx-JWbdMQ6lag`m#qLW_hu z7Hx7Ls?6^($vH(6APOhnG3kNKSEy9Y*5sQX5^#MJzk0R&$3k|y`r~rV+cpl*2Js(R z#DFmA!r|RT|8pr{UD%}%5(=|~YdwWruY+bDriS45f=>2)mTe-WaTo)nzZUX&g%ONv zUg=~%OZ3;HFTRE7H?yephifUCUx&t4+`Eoh(O~NM@ni4c8s=-B7}TE4WzFROnfSRm zma=YcZfdfz;`(tQT}&IleiPe!s{iELzV|zSFwJm!f86v)_^jsa3w;q+&wdracgfVPJ4DJs9eo0yKk_ zfuY7_?L^fbEaD6dAM6EUK$?K0H_*Tapi(H8>VhXK3hwTzG_KkI%^Ktej&)V6;VVP9 z?EjajoSFI8H7yC^fL~7znwpr1gsqR7xbgzX;R-wKUYdPb!_WThJ@6}2nI~2@B2nDScaMy3~T-BUCF_@2mfmg`=C;K6ly}ylH93=15Kc1nh~ zcvelHZ?^WZCObm|TV`garHap`-JiT;3P36f(?oB5_CM;W_1*vH*S}{3L29yc=ibxz zyHt_r_WHe_IRk@9+!eX!dEHaK==2rUPmjMov(F4D*#6GqoV}2CaDD5y6%lo|;TD$| z7%s%@%j+*LcR4-f|G8^k?ve})vx4tjyH&L1&-CxyVt?2-aG$J5o%Wt~$(69B4Txf1{Y diff --git a/docs/setup-replication-between-two-sites-running-minio.md b/docs/setup-replication-between-two-sites-running-minio.md deleted file mode 100644 index 311c6bb01..000000000 --- a/docs/setup-replication-between-two-sites-running-minio.md +++ 
/dev/null @@ -1,117 +0,0 @@ -# How to use ``mc mirror`` to setup replication between two sites running Minio. [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -![minio_MIRROR](https://github.com/minio/minio/blob/master/docs/screenshots/miniomirror.jpeg?raw=true) - - -In this document we will illustrate how to set up replication between two Minio servers, `minio1` and `minio2` running on ``192.168.1.11`` and ``192.168.1.12`` respectively. We will mirror the data directory on `minio1` to the bucket on `minio2`. - - -## 1. Prerequisites - -* Download Minio server from [here](https://docs.minio.io/docs/minio) -* Download & Install mc from [here](https://docs.minio.io/docs/minio-client-quickstart-guide) -* Familiarity with [``mc mirror``](https://docs.minio.io/docs/minio-client-complete-guide#mirror) - -## 2. Install and Configure Minio Server - -### Setup minio1 - -```sh -$ ./minio server minio1-data/ - -Endpoint: http://127.0.0.1:9000 http://192.168.1.11:9000 -AccessKey: MURIVYBYNPTYE7O8I779 -SecretKey: lVbZmz4CvGkBl7JKw5icuL7RCcSvpBJTkAJTFQwz -Region: us-east-1 -... -``` -**mc alias** - -Alias is a short name to your cloud storage service for ``Minio client``. End-point, access and secret keys are supplied by your cloud storage provider. API signature is an optional argument. By default, it is set to ``S3v4``. - -```sh -$ ./mc config host add minio1 http://192.168.1.11:9000 MURIVYBYNPTYE7O8I779 lVbZmz4CvGkBl7JKw5icuL7RCcSvpBJTkAJTFQwz -``` - -**Create buckets and add objects** - -We have created few buckets using [``mc mb``](https://docs.minio.io/docs/minio-client-complete-guide#mb) and added objects to it using [``mc cp``](https://docs.minio.io/docs/minio-client-complete-guide#cp) Minio client commands. 
- -```sh -$ ./mc mb minio1/mybucket -$ ./mc cp myfile.txt minio1/bucket1 -$ ./mc ls minio1 -[2016-07-31 10:26:55 PDT] 0B bucket1/ -[2016-07-31 09:36:17 PDT] 0B bucket2/ -[2016-07-31 09:38:08 PDT] 0B bucket3/ -``` - -### Setup minio2 - -```sh -$ ./minio server minio2-data/ - -Endpoint: http://127.0.0.1:9000 http://192.168.1.12:9000 -AccessKey: YRDRWWQLEWS9OBJ31GZ2 -SecretKey: y2sSWzx5ytwvkELcxOuSaQ8n3doNqoIilRpb5Kjj -Region: us-east-1 -... -``` - -```sh -$ ./mc config host add minio2 http://192.168.1.12:9000 YRDRWWQLEWS9OBJ31GZ2 y2sSWzx5ytwvkELcxOuSaQ8n3doNqoIilRpb5Kjj -``` - -**Create bucket** - -We are creating destination bucket ``mbucket`` on ``minio2`` and adding ``minio2`` alias. The bucket ``mbucket`` will be used to mirror data directory of ``minio1``. - -```sh -$ ./mc mb minio1/mbucket -``` - -```sh -$ ./mc config host add minio2 http://192.168.1.12:9000 YRDRWWQLEWS9OBJ31GZ2 y2sSWzx5ytwvkELcxOuSaQ8n3doNqoIilRpb5Kjj - -``` - -## 4. Setup crontab -Cron is a Unix/Linux system utility by which you can schedule a task process for particular duration, we have tested this setup on Ubuntu Linux. - - -### Script - -Add crontab configuration on `minio1` providing path of data directory, ``minio1-data``. - -``--force`` option with ``mc mirror`` overwrites the destination contents, this would keep your contents in sync. - -```sh - -#!/bin/bash -datadir="/home/minio/minio1-data/" -minio2="minio2/mbucket" -MC_PATH="/home/minio/mc" -$MC_PATH --quiet mirror --force $minio1 $minio2 - -``` - -Set executable permissions on the script before adding a cron entry. - -```sh - -$ chmod 755 /home/minio/minio.sh -``` - -Set a new cron entry to run ``minio.sh`` script once every 30mins. - -```sh - -$ crontab -e -*/30 * * * * /home/minio/minio.sh -``` - -Note: We are going to introduce continuous replication feature in `mc mirror` which will enable the sites to be in sync without having the need to setup cron job. 
- -# Explore Further -* [Minio Quickstart Guide](https://docs.minio.io/docs/minio-quickstart-guide) -* [Minio Client Complete Guide](https://docs.minio.io/docs/minio-client-complete-guide) From 6ee27daac1d9c34935ef5606f2a724162198b4f2 Mon Sep 17 00:00:00 2001 From: Andreas Auernhammer Date: Mon, 26 Dec 2016 02:03:08 +0100 Subject: [PATCH 033/100] fix blake2b tests on non-amd64 machines (#3496) Fix the TestHashes Test for non-amd64 machines --- vendor/golang.org/x/crypto/blake2b/blake2b.go | 6 ++++++ .../x/crypto/blake2b/blake2bAVX2_amd64.go | 14 ++++++++------ .../x/crypto/blake2b/blake2bAVX2_amd64.s | 8 ++++---- .../golang.org/x/crypto/blake2b/blake2b_amd64.go | 8 ++++---- vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s | 4 ++-- vendor/golang.org/x/crypto/blake2b/blake2b_ref.go | 3 --- 6 files changed, 24 insertions(+), 19 deletions(-) diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go index b736632f9..fa9e48e31 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -23,6 +23,12 @@ const ( Size256 = 32 ) +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + var errKeySize = errors.New("blake2b: invalid key size") var iv = [8]uint64{ diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go index 422ba849c..8c41cf6c7 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -6,18 +6,20 @@ package blake2b -var useAVX2 = supportAVX2() -var useAVX = supportAVX() -var useSSE4 = supportSSE4() +func init() { + useAVX2 = supportsAVX2() + useAVX = supportsAVX() + useSSE4 = supportsSSE4() +} //go:noescape -func supportSSE4() bool +func supportsSSE4() bool //go:noescape -func supportAVX() bool +func supportsAVX() bool //go:noescape -func supportAVX2() bool +func supportsAVX2() bool //go:noescape func 
hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 86bc182a5..96a51d524 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -489,14 +489,14 @@ noinc: MOVQ BP, SP RET -// func supportAVX2() bool -TEXT ·supportAVX2(SB), 4, $0-1 +// func supportsAVX2() bool +TEXT ·supportsAVX2(SB), 4, $0-1 MOVQ runtime·support_avx2(SB), AX MOVB AX, ret+0(FP) RET -// func supportAVX() bool -TEXT ·supportAVX(SB), 4, $0-1 +// func supportsAVX() bool +TEXT ·supportsAVX(SB), 4, $0-1 MOVQ runtime·support_avx(SB), AX MOVB AX, ret+0(FP) RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go index c9a22fd7d..2ab7c30fc 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -6,12 +6,12 @@ package blake2b -var useAVX2 = false -var useAVX = false -var useSSE4 = supportSSE4() +func init() { + useSSE4 = supportsSSE4() +} //go:noescape -func supportSSE4() bool +func supportsSSE4() bool //go:noescape func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s index 04e45beb5..64530740b 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -280,8 +280,8 @@ noinc: MOVQ BP, SP RET -// func supportSSE4() bool -TEXT ·supportSSE4(SB), 4, $0-1 +// func supportsSSE4() bool +TEXT ·supportsSSE4(SB), 4, $0-1 MOVL $1, AX CPUID SHRL $19, CX // Bit 19 indicates SSE4 support diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go index 2c3c68b0d..da156a1ba 100644 --- 
a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -6,9 +6,6 @@ package blake2b -var useAVX2 = false -var useSSE4 = false - func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { hashBlocksGeneric(h, c, flag, blocks) } From e8ce3b64edb2515f2f3c0f4c1947e007307aec79 Mon Sep 17 00:00:00 2001 From: Bala FA Date: Mon, 26 Dec 2016 23:51:23 +0530 Subject: [PATCH 034/100] Generate and use access/secret keys properly (#3498) --- cmd/access-key.go | 91 --------- cmd/admin-handlers_test.go | 2 +- cmd/admin-rpc-client.go | 4 +- cmd/admin-rpc-server_test.go | 2 +- cmd/api-headers.go | 23 ++- cmd/api-headers_test.go | 2 +- cmd/auth-handler_test.go | 2 +- cmd/auth-rpc-client.go | 2 +- cmd/browser-peer-rpc.go | 4 +- cmd/browser-peer-rpc_test.go | 8 +- cmd/bucket-handlers_test.go | 62 +++--- cmd/bucket-notification-handlers_test.go | 8 +- cmd/bucket-policy-handlers_test.go | 62 +++--- cmd/config-migrate.go | 4 +- cmd/config-migrate_test.go | 8 +- cmd/config-old.go | 12 +- cmd/config-v11.go | 2 +- cmd/credential.go | 74 ++++++++ cmd/lock-instrument.go | 2 +- cmd/lock-rpc-server_test.go | 2 +- cmd/login-server_test.go | 6 +- cmd/main.go | 8 +- cmd/namespace-lock.go | 4 +- cmd/object-handlers_test.go | 230 +++++++++++------------ cmd/post-policy_test.go | 58 +++--- cmd/prepare-storage-msg.go | 2 +- cmd/s3-peer-client.go | 4 +- cmd/server-startup-msg.go | 8 +- cmd/server_test.go | 10 +- cmd/signature-jwt.go | 16 +- cmd/signature-jwt_test.go | 6 +- cmd/signature-v2.go | 16 +- cmd/signature-v2_test.go | 16 +- cmd/signature-v4-parser.go | 2 +- cmd/signature-v4.go | 14 +- cmd/signature-v4_test.go | 6 +- cmd/storage-rpc-client.go | 4 +- cmd/storage-rpc-server_test.go | 4 +- cmd/streaming-signature-v4.go | 6 +- cmd/test-utils_test.go | 20 +- cmd/web-handlers.go | 22 +-- cmd/web-handlers_test.go | 38 ++-- 42 files changed, 429 insertions(+), 447 deletions(-) delete mode 100644 cmd/access-key.go create mode 100644 
cmd/credential.go diff --git a/cmd/access-key.go b/cmd/access-key.go deleted file mode 100644 index 5ade6037e..000000000 --- a/cmd/access-key.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "crypto/rand" - "encoding/base64" -) - -// credential container for access and secret keys. -type credential struct { - AccessKeyID string `json:"accessKey"` - SecretAccessKey string `json:"secretKey"` -} - -const ( - accessKeyMinLen = 5 - accessKeyMaxLen = 20 - secretKeyMinLen = 8 - secretKeyMaxLen = 40 -) - -// isValidAccessKey - validate access key for right length. -func isValidAccessKey(accessKey string) bool { - return len(accessKey) >= accessKeyMinLen && len(accessKey) <= accessKeyMaxLen -} - -// isValidSecretKey - validate secret key for right length. -func isValidSecretKey(secretKey string) bool { - return len(secretKey) >= secretKeyMinLen && len(secretKey) <= secretKeyMaxLen -} - -// mustGenAccessKeys - must generate access credentials. -func mustGenAccessKeys() (creds credential) { - creds, err := genAccessKeys() - fatalIf(err, "Unable to generate access keys.") - return creds -} - -// genAccessKeys - generate access credentials. 
-func genAccessKeys() (credential, error) { - accessKeyID, err := genAccessKeyID() - if err != nil { - return credential{}, err - } - secretAccessKey, err := genSecretAccessKey() - if err != nil { - return credential{}, err - } - creds := credential{ - AccessKeyID: string(accessKeyID), - SecretAccessKey: string(secretAccessKey), - } - return creds, nil -} - -// genAccessKeyID - generate random alpha numeric value using only uppercase characters -// takes input as size in integer -func genAccessKeyID() ([]byte, error) { - alpha := make([]byte, accessKeyMaxLen) - if _, err := rand.Read(alpha); err != nil { - return nil, err - } - for i := 0; i < accessKeyMaxLen; i++ { - alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))] - } - return alpha, nil -} - -// genSecretAccessKey - generate random base64 numeric value from a random seed. -func genSecretAccessKey() ([]byte, error) { - rb := make([]byte, secretKeyMaxLen) - if _, err := rand.Read(rb); err != nil { - return nil, err - } - return []byte(base64.StdEncoding.EncodeToString(rb))[:secretKeyMaxLen], nil -} diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 0e012626f..95f5b853e 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -83,7 +83,7 @@ func getAdminCmdRequest(cmd cmdType, cred credential) (*http.Request, error) { return nil, err } req.Header.Set(minioAdminOpHeader, cmd.String()) - err = signRequestV4(req, cred.AccessKeyID, cred.SecretAccessKey) + err = signRequestV4(req, cred.AccessKey, cred.SecretKey) if err != nil { return nil, err } diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go index 324ecda7b..d454cc05a 100644 --- a/cmd/admin-rpc-client.go +++ b/cmd/admin-rpc-client.go @@ -109,8 +109,8 @@ func makeAdminPeers(eps []*url.URL) adminPeers { // Check if the remote host has been added already if !seenAddr[ep.Host] { cfg := authConfig{ - accessKey: serverConfig.GetCredential().AccessKeyID, - secretKey: 
serverConfig.GetCredential().SecretAccessKey, + accessKey: serverConfig.GetCredential().AccessKey, + secretKey: serverConfig.GetCredential().SecretKey, address: ep.Host, secureConn: isSSL(), path: path.Join(reservedBucket, servicePath), diff --git a/cmd/admin-rpc-server_test.go b/cmd/admin-rpc-server_test.go index 99832642a..a18f91547 100644 --- a/cmd/admin-rpc-server_test.go +++ b/cmd/admin-rpc-server_test.go @@ -31,7 +31,7 @@ func testAdminCmd(cmd cmdType, t *testing.T) { adminServer := serviceCmd{} creds := serverConfig.GetCredential() reply := RPCLoginReply{} - args := RPCLoginArgs{Username: creds.AccessKeyID, Password: creds.SecretAccessKey} + args := RPCLoginArgs{Username: creds.AccessKey, Password: creds.SecretKey} err = adminServer.LoginHandler(&args, &reply) if err != nil { t.Fatalf("Failed to login to admin server - %v", err) diff --git a/cmd/api-headers.go b/cmd/api-headers.go index 5dd9ebeac..a16470ca0 100644 --- a/cmd/api-headers.go +++ b/cmd/api-headers.go @@ -25,23 +25,26 @@ import ( "strconv" ) -// Static alphanumeric table used for generating unique request ids -var alphaNumericTable = []byte("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ") +const requestIDLen = 16 -// newRequestID generates and returns request ID string. -func newRequestID() string { - alpha := make([]byte, 16) - rand.Read(alpha) - for i := 0; i < 16; i++ { - alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))] +// mustGetRequestID generates and returns request ID string. +func mustGetRequestID() string { + reqBytes := make([]byte, requestIDLen) + if _, err := rand.Read(reqBytes); err != nil { + panic(err) } - return string(alpha) + + for i := 0; i < requestIDLen; i++ { + reqBytes[i] = alphaNumericTable[reqBytes[i]%alphaNumericTableLen] + } + + return string(reqBytes) } // Write http common headers func setCommonHeaders(w http.ResponseWriter) { // Set unique request ID for each reply. 
- w.Header().Set("X-Amz-Request-Id", newRequestID()) + w.Header().Set("X-Amz-Request-Id", mustGetRequestID()) w.Header().Set("Server", ("Minio/" + ReleaseTag + " (" + runtime.GOOS + "; " + runtime.GOARCH + ")")) w.Header().Set("Accept-Ranges", "bytes") } diff --git a/cmd/api-headers_test.go b/cmd/api-headers_test.go index a20c4a3a1..540d136ee 100644 --- a/cmd/api-headers_test.go +++ b/cmd/api-headers_test.go @@ -22,7 +22,7 @@ import ( func TestNewRequestID(t *testing.T) { // Ensure that it returns an alphanumeric result of length 16. - var id = newRequestID() + var id = mustGetRequestID() if len(id) != 16 { t.Fail() diff --git a/cmd/auth-handler_test.go b/cmd/auth-handler_test.go index 0e6989390..a1a536131 100644 --- a/cmd/auth-handler_test.go +++ b/cmd/auth-handler_test.go @@ -301,7 +301,7 @@ func mustNewRequest(method string, urlStr string, contentLength int64, body io.R func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { req := mustNewRequest(method, urlStr, contentLength, body, t) cred := serverConfig.GetCredential() - if err := signRequestV4(req, cred.AccessKeyID, cred.SecretAccessKey); err != nil { + if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil { t.Fatalf("Unable to inititalized new signed http request %s", err) } return req diff --git a/cmd/auth-rpc-client.go b/cmd/auth-rpc-client.go index 905eb2083..978c9382f 100644 --- a/cmd/auth-rpc-client.go +++ b/cmd/auth-rpc-client.go @@ -74,7 +74,7 @@ func isRPCTokenValid(tokenStr string) bool { if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok { return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) } - return []byte(jwt.SecretAccessKey), nil + return []byte(jwt.SecretKey), nil }) if err != nil { errorIf(err, "Unable to parse JWT token string") diff --git a/cmd/browser-peer-rpc.go b/cmd/browser-peer-rpc.go index f62ff80fe..3a76e8ff6 100644 --- a/cmd/browser-peer-rpc.go +++ 
b/cmd/browser-peer-rpc.go @@ -103,8 +103,8 @@ func updateCredsOnPeers(creds credential) map[string]error { // Initialize client client := newAuthClient(&authConfig{ - accessKey: serverConfig.GetCredential().AccessKeyID, - secretKey: serverConfig.GetCredential().SecretAccessKey, + accessKey: serverConfig.GetCredential().AccessKey, + secretKey: serverConfig.GetCredential().SecretKey, address: peers[ix], secureConn: isSSL(), path: path.Join(reservedBucket, browserPeerPath), diff --git a/cmd/browser-peer-rpc_test.go b/cmd/browser-peer-rpc_test.go index 2f34da764..4adfd4abf 100644 --- a/cmd/browser-peer-rpc_test.go +++ b/cmd/browser-peer-rpc_test.go @@ -63,8 +63,8 @@ func TestBrowserPeerRPC(t *testing.T) { func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) { // Construct RPC call arguments. creds := credential{ - AccessKeyID: "abcd1", - SecretAccessKey: "abcd1234", + AccessKey: "abcd1", + SecretKey: "abcd1234", } // Validate for invalid token. @@ -105,8 +105,8 @@ func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) { // Validate for success in loing handled with valid credetnails. rargs = &RPCLoginArgs{ - Username: creds.AccessKeyID, - Password: creds.SecretAccessKey, + Username: creds.AccessKey, + Password: creds.SecretKey, } rreply = &RPCLoginReply{} err = rclient.Call("BrowserPeer.LoginHandler", rargs, rreply) diff --git a/cmd/bucket-handlers_test.go b/cmd/bucket-handlers_test.go index 24fb82783..212c066eb 100644 --- a/cmd/bucket-handlers_test.go +++ b/cmd/bucket-handlers_test.go @@ -50,8 +50,8 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri // Tests for authenticated request and proper response. 
{ bucketName: bucketName, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, locationResponse: []byte(` `), @@ -192,16 +192,16 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api // Bucket exists. { bucketName: bucketName, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, }, // Test case - 2. // Non-existent bucket name. { bucketName: "2333", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotFound, }, // Test case - 3. @@ -311,8 +311,8 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s uploadIDMarker: "", delimiter: "", maxUploads: "0", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, shouldPass: false, }, @@ -325,8 +325,8 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s uploadIDMarker: "", delimiter: "", maxUploads: "0", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotFound, shouldPass: false, }, @@ -339,8 +339,8 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s uploadIDMarker: "", delimiter: "-", maxUploads: "0", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotImplemented, shouldPass: false, }, @@ -353,8 +353,8 @@ 
func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s uploadIDMarker: "", delimiter: "", maxUploads: "0", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotImplemented, shouldPass: false, }, @@ -367,8 +367,8 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s uploadIDMarker: "abc", delimiter: "", maxUploads: "0", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotImplemented, shouldPass: false, }, @@ -381,8 +381,8 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s uploadIDMarker: "", delimiter: "", maxUploads: "-1", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, shouldPass: false, }, @@ -396,8 +396,8 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s uploadIDMarker: "", delimiter: "/", maxUploads: "100", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, shouldPass: true, }, @@ -410,8 +410,8 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s uploadIDMarker: "", delimiter: "", maxUploads: "100", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, shouldPass: true, }, @@ -535,8 +535,8 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap // Validate a good case request succeeds. 
{ bucketName: bucketName, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, }, // Test case - 2. @@ -684,7 +684,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa bucket: bucketName, objects: successRequest0, accessKey: "Invalid-AccessID", - secretKey: credentials.SecretAccessKey, + secretKey: credentials.SecretKey, expectedContent: nil, expectedRespStatus: http.StatusForbidden, }, @@ -693,8 +693,8 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa { bucket: bucketName, objects: successRequest0, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodedSuccessResponse0, expectedRespStatus: http.StatusOK, }, @@ -703,8 +703,8 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa { bucket: bucketName, objects: successRequest1, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodedSuccessResponse1, expectedRespStatus: http.StatusOK, }, @@ -713,8 +713,8 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa { bucket: bucketName, objects: successRequest1, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodedErrorResponse, expectedRespStatus: http.StatusOK, }, diff --git a/cmd/bucket-notification-handlers_test.go b/cmd/bucket-notification-handlers_test.go index e4ef7b35b..986749fdd 100644 --- a/cmd/bucket-notification-handlers_test.go +++ b/cmd/bucket-notification-handlers_test.go @@ -209,7 +209,7 @@ func testGetBucketNotificationHandler(obj ObjectLayer, 
instanceType, bucketName } rec := httptest.NewRecorder() req, err := newTestSignedRequestV4("GET", getGetBucketNotificationURL("", bucketName), - 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + 0, nil, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("%s: Failed to create HTTP testRequest for ListenBucketNotification: %v", instanceType, err) } @@ -222,7 +222,7 @@ func testGetBucketNotificationHandler(obj ObjectLayer, instanceType, bucketName } rec = httptest.NewRecorder() req, err = newTestSignedRequestV4("GET", getGetBucketNotificationURL("", bucketName), - 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + 0, nil, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("%s: Failed to create HTTP testRequest for ListenBucketNotification: %v", instanceType, err) } @@ -268,7 +268,7 @@ func testListenBucketNotificationNilHandler(obj ObjectLayer, instanceType, bucke []string{"*.jpg"}, []string{ "s3:ObjectCreated:*", "s3:ObjectRemoved:*", - }), 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + }), 0, nil, credentials.AccessKey, credentials.SecretKey) if tErr != nil { t.Fatalf("%s: Failed to create HTTP testRequest for ListenBucketNotification: %v", instanceType, tErr) } @@ -294,7 +294,7 @@ func testRemoveNotificationConfig(obj ObjectLayer, instanceType, bucketName stri testRec := httptest.NewRecorder() testReq, tErr := newTestSignedRequestV4("PUT", getPutBucketNotificationURL("", randBucket), int64(len(sampleNotificationBytes)), bytes.NewReader(sampleNotificationBytes), - credentials.AccessKeyID, credentials.SecretAccessKey) + credentials.AccessKey, credentials.SecretKey) if tErr != nil { t.Fatalf("%s: Failed to create HTTP testRequest for PutBucketNotification: %v", instanceType, tErr) } diff --git a/cmd/bucket-policy-handlers_test.go b/cmd/bucket-policy-handlers_test.go index 5b89e5289..c4d893e5d 100644 --- a/cmd/bucket-policy-handlers_test.go +++ b/cmd/bucket-policy-handlers_test.go @@ 
-277,8 +277,8 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))), policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNoContent, }, // Test case - 2. @@ -289,8 +289,8 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))), policyLen: maxAccessPolicySize + 1, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, }, // Test case - 3. @@ -301,8 +301,8 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))), policyLen: 0, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusLengthRequired, }, // Test case - 4. @@ -312,8 +312,8 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string bucketPolicyReader: nil, policyLen: 10, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, }, // Test case - 5. 
@@ -336,8 +336,8 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string bucketPolicyReader: bytes.NewReader([]byte("dummy-policy")), policyLen: len([]byte("dummy-policy")), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, }, // Test case - 7. @@ -348,8 +348,8 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))), policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, }, // Test case - 8. @@ -361,8 +361,8 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket"))), policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotFound, }, // Test case - 9. 
@@ -374,8 +374,8 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket"))), policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, }, } @@ -469,7 +469,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // expected Response. expectedRespStatus int }{ - {bucketName, credentials.AccessKeyID, credentials.SecretAccessKey, http.StatusNoContent}, + {bucketName, credentials.AccessKey, credentials.SecretKey, http.StatusNoContent}, } // Iterating over the cases and writing the bucket policy. @@ -520,8 +520,8 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // Case which valid inputs, expected to return success status of 200OK. { bucketName: bucketName, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedBucketPolicy: bucketPolicyTemplate, expectedRespStatus: http.StatusOK, }, @@ -529,8 +529,8 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // Case with non-existent bucket name. { bucketName: "non-existent-bucket", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedBucketPolicy: bucketPolicyTemplate, expectedRespStatus: http.StatusNotFound, }, @@ -538,8 +538,8 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // Case with invalid bucket name. 
{ bucketName: ".invalid-bucket-name", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedBucketPolicy: "", expectedRespStatus: http.StatusBadRequest, }, @@ -693,8 +693,8 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str }{ { bucketName: bucketName, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNoContent, }, } @@ -731,24 +731,24 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str // Test case - 1. { bucketName: bucketName, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNoContent, }, // Test case - 2. // Case with non-existent-bucket. { bucketName: "non-existent-bucket", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotFound, }, // Test case - 3. // Case with invalid bucket name. 
{ bucketName: ".invalid-bucket-name", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, }, } diff --git a/cmd/config-migrate.go b/cmd/config-migrate.go index 0107a3d8c..04da1c882 100644 --- a/cmd/config-migrate.go +++ b/cmd/config-migrate.go @@ -112,8 +112,8 @@ func migrateV2ToV3() error { srvConfig.Version = "3" srvConfig.Addr = ":9000" srvConfig.Credential = credential{ - AccessKeyID: cv2.Credentials.AccessKeyID, - SecretAccessKey: cv2.Credentials.SecretAccessKey, + AccessKey: cv2.Credentials.AccessKey, + SecretKey: cv2.Credentials.SecretKey, } srvConfig.Region = cv2.Credentials.Region if srvConfig.Region == "" { diff --git a/cmd/config-migrate_test.go b/cmd/config-migrate_test.go index 2e0a1ce41..f2f4d3229 100644 --- a/cmd/config-migrate_test.go +++ b/cmd/config-migrate_test.go @@ -148,11 +148,11 @@ func TestServerConfigMigrateV2toV11(t *testing.T) { } // Check if accessKey and secretKey are not altered during migration - if serverConfig.Credential.AccessKeyID != accessKey { - t.Fatalf("Access key lost during migration, expected: %v, found:%v", accessKey, serverConfig.Credential.AccessKeyID) + if serverConfig.Credential.AccessKey != accessKey { + t.Fatalf("Access key lost during migration, expected: %v, found:%v", accessKey, serverConfig.Credential.AccessKey) } - if serverConfig.Credential.SecretAccessKey != secretKey { - t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, serverConfig.Credential.SecretAccessKey) + if serverConfig.Credential.SecretKey != secretKey { + t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, serverConfig.Credential.SecretKey) } // Initialize server config and check again if everything is fine diff --git a/cmd/config-old.go b/cmd/config-old.go index 5f85873ce..ebf8edb3b 100644 --- a/cmd/config-old.go +++ b/cmd/config-old.go @@ -10,9 
+10,9 @@ import ( /////////////////// Config V1 /////////////////// type configV1 struct { - Version string `json:"version"` - AccessKeyID string `json:"accessKeyId"` - SecretAccessKey string `json:"secretAccessKey"` + Version string `json:"version"` + AccessKey string `json:"accessKeyId"` + SecretKey string `json:"secretAccessKey"` } // loadConfigV1 load config @@ -41,9 +41,9 @@ func loadConfigV1() (*configV1, error) { type configV2 struct { Version string `json:"version"` Credentials struct { - AccessKeyID string `json:"accessKeyId"` - SecretAccessKey string `json:"secretAccessKey"` - Region string `json:"region"` + AccessKey string `json:"accessKeyId"` + SecretKey string `json:"secretAccessKey"` + Region string `json:"region"` } `json:"credentials"` MongoLogger struct { Addr string `json:"addr"` diff --git a/cmd/config-v11.go b/cmd/config-v11.go index 9bf425b26..ebef56f36 100644 --- a/cmd/config-v11.go +++ b/cmd/config-v11.go @@ -50,7 +50,7 @@ func initConfig() (bool, error) { srvCfg := &serverConfigV11{} srvCfg.Version = globalMinioConfigVersion srvCfg.Region = "us-east-1" - srvCfg.Credential = mustGenAccessKeys() + srvCfg.Credential = newCredential() // Enable console logger by default on a fresh run. srvCfg.Logger.Console = consoleLogger{ diff --git a/cmd/credential.go b/cmd/credential.go new file mode 100644 index 000000000..3e2c7d576 --- /dev/null +++ b/cmd/credential.go @@ -0,0 +1,74 @@ +/* + * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "crypto/rand" + "encoding/base64" +) + +const ( + accessKeyMinLen = 5 + accessKeyMaxLen = 20 + secretKeyMinLen = 8 + secretKeyMaxLen = 40 + + alphaNumericTable = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + alphaNumericTableLen = byte(len(alphaNumericTable)) +) + +func mustGetAccessKey() string { + keyBytes := make([]byte, accessKeyMaxLen) + if _, err := rand.Read(keyBytes); err != nil { + panic(err) + } + + for i := 0; i < accessKeyMaxLen; i++ { + keyBytes[i] = alphaNumericTable[keyBytes[i]%alphaNumericTableLen] + } + + return string(keyBytes) +} + +func mustGetSecretKey() string { + keyBytes := make([]byte, secretKeyMaxLen) + if _, err := rand.Read(keyBytes); err != nil { + panic(err) + } + + return string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]) +} + +// isAccessKeyValid - validate access key for right length. +func isAccessKeyValid(accessKey string) bool { + return len(accessKey) >= accessKeyMinLen && len(accessKey) <= accessKeyMaxLen +} + +// isSecretKeyValid - validate secret key for right length. +func isSecretKeyValid(secretKey string) bool { + return len(secretKey) >= secretKeyMinLen && len(secretKey) <= secretKeyMaxLen +} + +// credential container for access and secret keys. 
+type credential struct { + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` +} + +func newCredential() credential { + return credential{mustGetAccessKey(), mustGetSecretKey()} +} diff --git a/cmd/lock-instrument.go b/cmd/lock-instrument.go index 2442e53ee..7c00ba6bd 100644 --- a/cmd/lock-instrument.go +++ b/cmd/lock-instrument.go @@ -268,5 +268,5 @@ func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, opsID string) error // Return randomly generated string ID func getOpsID() string { - return newRequestID() + return mustGetRequestID() } diff --git a/cmd/lock-rpc-server_test.go b/cmd/lock-rpc-server_test.go index 30519b5b1..27098309e 100644 --- a/cmd/lock-rpc-server_test.go +++ b/cmd/lock-rpc-server_test.go @@ -55,7 +55,7 @@ func createLockTestServer(t *testing.T) (string, *lockServer, string) { lockMap: make(map[string][]lockRequesterInfo), } creds := serverConfig.GetCredential() - loginArgs := RPCLoginArgs{Username: creds.AccessKeyID, Password: creds.SecretAccessKey} + loginArgs := RPCLoginArgs{Username: creds.AccessKey, Password: creds.SecretKey} loginReply := RPCLoginReply{} err = locker.LoginHandler(&loginArgs, &loginReply) if err != nil { diff --git a/cmd/login-server_test.go b/cmd/login-server_test.go index a79e18371..3d8f66a97 100644 --- a/cmd/login-server_test.go +++ b/cmd/login-server_test.go @@ -32,7 +32,7 @@ func TestLoginHandler(t *testing.T) { }{ // Valid username and password { - args: RPCLoginArgs{Username: creds.AccessKeyID, Password: creds.SecretAccessKey}, + args: RPCLoginArgs{Username: creds.AccessKey, Password: creds.SecretKey}, expectedErr: nil, }, // Invalid username length @@ -47,12 +47,12 @@ func TestLoginHandler(t *testing.T) { }, // Invalid username { - args: RPCLoginArgs{Username: "aaaaa", Password: creds.SecretAccessKey}, + args: RPCLoginArgs{Username: "aaaaa", Password: creds.SecretKey}, expectedErr: errInvalidAccessKeyID, }, // Invalid password { - args: RPCLoginArgs{Username: creds.AccessKeyID, 
Password: "aaaaaaaa"}, + args: RPCLoginArgs{Username: creds.AccessKey, Password: "aaaaaaaa"}, expectedErr: errAuthentication, }, } diff --git a/cmd/main.go b/cmd/main.go index 9bd2292e2..073f3e922 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -189,14 +189,14 @@ func minioInit() { if accessKey != "" && secretKey != "" { // Set new credentials. serverConfig.SetCredential(credential{ - AccessKeyID: accessKey, - SecretAccessKey: secretKey, + AccessKey: accessKey, + SecretKey: secretKey, }) } - if !isValidAccessKey(serverConfig.GetCredential().AccessKeyID) { + if !isAccessKeyValid(serverConfig.GetCredential().AccessKey) { fatalIf(errInvalidArgument, "Invalid access key. Accept only a string starting with a alphabetic and containing from 5 to 20 characters.") } - if !isValidSecretKey(serverConfig.GetCredential().SecretAccessKey) { + if !isSecretKeyValid(serverConfig.GetCredential().SecretKey) { fatalIf(errInvalidArgument, "Invalid secret key. Accept only a string containing from 8 to 40 characters.") } diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go index f549dd4f8..9f9ba73a5 100644 --- a/cmd/namespace-lock.go +++ b/cmd/namespace-lock.go @@ -40,8 +40,8 @@ func initDsyncNodes(eps []*url.URL) error { return errInvalidArgument } clnts[index] = newAuthClient(&authConfig{ - accessKey: cred.AccessKeyID, - secretKey: cred.SecretAccessKey, + accessKey: cred.AccessKey, + secretKey: cred.SecretKey, // Construct a new dsync server addr. 
secureConn: isSSL(), address: ep.Host, diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index da23c77dd..7ea378bdb 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -97,8 +97,8 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, objectName: objectName, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, }, // Test case - 2. @@ -106,8 +106,8 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, objectName: "abcd", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotFound, }, // Test case - 3. @@ -117,7 +117,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, bucketName: bucketName, objectName: objectName, accessKey: "Invalid-AccessID", - secretKey: credentials.SecretAccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusForbidden, }, } @@ -248,8 +248,8 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a bucketName: bucketName, objectName: objectName, byteRange: "", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: bytesData[0].byteData, expectedRespStatus: http.StatusOK, @@ -260,8 +260,8 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a bucketName: bucketName, objectName: "abcd", byteRange: "", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: 
encodeResponse(getAPIErrorResponse(getAPIError(ErrNoSuchKey), getGetObjectURL("", bucketName, "abcd"))), expectedRespStatus: http.StatusNotFound, @@ -272,8 +272,8 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a bucketName: bucketName, objectName: objectName, byteRange: "bytes=10-100", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: bytesData[0].byteData[10:101], expectedRespStatus: http.StatusPartialContent, @@ -284,8 +284,8 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a bucketName: bucketName, objectName: objectName, byteRange: "bytes=-0", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(ErrInvalidRange), getGetObjectURL("", bucketName, objectName))), expectedRespStatus: http.StatusRequestedRangeNotSatisfiable, @@ -297,8 +297,8 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a bucketName: bucketName, objectName: objectName, byteRange: "bytes=10-1000000000000000", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: bytesData[0].byteData[10:], expectedRespStatus: http.StatusPartialContent, @@ -311,7 +311,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a objectName: objectName, byteRange: "", accessKey: "Invalid-AccessID", - secretKey: credentials.SecretAccessKey, + secretKey: credentials.SecretKey, expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(ErrInvalidAccessKeyID), getGetObjectURL("", bucketName, objectName))), expectedRespStatus: http.StatusForbidden, @@ -470,8 +470,8 @@ func 
testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam chunkSize: 64 * humanize.KiByte, expectedContent: []byte{}, expectedRespStatus: http.StatusOK, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, shouldPass: true, }, // Test case - 2 @@ -484,8 +484,8 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam chunkSize: 1 * humanize.KiByte, expectedContent: []byte{}, expectedRespStatus: http.StatusOK, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, shouldPass: true, }, // Test case - 3 @@ -512,8 +512,8 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam chunkSize: 64 * humanize.KiByte, expectedContent: []byte{}, expectedRespStatus: http.StatusBadRequest, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, shouldPass: false, removeAuthHeader: true, }, @@ -527,8 +527,8 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam chunkSize: 100 * humanize.KiByte, expectedContent: []byte{}, expectedRespStatus: http.StatusOK, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, shouldPass: false, }, // Test case - 6 @@ -541,8 +541,8 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam chunkSize: 1024, expectedContent: []byte{}, expectedRespStatus: http.StatusInternalServerError, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, shouldPass: false, fault: malformedEncoding, }, @@ -556,8 +556,8 @@ func testAPIPutObjectStreamSigV4Handler(obj 
ObjectLayer, instanceType, bucketNam chunkSize: 1024, expectedContent: []byte{}, expectedRespStatus: http.StatusBadRequest, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, shouldPass: false, fault: unexpectedEOF, }, @@ -571,8 +571,8 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam chunkSize: 1024, expectedContent: []byte{}, expectedRespStatus: http.StatusForbidden, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, shouldPass: false, fault: signatureMismatch, }, @@ -587,8 +587,8 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam chunkSize: 1024, expectedContent: []byte{}, expectedRespStatus: http.StatusForbidden, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, shouldPass: false, fault: chunkDateMismatch, }, @@ -602,8 +602,8 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam chunkSize: 1024, expectedContent: []byte{}, expectedRespStatus: http.StatusInternalServerError, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, shouldPass: false, fault: tooBigDecodedLength, }, @@ -733,8 +733,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a objectName: objectName, data: bytesData, dataLen: len(bytesData), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, }, @@ -746,7 +746,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a data: bytesData, dataLen: len(bytesData), 
accessKey: "Wrong-AcessID", - secretKey: credentials.SecretAccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusForbidden, }, @@ -758,8 +758,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a headers: copySourceHeader, data: bytesData, dataLen: len(bytesData), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, }, // Test case - 4. @@ -770,8 +770,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a headers: invalidMD5Header, data: bytesData, dataLen: len(bytesData), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, }, // Test case - 5. @@ -781,8 +781,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a objectName: objectName, data: bytesData, dataLen: len(bytesData), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, fault: TooBigObject, expectedRespStatus: http.StatusBadRequest, }, @@ -793,8 +793,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a objectName: objectName, data: bytesData, dataLen: len(bytesData), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, fault: MissingContentLength, expectedRespStatus: http.StatusLengthRequired, }, @@ -991,8 +991,8 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, bucketName: bucketName, newObjectName: "newObject1", copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), - accessKey: credentials.AccessKeyID, - secretKey: 
credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, }, @@ -1003,8 +1003,8 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, bucketName: bucketName, newObjectName: "newObject1", copySourceHeader: url.QueryEscape("/"), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, }, @@ -1014,8 +1014,8 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, bucketName: bucketName, newObjectName: objectName, copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, }, @@ -1027,8 +1027,8 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, bucketName: bucketName, newObjectName: objectName, copySourceHeader: url.QueryEscape("/" + bucketName + "/" + "non-existent-object"), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotFound, }, @@ -1040,19 +1040,19 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, bucketName: "non-existent-destination-bucket", newObjectName: objectName, copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotFound, }, // Test case - 6. - // Case with invalid AccessKeyID. + // Case with invalid AccessKey. 
{ bucketName: bucketName, newObjectName: objectName, copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), accessKey: "Invalid-AccessID", - secretKey: credentials.SecretAccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusForbidden, }, @@ -1175,7 +1175,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string rec := httptest.NewRecorder() // construct HTTP request for NewMultipart upload. req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName), - 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + 0, nil, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("Failed to create HTTP request for NewMultipart Request: %v", err) @@ -1208,7 +1208,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string // construct HTTP request for NewMultipart upload. // Setting an invalid accessID. req, err = newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName), - 0, nil, "Invalid-AccessID", credentials.SecretAccessKey) + 0, nil, "Invalid-AccessID", credentials.SecretKey) if err != nil { t.Fatalf("Failed to create HTTP request for NewMultipart Request: %v", err) @@ -1227,7 +1227,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string recV2 := httptest.NewRecorder() // construct HTTP request for NewMultipartUpload endpoint. reqV2, err := newTestSignedRequestV2("POST", getNewMultipartURL("", bucketName, objectName), - 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + 0, nil, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("Failed to create HTTP request for NewMultipart Request: %v", err) @@ -1260,7 +1260,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string // construct HTTP request for NewMultipartUpload endpoint. // Setting invalid AccessID. 
reqV2, err = newTestSignedRequestV2("POST", getNewMultipartURL("", bucketName, objectName), - 0, nil, "Invalid-AccessID", credentials.SecretAccessKey) + 0, nil, "Invalid-AccessID", credentials.SecretKey) if err != nil { t.Fatalf("Failed to create HTTP request for NewMultipart Request: %v", err) @@ -1331,7 +1331,7 @@ func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketNam defer wg.Done() rec := httptest.NewRecorder() // construct HTTP request NewMultipartUpload. - req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName), 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName), 0, nil, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("Failed to create HTTP request for NewMultipart request: %v", err) @@ -1527,8 +1527,8 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s object: objectName, uploadID: uploadIDs[0], parts: inputParts[0].parts, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(toAPIErrorCode(BadDigest{})), getGetObjectURL("", bucketName, objectName))), @@ -1542,8 +1542,8 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s object: objectName, uploadID: uploadIDs[0], parts: []completePart{}, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(ErrMalformedXML), getGetObjectURL("", bucketName, objectName))), @@ -1557,8 +1557,8 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s object: objectName, uploadID: "abc", parts: inputParts[0].parts, - accessKey: 
credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(toAPIErrorCode(InvalidUploadID{UploadID: "abc"})), getGetObjectURL("", bucketName, objectName))), @@ -1571,8 +1571,8 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s object: objectName, uploadID: uploadIDs[0], parts: inputParts[1].parts, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodeResponse(completeMultipartAPIError{int64(4), int64(5242880), 1, "e2fc714c4727ee9395f324cd2e7f331f", getAPIErrorResponse(getAPIError(toAPIErrorCode(PartTooSmall{PartNumber: 1})), @@ -1586,8 +1586,8 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s object: objectName, uploadID: uploadIDs[0], parts: inputParts[2].parts, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(toAPIErrorCode(InvalidPart{})), getGetObjectURL("", bucketName, objectName))), @@ -1601,8 +1601,8 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s object: objectName, uploadID: uploadIDs[0], parts: inputParts[3].parts, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(ErrInvalidPartOrder), getGetObjectURL("", bucketName, objectName))), @@ -1617,7 +1617,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s uploadID: uploadIDs[0], parts: inputParts[4].parts, accessKey: "Invalid-AccessID", - secretKey: credentials.SecretAccessKey, + secretKey: 
credentials.SecretKey, expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(ErrInvalidAccessKeyID), getGetObjectURL("", bucketName, objectName))), @@ -1631,8 +1631,8 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s object: objectName, uploadID: uploadIDs[0], parts: inputParts[4].parts, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedContent: encodedSuccessResponse, expectedRespStatus: http.StatusOK, @@ -1813,8 +1813,8 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri bucket: bucketName, object: objectName, uploadID: uploadIDs[0], - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNoContent, }, // Test case - 2. @@ -1823,8 +1823,8 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri bucket: bucketName, object: objectName, uploadID: "nonexistent-upload-id", - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotFound, }, // Test case - 3. 
@@ -1834,7 +1834,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri object: objectName, uploadID: uploadIDs[0], accessKey: "Invalid-AccessID", - secretKey: credentials.SecretAccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusForbidden, }, } @@ -1957,8 +1957,8 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string { bucketName: bucketName, objectName: objectName, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNoContent, }, @@ -1968,8 +1968,8 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string { bucketName: bucketName, objectName: objectName, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNoContent, }, @@ -1980,7 +1980,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string bucketName: bucketName, objectName: objectName, accessKey: "Invalid-AccessKey", - secretKey: credentials.SecretAccessKey, + secretKey: credentials.SecretKey, expectedRespStatus: http.StatusForbidden, }, @@ -2070,7 +2070,7 @@ func testAPIPutObjectPartHandlerPreSign(obj ObjectLayer, instanceType, bucketNam testObject := "testobject" rec := httptest.NewRecorder() req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, "testobject"), - 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + 0, nil, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: %v", instanceType, bucketName, testObject, err) @@ -2096,7 +2096,7 @@ func testAPIPutObjectPartHandlerPreSign(obj ObjectLayer, instanceType, bucketNam t.Fatalf("[%s] - Failed to create an unsigned request to 
put object part for %s/%s %v", instanceType, bucketName, testObject, err) } - err = preSignV2(req, credentials.AccessKeyID, credentials.SecretAccessKey, int64(10*60*60)) + err = preSignV2(req, credentials.AccessKey, credentials.SecretKey, int64(10*60*60)) if err != nil { t.Fatalf("[%s] - Failed to presign an unsigned request to put object part for %s/%s %v", instanceType, bucketName, testObject, err) @@ -2113,7 +2113,7 @@ func testAPIPutObjectPartHandlerPreSign(obj ObjectLayer, instanceType, bucketNam t.Fatalf("[%s] - Failed to create an unsigned request to put object part for %s/%s %v", instanceType, bucketName, testObject, err) } - err = preSignV4(req, credentials.AccessKeyID, credentials.SecretAccessKey, int64(10*60*60)) + err = preSignV4(req, credentials.AccessKey, credentials.SecretKey, int64(10*60*60)) if err != nil { t.Fatalf("[%s] - Failed to presign an unsigned request to put object part for %s/%s %v", instanceType, bucketName, testObject, err) @@ -2136,7 +2136,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN testObject := "testobject" rec := httptest.NewRecorder() req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, "testobject"), - 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + 0, nil, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: %v", instanceType, bucketName, testObject, err) @@ -2171,7 +2171,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN rec = httptest.NewRecorder() req, err = newTestStreamingSignedRequest("PUT", getPutObjectPartURL("", bucketName, testObject, mpartResp.UploadID, "1"), - 5, 1, bytes.NewReader([]byte("hello")), credentials.AccessKeyID, credentials.SecretAccessKey) + 5, 1, bytes.NewReader([]byte("hello")), credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("Failed to create new streaming signed 
HTTP request: %v.", err) @@ -2273,8 +2273,8 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin reader: bytes.NewReader([]byte("hello")), partNumber: "1", fault: None, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedAPIError: noAPIErr, }, @@ -2285,8 +2285,8 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin reader: bytes.NewReader([]byte("hello")), partNumber: "9999999999999999999", fault: None, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedAPIError: invalidPart, }, @@ -2297,8 +2297,8 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin reader: bytes.NewReader([]byte("hello")), partNumber: strconv.Itoa(maxPartID + 1), fault: None, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedAPIError: invalidMaxParts, }, @@ -2309,8 +2309,8 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin reader: bytes.NewReader([]byte("hello")), partNumber: "1", fault: MissingContentLength, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedAPIError: missingContent, }, @@ -2321,8 +2321,8 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin reader: bytes.NewReader([]byte("hello")), partNumber: "1", fault: TooBigObject, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedAPIError: entityTooLarge, }, @@ -2333,8 +2333,8 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName 
strin reader: bytes.NewReader([]byte("hello")), partNumber: "1", fault: BadSignature, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedAPIError: badSigning, }, @@ -2346,8 +2346,8 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin reader: bytes.NewReader([]byte("hello")), partNumber: "1", fault: BadMD5, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedAPIError: badChecksum, }, @@ -2358,8 +2358,8 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin reader: bytes.NewReader([]byte("hello")), partNumber: "1", fault: MissingUploadID, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, expectedAPIError: noSuchUploadID, }, @@ -2372,7 +2372,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin partNumber: "1", fault: None, accessKey: "Invalid-AccessID", - secretKey: credentials.SecretAccessKey, + secretKey: credentials.SecretKey, expectedAPIError: invalidAccessID, }, @@ -2541,7 +2541,7 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN testObject := "testobject" rec := httptest.NewRecorder() req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, testObject), - 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + 0, nil, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: %v", instanceType, bucketName, testObject, err) @@ -2564,7 +2564,7 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN rec = httptest.NewRecorder() req, err = newTestSignedRequestV4("PUT", 
getPutObjectPartURL("", bucketName, testObject, mpartResp.UploadID, "1"), - int64(len("hello")), bytes.NewReader([]byte("hello")), credentials.AccessKeyID, credentials.SecretAccessKey) + int64(len("hello")), bytes.NewReader([]byte("hello")), credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: %v", instanceType, bucketName, testObject, err) @@ -2584,7 +2584,7 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN instanceType, bucketName, mpartResp.UploadID) } - err = preSignV2(req, credentials.AccessKeyID, credentials.SecretAccessKey, int64(10*60*60)) + err = preSignV2(req, credentials.AccessKey, credentials.SecretKey, int64(10*60*60)) if err != nil { t.Fatalf("[%s] - Failed to presignV2 an unsigned request to list object parts for bucket %s, uploadId %s", instanceType, bucketName, mpartResp.UploadID) @@ -2604,7 +2604,7 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN instanceType, bucketName, mpartResp.UploadID) } - err = preSignV4(req, credentials.AccessKeyID, credentials.SecretAccessKey, int64(10*60*60)) + err = preSignV4(req, credentials.AccessKey, credentials.SecretKey, int64(10*60*60)) if err != nil { t.Fatalf("[%s] - Failed to presignV2 an unsigned request to list object parts for bucket %s, uploadId %s", instanceType, bucketName, mpartResp.UploadID) @@ -2724,7 +2724,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str // constructing a v4 signed HTTP request for ListMultipartUploads. 
reqV4, err = newTestSignedRequestV4("GET", getListMultipartURLWithParams("", bucketName, testObject, uploadID, test.maxParts, test.partNumberMarker, ""), - 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + 0, nil, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("Failed to create a V4 signed request to list object parts for %s/%s: %v.", @@ -2734,7 +2734,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str // construct HTTP request for PutObject Part Object endpoint. reqV2, err = newTestSignedRequestV2("GET", getListMultipartURLWithParams("", bucketName, testObject, uploadID, test.maxParts, test.partNumberMarker, ""), - 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey) + 0, nil, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatalf("Failed to create a V2 signed request to list object parts for %s/%s: %v.", diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 6881f18af..1b9b891d9 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -154,9 +154,9 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr accessKey string secretKey string }{ - {http.StatusForbidden, "invalidaccesskey", credentials.SecretAccessKey}, - {http.StatusForbidden, credentials.AccessKeyID, "invalidsecretkey"}, - {http.StatusNoContent, credentials.AccessKeyID, credentials.SecretAccessKey}, + {http.StatusForbidden, "invalidaccesskey", credentials.SecretKey}, + {http.StatusForbidden, credentials.AccessKey, "invalidsecretkey"}, + {http.StatusNoContent, credentials.AccessKey, credentials.SecretKey}, } for i, test := range testCasesV2 { @@ -190,8 +190,8 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr data: []byte("Hello, World"), expectedRespStatus: http.StatusNoContent, expectedHeaders: map[string]string{"X-Amz-Meta-Uuid": "1234"}, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, 
+ accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, malformedBody: false, }, // Bad case invalid request. @@ -208,8 +208,8 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr objectName: "test", data: []byte("Hello, World"), expectedRespStatus: http.StatusBadRequest, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, malformedBody: true, }, } @@ -262,20 +262,20 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr objectName: "test", data: []byte("Hello, World"), expectedRespStatus: http.StatusNoContent, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, dates: []interface{}{curTimePlus5Min.Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, - policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKeyID + `/%s/us-east-1/s3/aws4_request"]]}`, + policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}`, }, // Corrupted Base 64 result { objectName: "test", data: []byte("Hello, World"), expectedRespStatus: http.StatusBadRequest, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, dates: []interface{}{curTimePlus5Min.Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, - policy: `{"expiration": 
"%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKeyID + `/%s/us-east-1/s3/aws4_request"]]}`, + policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}`, corruptedBase64: true, }, // Corrupted Multipart body @@ -283,10 +283,10 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr objectName: "test", data: []byte("Hello, World"), expectedRespStatus: http.StatusBadRequest, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, dates: []interface{}{curTimePlus5Min.Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, - policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKeyID + `/%s/us-east-1/s3/aws4_request"]]}`, + policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}`, corruptedMultipart: true, }, @@ -305,18 +305,18 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr objectName: "test", data: []byte("Hello, World"), expectedRespStatus: http.StatusBadRequest, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: 
credentials.AccessKey, + secretKey: credentials.SecretKey, dates: []interface{}{curTime.Add(-1 * time.Minute * 5).Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, - policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKeyID + `/%s/us-east-1/s3/aws4_request"]]}`, + policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}`, }, // Corrupted policy document { objectName: "test", data: []byte("Hello, World"), expectedRespStatus: http.StatusBadRequest, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, dates: []interface{}{curTimePlus5Min.Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"3/aws4_request"]]}`, }, @@ -354,8 +354,8 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr objectName: "test", data: bytes.Repeat([]byte("a"), 1025), expectedRespStatus: http.StatusNoContent, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, malformedBody: false, }, // Failed with entity too small. 
@@ -363,8 +363,8 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr objectName: "test", data: bytes.Repeat([]byte("a"), 1023), expectedRespStatus: http.StatusBadRequest, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, malformedBody: false, }, // Failed with entity too large. @@ -372,8 +372,8 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr objectName: "test", data: bytes.Repeat([]byte("a"), (1*humanize.MiByte)+1), expectedRespStatus: http.StatusBadRequest, - accessKey: credentials.AccessKeyID, - secretKey: credentials.SecretAccessKey, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, malformedBody: false, }, } @@ -444,14 +444,14 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t rec := httptest.NewRecorder() dates := []interface{}{curTimePlus5Min.Format(expirationDateFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)} - policy := `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], {"success_action_redirect":"` + redirectURL + `"},["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKeyID + `/%s/us-east-1/s3/aws4_request"]]}` + policy := `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], {"success_action_redirect":"` + redirectURL + `"},["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}` // Generate the final policy document policy = fmt.Sprintf(policy, dates...) 
// Create a new POST request with success_action_redirect field specified req, perr := newPostRequestV4Generic("", bucketName, keyName, []byte("objData"), - credentials.AccessKeyID, credentials.SecretAccessKey, curTime, + credentials.AccessKey, credentials.SecretKey, curTime, []byte(policy), map[string]string{"success_action_redirect": redirectURL}, false, false) if perr != nil { diff --git a/cmd/prepare-storage-msg.go b/cmd/prepare-storage-msg.go index bf442a0f8..d4d38700a 100644 --- a/cmd/prepare-storage-msg.go +++ b/cmd/prepare-storage-msg.go @@ -103,7 +103,7 @@ func getHealMsg(endpoints []*url.URL, storageDisks []StorageAPI) string { // msg += "MINIO_SECRET_KEY=%s " // msg += "minio control heal %s" // creds := serverConfig.GetCredential() - // msg = fmt.Sprintf(msg, creds.AccessKeyID, creds.SecretAccessKey, getHealEndpoint(isSSL(), endpoints[0])) + // msg = fmt.Sprintf(msg, creds.AccessKey, creds.SecretKey, getHealEndpoint(isSSL(), endpoints[0])) disksInfo, _, _ := getDisksInfo(storageDisks) for i, info := range disksInfo { if storageDisks[i] == nil { diff --git a/cmd/s3-peer-client.go b/cmd/s3-peer-client.go index 758256f8b..670eb14bf 100644 --- a/cmd/s3-peer-client.go +++ b/cmd/s3-peer-client.go @@ -62,8 +62,8 @@ func makeS3Peers(eps []*url.URL) s3Peers { // Check if the remote host has been added already if !seenAddr[ep.Host] { cfg := authConfig{ - accessKey: serverConfig.GetCredential().AccessKeyID, - secretKey: serverConfig.GetCredential().SecretAccessKey, + accessKey: serverConfig.GetCredential().AccessKey, + secretKey: serverConfig.GetCredential().SecretKey, address: ep.Host, secureConn: isSSL(), path: path.Join(reservedBucket, s3Path), diff --git a/cmd/server-startup-msg.go b/cmd/server-startup-msg.go index 8e0e96742..720ede6c7 100644 --- a/cmd/server-startup-msg.go +++ b/cmd/server-startup-msg.go @@ -75,8 +75,8 @@ func printServerCommonMsg(endPoints []string) { endPointStr := strings.Join(endPoints, " ") // Colorize the message and print. 
console.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(endPointStr), 1), endPointStr))) - console.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKeyID))) - console.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretAccessKey))) + console.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey))) + console.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey))) console.Println(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region))) printEventNotifiers() @@ -109,10 +109,10 @@ func printCLIAccessMsg(endPoint string) { // Configure 'mc', following block prints platform specific information for minio client. console.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide) if runtime.GOOS == "windows" { - mcMessage := fmt.Sprintf("$ mc.exe config host add myminio %s %s %s", endPoint, cred.AccessKeyID, cred.SecretAccessKey) + mcMessage := fmt.Sprintf("$ mc.exe config host add myminio %s %s %s", endPoint, cred.AccessKey, cred.SecretKey) console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) } else { - mcMessage := fmt.Sprintf("$ mc config host add myminio %s %s %s", endPoint, cred.AccessKeyID, cred.SecretAccessKey) + mcMessage := fmt.Sprintf("$ mc config host add myminio %s %s %s", endPoint, cred.AccessKey, cred.SecretKey) console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) } } diff --git a/cmd/server_test.go b/cmd/server_test.go index 7d6f57c54..5f6fa9b83 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -94,14 +94,10 @@ func (s *TestSuiteCommon) TearDownSuite(c *C) { } func (s *TestSuiteCommon) TestAuth(c *C) { - secretID, err := genSecretAccessKey() - c.Assert(err, IsNil) + cred := newCredential() - accessID, err := genAccessKeyID() - c.Assert(err, IsNil) - - c.Assert(len(secretID), Equals, secretKeyMaxLen) - c.Assert(len(accessID), Equals, accessKeyMaxLen) 
+ c.Assert(len(cred.AccessKey), Equals, accessKeyMaxLen) + c.Assert(len(cred.SecretKey), Equals, secretKeyMaxLen) } func (s *TestSuiteCommon) TestBucketSQSNotification(c *C) { diff --git a/cmd/signature-jwt.go b/cmd/signature-jwt.go index 7866eef4e..ab2d1b4cd 100644 --- a/cmd/signature-jwt.go +++ b/cmd/signature-jwt.go @@ -43,10 +43,10 @@ const ( // newJWT - returns new JWT object. func newJWT(expiry time.Duration, cred credential) (*JWT, error) { - if !isValidAccessKey(cred.AccessKeyID) { + if !isAccessKeyValid(cred.AccessKey) { return nil, errInvalidAccessKeyLength } - if !isValidSecretKey(cred.SecretAccessKey) { + if !isSecretKeyValid(cred.SecretKey) { return nil, errInvalidSecretKeyLength } return &JWT{cred, expiry}, nil @@ -60,7 +60,7 @@ func (jwt *JWT) GenerateToken(accessKey string) (string, error) { // Trim spaces. accessKey = strings.TrimSpace(accessKey) - if !isValidAccessKey(accessKey) { + if !isAccessKeyValid(accessKey) { return "", errInvalidAccessKeyLength } @@ -71,7 +71,7 @@ func (jwt *JWT) GenerateToken(accessKey string) (string, error) { "iat": tUTCNow.Unix(), "sub": accessKey, }) - return token.SignedString([]byte(jwt.SecretAccessKey)) + return token.SignedString([]byte(jwt.SecretKey)) } var errInvalidAccessKeyID = errors.New("The access key ID you provided does not exist in our records") @@ -82,18 +82,18 @@ func (jwt *JWT) Authenticate(accessKey, secretKey string) error { // Trim spaces. 
accessKey = strings.TrimSpace(accessKey) - if !isValidAccessKey(accessKey) { + if !isAccessKeyValid(accessKey) { return errInvalidAccessKeyLength } - if !isValidSecretKey(secretKey) { + if !isSecretKeyValid(secretKey) { return errInvalidSecretKeyLength } - if accessKey != jwt.AccessKeyID { + if accessKey != jwt.AccessKey { return errInvalidAccessKeyID } - hashedSecretKey, _ := bcrypt.GenerateFromPassword([]byte(jwt.SecretAccessKey), bcrypt.DefaultCost) + hashedSecretKey, _ := bcrypt.GenerateFromPassword([]byte(jwt.SecretKey), bcrypt.DefaultCost) if bcrypt.CompareHashAndPassword(hashedSecretKey, []byte(secretKey)) != nil { return errAuthentication } diff --git a/cmd/signature-jwt_test.go b/cmd/signature-jwt_test.go index 2fe96dcb8..b3421deb1 100644 --- a/cmd/signature-jwt_test.go +++ b/cmd/signature-jwt_test.go @@ -190,11 +190,11 @@ func TestAuthenticate(t *testing.T) { // Authentication error. {"myuser", "mypassword", errInvalidAccessKeyID}, // Authentication error. - {serverConfig.GetCredential().AccessKeyID, "mypassword", errAuthentication}, + {serverConfig.GetCredential().AccessKey, "mypassword", errAuthentication}, // Success. - {serverConfig.GetCredential().AccessKeyID, serverConfig.GetCredential().SecretAccessKey, nil}, + {serverConfig.GetCredential().AccessKey, serverConfig.GetCredential().SecretKey, nil}, // Success when access key contains leading/trailing spaces. - {" " + serverConfig.GetCredential().AccessKeyID + " ", serverConfig.GetCredential().SecretAccessKey, nil}, + {" " + serverConfig.GetCredential().AccessKey + " ", serverConfig.GetCredential().SecretKey, nil}, } // Run tests. 
diff --git a/cmd/signature-v2.go b/cmd/signature-v2.go index 4913fe71d..3952a912f 100644 --- a/cmd/signature-v2.go +++ b/cmd/signature-v2.go @@ -67,12 +67,12 @@ var resourceList = []string{ func doesPolicySignatureV2Match(formValues map[string]string) APIErrorCode { cred := serverConfig.GetCredential() accessKey := formValues["Awsaccesskeyid"] - if accessKey != cred.AccessKeyID { + if accessKey != cred.AccessKey { return ErrInvalidAccessKeyID } signature := formValues["Signature"] policy := formValues["Policy"] - if signature != calculateSignatureV2(policy, cred.SecretAccessKey) { + if signature != calculateSignatureV2(policy, cred.SecretKey) { return ErrSignatureDoesNotMatch } return ErrNone @@ -126,7 +126,7 @@ func doesPresignV2SignatureMatch(r *http.Request) APIErrorCode { } // Validate if access key id same. - if accessKey != cred.AccessKeyID { + if accessKey != cred.AccessKey { return ErrInvalidAccessKeyID } @@ -150,7 +150,7 @@ func doesPresignV2SignatureMatch(r *http.Request) APIErrorCode { } // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; -// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); +// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) ); // // StringToSign = HTTP-Verb + "\n" + // Content-Md5 + "\n" + @@ -193,7 +193,7 @@ func validateV2AuthHeader(v2Auth string) APIErrorCode { // Access credentials. 
cred := serverConfig.GetCredential() - if keySignFields[0] != cred.AccessKeyID { + if keySignFields[0] != cred.AccessKey { return ErrInvalidAccessKeyID } @@ -239,15 +239,15 @@ func calculateSignatureV2(stringToSign string, secret string) string { func preSignatureV2(method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string { cred := serverConfig.GetCredential() stringToSign := presignV2STS(method, encodedResource, encodedQuery, headers, expires) - return calculateSignatureV2(stringToSign, cred.SecretAccessKey) + return calculateSignatureV2(stringToSign, cred.SecretKey) } // Return signature-v2 authrization header. func signatureV2(method string, encodedResource string, encodedQuery string, headers http.Header) string { cred := serverConfig.GetCredential() stringToSign := signV2STS(method, encodedResource, encodedQuery, headers) - signature := calculateSignatureV2(stringToSign, cred.SecretAccessKey) - return fmt.Sprintf("%s %s:%s", signV2Algorithm, cred.AccessKeyID, signature) + signature := calculateSignatureV2(stringToSign, cred.SecretKey) + return fmt.Sprintf("%s %s:%s", signV2Algorithm, cred.AccessKey, signature) } // Return canonical headers. 
diff --git a/cmd/signature-v2_test.go b/cmd/signature-v2_test.go index 6b8533539..4983eb183 100644 --- a/cmd/signature-v2_test.go +++ b/cmd/signature-v2_test.go @@ -55,7 +55,7 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) { queryParams: map[string]string{ "Expires": "60s", "Signature": "badsignature", - "AWSAccessKeyId": serverConfig.GetCredential().AccessKeyID, + "AWSAccessKeyId": serverConfig.GetCredential().AccessKey, }, expected: ErrMalformedExpires, }, @@ -64,7 +64,7 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) { queryParams: map[string]string{ "Expires": "60", "Signature": "badsignature", - "AWSAccessKeyId": serverConfig.GetCredential().AccessKeyID, + "AWSAccessKeyId": serverConfig.GetCredential().AccessKey, }, expected: ErrExpiredPresignRequest, }, @@ -73,7 +73,7 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) { queryParams: map[string]string{ "Expires": fmt.Sprintf("%d", now.Unix()+60), "Signature": "badsignature", - "AWSAccessKeyId": serverConfig.GetCredential().AccessKeyID, + "AWSAccessKeyId": serverConfig.GetCredential().AccessKey, }, expected: ErrSignatureDoesNotMatch, }, @@ -82,7 +82,7 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) { queryParams: map[string]string{ "Expires": fmt.Sprintf("%d", now.Unix()), "Signature": "zOM2YrY/yAQe15VWmT78OlBrK6g=", - "AWSAccessKeyId": serverConfig.GetCredential().AccessKeyID, + "AWSAccessKeyId": serverConfig.GetCredential().AccessKey, }, expected: ErrSignatureDoesNotMatch, }, @@ -126,7 +126,7 @@ func TestValidateV2AuthHeader(t *testing.T) { if err := serverConfig.Save(); err != nil { t.Fatal(err) } - accessID := serverConfig.GetCredential().AccessKeyID + accessID := serverConfig.GetCredential().AccessKey testCases := []struct { authString string @@ -207,9 +207,9 @@ func TestDoesPolicySignatureV2Match(t *testing.T) { signature string errCode APIErrorCode }{ - {"invalidAccessKey", policy, calculateSignatureV2(policy, creds.SecretAccessKey), ErrInvalidAccessKeyID}, - 
{creds.AccessKeyID, policy, calculateSignatureV2("random", creds.SecretAccessKey), ErrSignatureDoesNotMatch}, - {creds.AccessKeyID, policy, calculateSignatureV2(policy, creds.SecretAccessKey), ErrNone}, + {"invalidAccessKey", policy, calculateSignatureV2(policy, creds.SecretKey), ErrInvalidAccessKeyID}, + {creds.AccessKey, policy, calculateSignatureV2("random", creds.SecretKey), ErrSignatureDoesNotMatch}, + {creds.AccessKey, policy, calculateSignatureV2(policy, creds.SecretKey), ErrNone}, } for i, test := range testCases { formValues := make(map[string]string) diff --git a/cmd/signature-v4-parser.go b/cmd/signature-v4-parser.go index 8b24bb22b..913099e47 100644 --- a/cmd/signature-v4-parser.go +++ b/cmd/signature-v4-parser.go @@ -47,7 +47,7 @@ func parseCredentialHeader(credElement string) (credentialHeader, APIErrorCode) if len(credElements) != 5 { return credentialHeader{}, ErrCredMalformed } - if !isValidAccessKey(credElements[0]) { + if !isAccessKeyValid(credElements[0]) { return credentialHeader{}, ErrInvalidAccessKeyID } // Save access key id. diff --git a/cmd/signature-v4.go b/cmd/signature-v4.go index 2e6a0dda4..cf54c85a3 100644 --- a/cmd/signature-v4.go +++ b/cmd/signature-v4.go @@ -171,7 +171,7 @@ func doesPolicySignatureV4Match(formValues map[string]string) APIErrorCode { } // Verify if the access key id matches. - if credHeader.accessKey != cred.AccessKeyID { + if credHeader.accessKey != cred.AccessKey { return ErrInvalidAccessKeyID } @@ -188,7 +188,7 @@ func doesPolicySignatureV4Match(formValues map[string]string) APIErrorCode { } // Get signing key. - signingKey := getSigningKey(cred.SecretAccessKey, t, region) + signingKey := getSigningKey(cred.SecretKey, t, region) // Get signature. newSignature := getSignature(signingKey, formValues["Policy"]) @@ -217,7 +217,7 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s } // Verify if the access key id matches. 
- if pSignValues.Credential.accessKey != cred.AccessKeyID { + if pSignValues.Credential.accessKey != cred.AccessKey { return ErrInvalidAccessKeyID } @@ -268,7 +268,7 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s query.Set("X-Amz-Date", t.Format(iso8601Format)) query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds)) query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders)) - query.Set("X-Amz-Credential", cred.AccessKeyID+"/"+getScope(t, sRegion)) + query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, sRegion)) // Save other headers available in the request parameters. for k, v := range req.URL.Query() { @@ -313,7 +313,7 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s presignedStringToSign := getStringToSign(presignedCanonicalReq, t, region) // Get hmac presigned signing key. - presignedSigningKey := getSigningKey(cred.SecretAccessKey, t, region) + presignedSigningKey := getSigningKey(cred.SecretKey, t, region) // Get new signature. newSignature := getSignature(presignedSigningKey, presignedStringToSign) @@ -369,7 +369,7 @@ func doesSignatureMatch(hashedPayload string, r *http.Request, region string) AP } // Verify if the access key id matches. - if signV4Values.Credential.accessKey != cred.AccessKeyID { + if signV4Values.Credential.accessKey != cred.AccessKey { return ErrInvalidAccessKeyID } @@ -410,7 +410,7 @@ func doesSignatureMatch(hashedPayload string, r *http.Request, region string) AP stringToSign := getStringToSign(canonicalRequest, t, region) // Get hmac signing key. - signingKey := getSigningKey(cred.SecretAccessKey, t, region) + signingKey := getSigningKey(cred.SecretKey, t, region) // Calculate signature. 
newSignature := getSignature(signingKey, stringToSign) diff --git a/cmd/signature-v4_test.go b/cmd/signature-v4_test.go index c55520866..45efb295b 100644 --- a/cmd/signature-v4_test.go +++ b/cmd/signature-v4_test.go @@ -36,7 +36,7 @@ func niceError(code APIErrorCode) string { func TestDoesPolicySignatureMatch(t *testing.T) { credentialTemplate := "%s/%s/%s/s3/aws4_request" now := time.Now().UTC() - accessKey := serverConfig.GetCredential().AccessKeyID + accessKey := serverConfig.GetCredential().AccessKey testCases := []struct { form map[string]string @@ -83,7 +83,7 @@ func TestDoesPolicySignatureMatch(t *testing.T) { form: map[string]string{ "X-Amz-Credential": fmt.Sprintf(credentialTemplate, accessKey, now.Format(yyyymmdd), "us-east-1"), "X-Amz-Date": now.Format(iso8601Format), - "X-Amz-Signature": getSignature(getSigningKey(serverConfig.GetCredential().SecretAccessKey, now, "us-east-1"), "policy"), + "X-Amz-Signature": getSignature(getSigningKey(serverConfig.GetCredential().SecretKey, now, "us-east-1"), "policy"), "Policy": "policy", }, expected: ErrNone, @@ -112,7 +112,7 @@ func TestDoesPresignedSignatureMatch(t *testing.T) { credentialTemplate := "%s/%s/%s/s3/aws4_request" region := serverConfig.GetRegion() - accessKeyID := serverConfig.GetCredential().AccessKeyID + accessKeyID := serverConfig.GetCredential().AccessKey testCases := []struct { queryParams map[string]string headers map[string]string diff --git a/cmd/storage-rpc-client.go b/cmd/storage-rpc-client.go index 4ed639c2f..3f969d497 100644 --- a/cmd/storage-rpc-client.go +++ b/cmd/storage-rpc-client.go @@ -104,8 +104,8 @@ func newStorageRPC(ep *url.URL) (StorageAPI, error) { rpcAddr := ep.Host // Initialize rpc client with network address and rpc path. 
- accessKeyID := serverConfig.GetCredential().AccessKeyID - secretAccessKey := serverConfig.GetCredential().SecretAccessKey + accessKeyID := serverConfig.GetCredential().AccessKey + secretAccessKey := serverConfig.GetCredential().SecretKey if ep.User != nil { accessKeyID = ep.User.Username() if key, set := ep.User.Password(); set { diff --git a/cmd/storage-rpc-server_test.go b/cmd/storage-rpc-server_test.go index 4dc47475c..14ce5daa4 100644 --- a/cmd/storage-rpc-server_test.go +++ b/cmd/storage-rpc-server_test.go @@ -45,12 +45,12 @@ func createTestStorageServer(t *testing.T) *testStorageRPCServer { t.Fatalf("unable to get new JWT, %s", err) } - err = jwt.Authenticate(serverConfig.GetCredential().AccessKeyID, serverConfig.GetCredential().SecretAccessKey) + err = jwt.Authenticate(serverConfig.GetCredential().AccessKey, serverConfig.GetCredential().SecretKey) if err != nil { t.Fatalf("unable for JWT to authenticate, %s", err) } - token, err := jwt.GenerateToken(serverConfig.GetCredential().AccessKeyID) + token, err := jwt.GenerateToken(serverConfig.GetCredential().AccessKey) if err != nil { t.Fatalf("unable for JWT to generate token, %s", err) } diff --git a/cmd/streaming-signature-v4.go b/cmd/streaming-signature-v4.go index 85c0c0063..d2bbc3d66 100644 --- a/cmd/streaming-signature-v4.go +++ b/cmd/streaming-signature-v4.go @@ -56,7 +56,7 @@ func getChunkSignature(seedSignature string, date time.Time, hashedChunk string) hashedChunk // Get hmac signing key. - signingKey := getSigningKey(cred.SecretAccessKey, date, region) + signingKey := getSigningKey(cred.SecretKey, date, region) // Calculate signature. newSignature := getSignature(signingKey, stringToSign) @@ -101,7 +101,7 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time, return "", time.Time{}, errCode } // Verify if the access key id matches. 
- if signV4Values.Credential.accessKey != cred.AccessKeyID { + if signV4Values.Credential.accessKey != cred.AccessKey { return "", time.Time{}, ErrInvalidAccessKeyID } @@ -138,7 +138,7 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time, stringToSign := getStringToSign(canonicalRequest, date, region) // Get hmac signing key. - signingKey := getSigningKey(cred.SecretAccessKey, date, region) + signingKey := getSigningKey(cred.SecretKey, date, region) // Calculate signature. newSignature := getSignature(signingKey, stringToSign) diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index fc7440283..b427a6ae2 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -199,8 +199,8 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer { if err != nil { t.Fatalf("Unexpected error %s", err) } - testServer.AccessKey = credentials.AccessKeyID - testServer.SecretKey = credentials.SecretAccessKey + testServer.AccessKey = credentials.AccessKey + testServer.SecretKey = credentials.SecretKey objLayer, storageDisks, err := initObjectLayer(testServer.Disks) if err != nil { @@ -361,8 +361,8 @@ func StartTestStorageRPCServer(t TestErrHandler, instanceType string, diskN int) testRPCServer.Root = root testRPCServer.Disks = endpoints - testRPCServer.AccessKey = credentials.AccessKeyID - testRPCServer.SecretKey = credentials.SecretAccessKey + testRPCServer.AccessKey = credentials.AccessKey + testRPCServer.SecretKey = credentials.SecretKey // Run TestServer. 
testRPCServer.Server = httptest.NewServer(initTestStorageRPCEndPoint(serverCmdConfig{ @@ -396,8 +396,8 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer { testRPCServer.Root = root testRPCServer.Disks = endpoints - testRPCServer.AccessKey = credentials.AccessKeyID - testRPCServer.SecretKey = credentials.SecretAccessKey + testRPCServer.AccessKey = credentials.AccessKey + testRPCServer.SecretKey = credentials.SecretKey // create temporary backend for the test server. objLayer, storageDisks, err := initObjectLayer(endpoints) @@ -2131,8 +2131,8 @@ func StartTestBrowserPeerRPCServer(t TestErrHandler, instanceType string) TestSe credentials := serverConfig.GetCredential() testRPCServer.Root = root - testRPCServer.AccessKey = credentials.AccessKeyID - testRPCServer.SecretKey = credentials.SecretAccessKey + testRPCServer.AccessKey = credentials.AccessKey + testRPCServer.SecretKey = credentials.SecretKey // Initialize and run the TestServer. testRPCServer.Server = httptest.NewServer(initTestBrowserPeerRPCEndPoint()) @@ -2152,8 +2152,8 @@ func StartTestS3PeerRPCServer(t TestErrHandler) (TestServer, []string) { credentials := serverConfig.GetCredential() testRPCServer.Root = root - testRPCServer.AccessKey = credentials.AccessKeyID - testRPCServer.SecretKey = credentials.SecretAccessKey + testRPCServer.AccessKey = credentials.AccessKey + testRPCServer.SecretKey = credentials.SecretKey // init disks objLayer, fsDirs, err := prepareXL() diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index eb6de45ee..2f4324566 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -52,7 +52,7 @@ func isJWTReqAuthenticated(req *http.Request) bool { if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok { return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) } - return []byte(jwt.SecretAccessKey), nil + return []byte(jwt.SecretKey), nil } token, err := jwtreq.ParseFromRequest(req, jwtreq.AuthorizationHeaderExtractor, 
reqCallback) if err != nil { @@ -347,9 +347,9 @@ func (web webAPIHandlers) GenerateAuth(r *http.Request, args *WebGenericArgs, re if !isJWTReqAuthenticated(r) { return toJSONError(errAuthentication) } - cred := mustGenAccessKeys() - reply.AccessKey = cred.AccessKeyID - reply.SecretKey = cred.SecretAccessKey + cred := newCredential() + reply.AccessKey = cred.AccessKey + reply.SecretKey = cred.SecretKey reply.UIVersion = miniobrowser.UIVersion return nil } @@ -375,8 +375,8 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se // Initialize jwt with the new access keys, fail if not possible. jwt, err := newJWT(defaultJWTExpiry, credential{ - AccessKeyID: args.AccessKey, - SecretAccessKey: args.SecretKey, + AccessKey: args.AccessKey, + SecretKey: args.SecretKey, }) // JWT Expiry set to 24Hrs. if err != nil { return toJSONError(err) @@ -460,8 +460,8 @@ func (web *webAPIHandlers) GetAuth(r *http.Request, args *WebGenericArgs, reply return toJSONError(errAuthentication) } creds := serverConfig.GetCredential() - reply.AccessKey = creds.AccessKeyID - reply.SecretKey = creds.SecretAccessKey + reply.AccessKey = creds.AccessKey + reply.SecretKey = creds.SecretKey reply.UIVersion = miniobrowser.UIVersion return nil } @@ -531,7 +531,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) { if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok { return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) } - return []byte(jwt.SecretAccessKey), nil + return []byte(jwt.SecretKey), nil }) if e != nil || !token.Valid { writeWebErrorResponse(w, errAuthentication) @@ -760,8 +760,8 @@ func presignedGet(host, bucket, object string, expiry int64) string { cred := serverConfig.GetCredential() region := serverConfig.GetRegion() - accessKey := cred.AccessKeyID - secretKey := cred.SecretAccessKey + accessKey := cred.AccessKey + secretKey := cred.SecretKey date := time.Now().UTC() dateStr := date.Format(iso8601Format) 
diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index 8a937c4a8..42cad932a 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -149,7 +149,7 @@ func testLoginWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {"", "foo", false}, {"azerty", "", false}, {"azerty", "foo", false}, - {credentials.AccessKeyID, credentials.SecretAccessKey, true}, + {credentials.AccessKey, credentials.SecretKey, true}, } // Iterating over the test cases, calling the function under test and asserting the response. @@ -186,7 +186,7 @@ func testStorageInfoWebHandler(obj ObjectLayer, instanceType string, t TestErrHa credentials := serverConfig.GetCredential() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -232,7 +232,7 @@ func testServerInfoWebHandler(obj ObjectLayer, instanceType string, t TestErrHan credentials := serverConfig.GetCredential() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -278,7 +278,7 @@ func testMakeBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrHan credentials := serverConfig.GetCredential() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -338,7 +338,7 @@ func testListBucketsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa credentials := serverConfig.GetCredential() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err 
:= getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -397,7 +397,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa rec := httptest.NewRecorder() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -468,7 +468,7 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH credentials := serverConfig.GetCredential() rec := httptest.NewRecorder() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -544,7 +544,7 @@ func testGenerateAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrH credentials := serverConfig.GetCredential() rec := httptest.NewRecorder() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -590,7 +590,7 @@ func testSetAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandle credentials := serverConfig.GetCredential() rec := httptest.NewRecorder() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -651,7 +651,7 @@ func testGetAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandle credentials := serverConfig.GetCredential() rec := httptest.NewRecorder() - authorization, err := 
getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -670,7 +670,7 @@ func testGetAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandle if err != nil { t.Fatalf("Failed, %v", err) } - if getAuthReply.AccessKey != credentials.AccessKeyID || getAuthReply.SecretKey != credentials.SecretAccessKey { + if getAuthReply.AccessKey != credentials.AccessKey || getAuthReply.SecretKey != credentials.SecretKey { t.Fatalf("Failed to get correct auth keys") } } @@ -696,7 +696,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler credentials := serverConfig.GetCredential() rec := httptest.NewRecorder() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -760,7 +760,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl credentials := serverConfig.GetCredential() rec := httptest.NewRecorder() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -815,7 +815,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH credentials := serverConfig.GetCredential() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -928,7 +928,7 @@ func testWebGetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE credentials := 
serverConfig.GetCredential() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -1011,7 +1011,7 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t credentials := serverConfig.GetCredential() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -1117,7 +1117,7 @@ func testWebSetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE credentials := serverConfig.GetCredential() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") } @@ -1278,7 +1278,7 @@ func TestWebObjectLayerNotReady(t *testing.T) { rec := httptest.NewRecorder() credentials := serverConfig.GetCredential() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate", err) } @@ -1382,7 +1382,7 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) { rec := httptest.NewRecorder() credentials := serverConfig.GetCredential() - authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey) + authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate", err) } From 36fd317eb2a9674aac366f1b513a67562e330e6a Mon Sep 17 00:00:00 2001 From: Krishnan Parthasarathi Date: Mon, 26 Dec 2016 
23:59:55 +0530 Subject: [PATCH 035/100] Clean up lock-instrumentation and improve comments (#3499) - Add a lockStat type to group counters - Remove unnecessary helper functions - Fix stats computation on force unlock - Removed unnecessary checks and cleaned up comments --- cmd/lock-instrument.go | 205 ++++++++++++++---------------------- cmd/lock-instrument_test.go | 83 +++++++-------- cmd/lock-stat.go | 49 +++++++++ cmd/lockinfo-handlers.go | 12 +-- cmd/namespace-lock.go | 7 +- 5 files changed, 176 insertions(+), 180 deletions(-) create mode 100644 cmd/lock-stat.go diff --git a/cmd/lock-instrument.go b/cmd/lock-instrument.go index 7c00ba6bd..0b5fea730 100644 --- a/cmd/lock-instrument.go +++ b/cmd/lock-instrument.go @@ -17,7 +17,6 @@ package cmd import ( - "errors" "fmt" "time" ) @@ -36,59 +35,39 @@ const ( debugWLockStr lockType = "WLock" ) -// Struct containing information of status (ready/running/blocked) of an operation with given operation ID. +// debugLockInfo - represents a single lock's information, uniquely identified by opsID. +// See debugLockInfoPerVolumePath for more context. type debugLockInfo struct { // "RLock" or "WLock". lType lockType - // Contains the trace of the function which invoked the lock, obtained from runtime. + // Contains the backtrace of incl. the function which called (r)(un)lock. lockSource string - // Status can be running/ready/blocked. + // Status can be running/blocked. status statusType - // Time info of the since how long the status holds true. + // Time of last status update. since time.Time } -// debugLockInfo - container for storing locking information for unique copy -// (volume,path) pair. ref variable holds the reference count for locks held for. -// `ref` values helps us understand the n locks held for given pair. -// `running` value helps us understand the total successful locks held (not blocked) -// for given pair and the operation is under execution. 
`blocked` -// value helps us understand the total number of operations blocked waiting on -// locks for given pair. +// debugLockInfoPerVolumePath - lock state information on all locks held on (volume, path). type debugLockInfoPerVolumePath struct { - ref int64 // running + blocked operations. - running int64 // count of successful lock acquire and running operations. - blocked int64 // count of number of operations blocked waiting on lock. - lockInfo map[string]debugLockInfo // map of [opsID] debugLockInfo{operation, status, since} . + counters *lockStat // Holds stats of lock held on (volume, path) + lockInfo map[string]debugLockInfo // Lock information per operation ID. } -// returns an instance of debugLockInfo. -// need to create this for every unique pair of {volume,path}. -// total locks, number of calls blocked on locks, and number of successful locks held but not unlocked yet. -func newDebugLockInfoPerVolumePath() *debugLockInfoPerVolumePath { - return &debugLockInfoPerVolumePath{ - lockInfo: make(map[string]debugLockInfo), - ref: 0, - blocked: 0, - running: 0, - } -} - -// LockInfoOriginNotFound - While changing the state of the lock info its important that the entry for -// lock at a given origin exists, if not `LockInfoOriginNotFound` is returned. -type LockInfoOriginNotFound struct { +// LockInfoOriginMismatch - represents error when lock origin don't match. +type LockInfoOriginMismatch struct { volume string path string opsID string lockSource string } -func (l LockInfoOriginNotFound) Error() string { - return fmt.Sprintf("No lock state stored for the lock origined at \"%s\", for %s, %s, %s", +func (l LockInfoOriginMismatch) Error() string { + return fmt.Sprintf("No lock state stored for the lock originated at \"%s\", for %s, %s, %s", l.lockSource, l.volume, l.path, l.opsID) } -// LockInfoVolPathMissing - Error interface. 
Returned when the info the +// LockInfoVolPathMissing - represents error when lock information is missing for a given (volume, path). type LockInfoVolPathMissing struct { volume string path string @@ -98,8 +77,7 @@ func (l LockInfoVolPathMissing) Error() string { return fmt.Sprintf("No entry in debug Lock Map for Volume: %s, path: %s", l.volume, l.path) } -// LockInfoOpsIDNotFound - Returned when the lock state info exists, but the entry for -// given operation ID doesn't exist. +// LockInfoOpsIDNotFound - represents error when lock info entry for a given operation ID doesn't exist. type LockInfoOpsIDNotFound struct { volume string path string @@ -110,8 +88,7 @@ func (l LockInfoOpsIDNotFound) Error() string { return fmt.Sprintf("No entry in lock info for %s, %s, %s", l.opsID, l.volume, l.path) } -// LockInfoStateNotBlocked - When an attempt to change the state of the lock form `blocked` to `running` is done, -// its necessary that the state before the transsition is "blocked", otherwise LockInfoStateNotBlocked returned. +// LockInfoStateNotBlocked - represents error when lock info isn't in blocked state when it should be. type LockInfoStateNotBlocked struct { volume string path string @@ -122,146 +99,126 @@ func (l LockInfoStateNotBlocked) Error() string { return fmt.Sprintf("Lock state should be \"Blocked\" for %s, %s, %s", l.volume, l.path, l.opsID) } -var errLockNotInitialized = errors.New("Debug lockMap not initialized") - -// Initialize lock info volume path. +// Initialize lock info for given (volume, path). func (n *nsLockMap) initLockInfoForVolumePath(param nsParam) { - n.debugLockMap[param] = newDebugLockInfoPerVolumePath() + n.debugLockMap[param] = &debugLockInfoPerVolumePath{ + lockInfo: make(map[string]debugLockInfo), + counters: &lockStat{}, + } } // Change the state of the lock from Blocked to Running. 
func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockSource, opsID string, readLock bool) error { - // This operation is not executed under the scope nsLockMap.mutex.Lock(), lock has to be explicitly held here. + // This function is called outside nsLockMap.mutex.Lock(), so must be held explicitly. n.lockMapMutex.Lock() defer n.lockMapMutex.Unlock() - // new state info to be set for the lock. - newLockInfo := debugLockInfo{ - lockSource: lockSource, - status: runningStatus, - since: time.Now().UTC(), - } - // Set lock type. - if readLock { - newLockInfo.lType = debugRLockStr - } else { - newLockInfo.lType = debugWLockStr - } - - // Check whether the lock info entry for pair already exists and its not `nil`. - debugLockMap, ok := n.debugLockMap[param] + // Check whether the lock info entry for pair already exists. + _, ok := n.debugLockMap[param] if !ok { - // The lock state info foe given pair should already exist. - // If not return `LockInfoVolPathMissing`. return traceError(LockInfoVolPathMissing{param.volume, param.path}) } - // ``debugLockMap`` entry containing lock info for `param ` is `nil`. - if debugLockMap == nil { - return traceError(errLockNotInitialized) - } + + // Check whether lock info entry for the given `opsID` exists. lockInfo, ok := n.debugLockMap[param].lockInfo[opsID] if !ok { - // The lock info entry for given `opsID` should already exist for given pair. - // If not return `LockInfoOpsIDNotFound`. return traceError(LockInfoOpsIDNotFound{param.volume, param.path, opsID}) } - // The entry for the lock origined at `lockSource` should already exist. If not return `LockInfoOriginNotFound`. + + // Check whether lockSource is same. if lockInfo.lockSource != lockSource { - return traceError(LockInfoOriginNotFound{param.volume, param.path, opsID, lockSource}) + return traceError(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource}) } - // Status of the lock should already be set to "Blocked". 
If not return `LockInfoStateNotBlocked`. + + // Status of the lock should be set to "Blocked". if lockInfo.status != blockedStatus { return traceError(LockInfoStateNotBlocked{param.volume, param.path, opsID}) } - // All checks finished. Changing the status of the operation from blocked to running and updating the time. - n.debugLockMap[param].lockInfo[opsID] = newLockInfo + // Change lock status to running and update the time. + n.debugLockMap[param].lockInfo[opsID] = newDebugLockInfo(lockSource, runningStatus, readLock) - // After locking unblocks decrease the blocked counter. - n.blockedCounter-- - // Increase the running counter. - n.runningLockCounter++ - n.debugLockMap[param].blocked-- - n.debugLockMap[param].running++ + // Update global lock stats. + n.counters.lockGranted() + // Update (volume, pair) lock stats. + n.debugLockMap[param].counters.lockGranted() return nil } -// Change the state of the lock from Ready to Blocked. -func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockSource, opsID string, readLock bool) error { - newLockInfo := debugLockInfo{ +// newDebugLockInfo - Constructs a debugLockInfo value given lock source, status and type. +func newDebugLockInfo(lockSource string, status statusType, readLock bool) debugLockInfo { + lType := debugRLockStr + if readLock { + lType = debugRLockStr + } else { + lType = debugWLockStr + } + return debugLockInfo{ lockSource: lockSource, - status: blockedStatus, + lType: lType, + status: status, since: time.Now().UTC(), } - if readLock { - newLockInfo.lType = debugRLockStr - } else { - newLockInfo.lType = debugWLockStr - } +} - lockInfo, ok := n.debugLockMap[param] +// Change the state of the lock to Blocked. +func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockSource, opsID string, readLock bool) error { + _, ok := n.debugLockMap[param] if !ok { - // State info entry for the given doesn't exist, initializing it. 
- n.initLockInfoForVolumePath(param) - } - if lockInfo == nil { - // *lockInfo is nil, initialize here. + // Lock info entry for (volume, pair) doesn't exist, initialize it. n.initLockInfoForVolumePath(param) } - // lockInfo is a map[string]debugLockInfo, which holds map[OperationID]{status,time, origin} of the lock. - if n.debugLockMap[param].lockInfo == nil { - n.debugLockMap[param].lockInfo = make(map[string]debugLockInfo) - } - // The status of the operation with the given operation ID is marked blocked till its gets unblocked from the lock. - n.debugLockMap[param].lockInfo[opsID] = newLockInfo - // Increment the Global lock counter. - n.globalLockCounter++ - // Increment the counter for number of blocked opertions, decrement it after the locking unblocks. - n.blockedCounter++ - // increment the reference of the lock for the given pair. - n.debugLockMap[param].ref++ - // increment the blocked counter for the given pair. - n.debugLockMap[param].blocked++ + // Mark lock status blocked for given opsID. + n.debugLockMap[param].lockInfo[opsID] = newDebugLockInfo(lockSource, blockedStatus, readLock) + // Update global lock stats. + n.counters.lockWaiting() + // Update (volume, path) lock stats. + n.debugLockMap[param].counters.lockWaiting() return nil } -// deleteLockInfoEntry - Deletes the lock state information for given -// pair. Called when nsLk.ref count is 0. +// deleteLockInfoEntry - Deletes the lock information for given (volume, path). +// Called when nsLk.ref count is 0. func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error { // delete the lock info for the given operation. if _, found := n.debugLockMap[param]; !found { return traceError(LockInfoVolPathMissing{param.volume, param.path}) } - // Remove from the map if there are no more references for the given (volume,path) pair. + + // The following stats update is relevant only in case of a + // ForceUnlock. In case of the last unlock on a (volume, + // path), this would be a no-op. 
+ volumePathLocks := n.debugLockMap[param] + for _, lockInfo := range volumePathLocks.lockInfo { + granted := lockInfo.status == runningStatus + // Update global and (volume, path) stats. + n.counters.lockRemoved(granted) + volumePathLocks.counters.lockRemoved(granted) + } delete(n.debugLockMap, param) return nil } -// deleteLockInfoEntry - Deletes the entry for given opsID in the lock state information -// of given pair. Called when the nsLk ref count for the given -// pair is not 0. +// deleteLockInfoEntry - Deletes lock info entry for given opsID. +// Called when the nsLk ref count for the given (volume, path) is +// not 0. func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, opsID string) error { // delete the lock info for the given operation. infoMap, found := n.debugLockMap[param] if !found { return traceError(LockInfoVolPathMissing{param.volume, param.path}) } - // The opertion finished holding the lock on the resource, remove + // The operation finished holding the lock on the resource, remove // the entry for the given operation with the operation ID. - _, foundInfo := infoMap.lockInfo[opsID] + opsIDLock, foundInfo := infoMap.lockInfo[opsID] if !foundInfo { - // Unlock request with invalid opertion ID not accepted. + // Unlock request with invalid operation ID not accepted. return traceError(LockInfoOpsIDNotFound{param.volume, param.path, opsID}) } - // Decrease the global running and lock reference counter. - n.runningLockCounter-- - n.globalLockCounter-- - // Decrease the lock referee counter for the lock info for given pair. - // Decrease the running operation number. Its assumed that the operation is over - // once an attempt to release the lock is made. - infoMap.running-- - // Decrease the total reference count of locks jeld on pair. - infoMap.ref-- + // Update global and (volume, path) lock status. 
+ granted := opsIDLock.status == runningStatus + n.counters.lockRemoved(granted) + infoMap.counters.lockRemoved(granted) delete(infoMap.lockInfo, opsID) return nil } diff --git a/cmd/lock-instrument_test.go b/cmd/lock-instrument_test.go index fe33341a1..503dc7876 100644 --- a/cmd/lock-instrument_test.go +++ b/cmd/lock-instrument_test.go @@ -124,19 +124,19 @@ func verifyGlobalLockStats(l lockStateCase, t *testing.T, testNum int) { globalNSMutex.lockMapMutex.Lock() // Verifying the lock stats. - if globalNSMutex.globalLockCounter != int64(l.expectedGlobalLockCount) { + if globalNSMutex.counters.total != int64(l.expectedGlobalLockCount) { t.Errorf("Test %d: Expected the global lock counter to be %v, but got %v", testNum, int64(l.expectedGlobalLockCount), - globalNSMutex.globalLockCounter) + globalNSMutex.counters.total) } // verify the count for total blocked locks. - if globalNSMutex.blockedCounter != int64(l.expectedBlockedLockCount) { + if globalNSMutex.counters.blocked != int64(l.expectedBlockedLockCount) { t.Errorf("Test %d: Expected the total blocked lock counter to be %v, but got %v", testNum, int64(l.expectedBlockedLockCount), - globalNSMutex.blockedCounter) + globalNSMutex.counters.blocked) } // verify the count for total running locks. - if globalNSMutex.runningLockCounter != int64(l.expectedRunningLockCount) { + if globalNSMutex.counters.granted != int64(l.expectedRunningLockCount) { t.Errorf("Test %d: Expected the total running lock counter to be %v, but got %v", testNum, int64(l.expectedRunningLockCount), - globalNSMutex.runningLockCounter) + globalNSMutex.counters.granted) } globalNSMutex.lockMapMutex.Unlock() // Verifying again with the JSON response of the lock info. @@ -169,19 +169,19 @@ func verifyLockStats(l lockStateCase, t *testing.T, testNum int) { param := nsParam{l.volume, l.path} // Verify the total locks (blocked+running) for given pair. 
- if globalNSMutex.debugLockMap[param].ref != int64(l.expectedVolPathLockCount) { + if globalNSMutex.debugLockMap[param].counters.total != int64(l.expectedVolPathLockCount) { t.Errorf("Test %d: Expected the total lock count for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum, - param.volume, param.path, int64(l.expectedVolPathLockCount), globalNSMutex.debugLockMap[param].ref) + param.volume, param.path, int64(l.expectedVolPathLockCount), globalNSMutex.debugLockMap[param].counters.total) } // Verify the total running locks for given pair. - if globalNSMutex.debugLockMap[param].running != int64(l.expectedVolPathRunningCount) { + if globalNSMutex.debugLockMap[param].counters.granted != int64(l.expectedVolPathRunningCount) { t.Errorf("Test %d: Expected the total running locks for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum, param.volume, param.path, - int64(l.expectedVolPathRunningCount), globalNSMutex.debugLockMap[param].running) + int64(l.expectedVolPathRunningCount), globalNSMutex.debugLockMap[param].counters.granted) } // Verify the total blocked locks for givne pair. - if globalNSMutex.debugLockMap[param].blocked != int64(l.expectedVolPathBlockCount) { + if globalNSMutex.debugLockMap[param].counters.blocked != int64(l.expectedVolPathBlockCount) { t.Errorf("Test %d: Expected the total blocked locks for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum, param.volume, param.path, - int64(l.expectedVolPathBlockCount), globalNSMutex.debugLockMap[param].blocked) + int64(l.expectedVolPathBlockCount), globalNSMutex.debugLockMap[param].counters.blocked) } } @@ -230,16 +230,19 @@ func verifyLockState(l lockStateCase, t *testing.T, testNum int) { // TestNewDebugLockInfoPerVolumePath - Validates the values initialized by newDebugLockInfoPerVolumePath(). 
func TestNewDebugLockInfoPerVolumePath(t *testing.T) { - lockInfo := newDebugLockInfoPerVolumePath() + lockInfo := &debugLockInfoPerVolumePath{ + lockInfo: make(map[string]debugLockInfo), + counters: &lockStat{}, + } - if lockInfo.ref != 0 { - t.Errorf("Expected initial reference value of total locks to be 0, got %d", lockInfo.ref) + if lockInfo.counters.total != 0 { + t.Errorf("Expected initial reference value of total locks to be 0, got %d", lockInfo.counters.total) } - if lockInfo.blocked != 0 { - t.Errorf("Expected initial reference of blocked locks to be 0, got %d", lockInfo.blocked) + if lockInfo.counters.blocked != 0 { + t.Errorf("Expected initial reference of blocked locks to be 0, got %d", lockInfo.counters.blocked) } - if lockInfo.running != 0 { - t.Errorf("Expected initial reference value of held locks to be 0, got %d", lockInfo.running) + if lockInfo.counters.granted != 0 { + t.Errorf("Expected initial reference value of held locks to be 0, got %d", lockInfo.counters.granted) } } @@ -300,7 +303,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) { readLock: true, setBlocked: false, // expected metrics. - expectedErr: LockInfoOriginNotFound{"my-bucket", "my-object", "abcd1234", "Bad Origin"}, + expectedErr: LockInfoOriginMismatch{"my-bucket", "my-object", "abcd1234", "Bad Origin"}, }, // Test case - 5. // Test case with write lock. @@ -332,21 +335,11 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) { debugLockMap: make(map[nsParam]*debugLockInfoPerVolumePath), lockMap: make(map[nsParam]*nsLock), } - // Entry for pair is set to nil. Should fail with `errLockNotInitialized`. - globalNSMutex.debugLockMap[param] = nil - actualErr = globalNSMutex.statusBlockedToRunning(param, testCases[0].lockSource, - testCases[0].opsID, testCases[0].readLock) - - if errorCause(actualErr) != errLockNotInitialized { - t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", errLockNotInitialized, actualErr) - } // Setting the lock info the be `nil`. 
globalNSMutex.debugLockMap[param] = &debugLockInfoPerVolumePath{ lockInfo: nil, // setting the lockinfo to nil. - ref: 0, - blocked: 0, - running: 0, + counters: &lockStat{}, } actualErr = globalNSMutex.statusBlockedToRunning(param, testCases[0].lockSource, @@ -361,9 +354,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) { // but the initial state if already "Running". Such an attempt should fail globalNSMutex.debugLockMap[param] = &debugLockInfoPerVolumePath{ lockInfo: make(map[string]debugLockInfo), - ref: 0, - blocked: 0, - running: 0, + counters: &lockStat{}, } // Setting the status of the lock to be "Running". @@ -610,14 +601,14 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) { } else { t.Fatalf("Entry for %s, %s should have existed. ", param.volume, param.path) } - if globalNSMutex.runningLockCounter != int64(0) { - t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), globalNSMutex.runningLockCounter) + if globalNSMutex.counters.granted != int64(0) { + t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), globalNSMutex.counters.granted) } - if globalNSMutex.blockedCounter != int64(0) { - t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), globalNSMutex.blockedCounter) + if globalNSMutex.counters.blocked != int64(0) { + t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), globalNSMutex.counters.blocked) } - if globalNSMutex.globalLockCounter != int64(0) { - t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), globalNSMutex.globalLockCounter) + if globalNSMutex.counters.total != int64(0) { + t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), globalNSMutex.counters.total) } } @@ -680,13 +671,13 @@ func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) { t.Fatalf("Entry for %s, %s should have been deleted. 
", param.volume, param.path) } // The lock count values should be 0. - if globalNSMutex.runningLockCounter != int64(0) { - t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), globalNSMutex.runningLockCounter) + if globalNSMutex.counters.granted != int64(0) { + t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), globalNSMutex.counters.granted) } - if globalNSMutex.blockedCounter != int64(0) { - t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), globalNSMutex.blockedCounter) + if globalNSMutex.counters.blocked != int64(0) { + t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), globalNSMutex.counters.blocked) } - if globalNSMutex.globalLockCounter != int64(0) { - t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), globalNSMutex.globalLockCounter) + if globalNSMutex.counters.total != int64(0) { + t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), globalNSMutex.counters.total) } } diff --git a/cmd/lock-stat.go b/cmd/lock-stat.go new file mode 100644 index 000000000..67d91d2ac --- /dev/null +++ b/cmd/lock-stat.go @@ -0,0 +1,49 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +// lockStat - encapsulates total, blocked and granted lock counts. 
+type lockStat struct { + total int64 + blocked int64 + granted int64 +} + +// lockWaiting - updates lock stat when a lock becomes blocked. +func (ls *lockStat) lockWaiting() { + ls.blocked++ + ls.total++ +} + +// lockGranted - updates lock stat when a lock is granted. +func (ls *lockStat) lockGranted() { + ls.blocked-- + ls.granted++ +} + +// lockRemoved - updates lock stat when a lock is removed, by Unlock +// or ForceUnlock. +func (ls *lockStat) lockRemoved(granted bool) { + if granted { + ls.granted-- + ls.total-- + + } else { + ls.blocked-- + ls.total-- + } +} diff --git a/cmd/lockinfo-handlers.go b/cmd/lockinfo-handlers.go index 2ed1b58a5..50f7429b6 100644 --- a/cmd/lockinfo-handlers.go +++ b/cmd/lockinfo-handlers.go @@ -66,17 +66,17 @@ func getSystemLockState() (SystemLockState, error) { lockState := SystemLockState{} - lockState.TotalBlockedLocks = globalNSMutex.blockedCounter - lockState.TotalLocks = globalNSMutex.globalLockCounter - lockState.TotalAcquiredLocks = globalNSMutex.runningLockCounter + lockState.TotalBlockedLocks = globalNSMutex.counters.blocked + lockState.TotalLocks = globalNSMutex.counters.total + lockState.TotalAcquiredLocks = globalNSMutex.counters.granted for param, debugLock := range globalNSMutex.debugLockMap { volLockInfo := VolumeLockInfo{} volLockInfo.Bucket = param.volume volLockInfo.Object = param.path - volLockInfo.LocksOnObject = debugLock.ref - volLockInfo.TotalBlockedLocks = debugLock.blocked - volLockInfo.LocksAcquiredOnObject = debugLock.running + volLockInfo.LocksOnObject = debugLock.counters.total + volLockInfo.TotalBlockedLocks = debugLock.counters.blocked + volLockInfo.LocksAcquiredOnObject = debugLock.counters.granted for opsID, lockInfo := range debugLock.lockInfo { volLockInfo.LockDetailsOnObject = append(volLockInfo.LockDetailsOnObject, OpsLockState{ OperationID: opsID, diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go index 9f9ba73a5..b59261408 100644 --- a/cmd/namespace-lock.go +++ b/cmd/namespace-lock.go 
@@ -61,6 +61,7 @@ func initNSLock(isDistXL bool) { globalNSMutex = &nsLockMap{ isDistXL: isDistXL, lockMap: make(map[nsParam]*nsLock), + counters: &lockStat{}, } // Initialize nsLockMap with entry for instrumentation information. @@ -91,10 +92,8 @@ type nsLock struct { // Unlock, RLock and RUnlock. type nsLockMap struct { // Lock counter used for lock debugging. - globalLockCounter int64 // Total locks held. - blockedCounter int64 // Total operations blocked waiting for locks. - runningLockCounter int64 // Total locks held but not released yet. - debugLockMap map[nsParam]*debugLockInfoPerVolumePath // Info for instrumentation on locks. + counters *lockStat + debugLockMap map[nsParam]*debugLockInfoPerVolumePath // Info for instrumentation on locks. // Indicates whether the locking service is part // of a distributed setup or not. From 69559aa101d4b3b28b9eeb09db9850e4d56f9aa7 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 26 Dec 2016 16:29:26 -0800 Subject: [PATCH 036/100] objAPI: Implement CopyObject API. (#3487) This is written so that to simplify our handler code and provide a way to only update metadata instead of the data when source and destination in CopyObject request are same. 
Fixes #3316 --- cmd/api-errors.go | 8 ++- cmd/fs-createfile.go | 39 ----------- cmd/fs-helpers.go | 91 ++++++++++++++++++++++++ cmd/fs-v1.go | 113 ++++++++++++++++++------------ cmd/handler-utils.go | 39 +++++++++++ cmd/object-api-interface.go | 3 +- cmd/object-handlers.go | 134 +++++++++++++++++++----------------- cmd/object-handlers_test.go | 98 ++++++++++++++++++++++++-- cmd/xl-v1-metadata.go | 8 +++ cmd/xl-v1-object.go | 118 +++++++++++++++++++++++++++---- 10 files changed, 485 insertions(+), 166 deletions(-) delete mode 100644 cmd/fs-createfile.go create mode 100644 cmd/fs-helpers.go diff --git a/cmd/api-errors.go b/cmd/api-errors.go index d22f05040..a3958d250 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -62,6 +62,7 @@ const ( ErrInvalidPartNumberMarker ErrInvalidRequestBody ErrInvalidCopySource + ErrInvalidMetadataDirective ErrInvalidCopyDest ErrInvalidPolicyDocument ErrInvalidObjectState @@ -145,7 +146,7 @@ const ( var errorCodeResponse = map[APIErrorCode]APIError{ ErrInvalidCopyDest: { Code: "InvalidRequest", - Description: "This copy request is illegal because it is trying to copy an object to itself.", + Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", HTTPStatusCode: http.StatusBadRequest, }, ErrInvalidCopySource: { @@ -153,6 +154,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{ Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", HTTPStatusCode: http.StatusBadRequest, }, + ErrInvalidMetadataDirective: { + Code: "InvalidArgument", + Description: "Unknown metadata directive.", + HTTPStatusCode: http.StatusBadRequest, + }, ErrInvalidRequestBody: { Code: "InvalidArgument", Description: "Body shouldn't be set for this request.", diff --git a/cmd/fs-createfile.go b/cmd/fs-createfile.go deleted file mode 100644 index db0afd017..000000000 --- 
a/cmd/fs-createfile.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import "io" - -func fsCreateFile(disk StorageAPI, reader io.Reader, buf []byte, tmpBucket, tempObj string) (int64, error) { - bytesWritten := int64(0) - // Read the buffer till io.EOF and append the read data to the temporary file. - for { - n, rErr := reader.Read(buf) - if rErr != nil && rErr != io.EOF { - return 0, traceError(rErr) - } - bytesWritten += int64(n) - wErr := disk.AppendFile(tmpBucket, tempObj, buf[0:n]) - if wErr != nil { - return 0, traceError(wErr) - } - if rErr == io.EOF { - break - } - } - return bytesWritten, nil -} diff --git a/cmd/fs-helpers.go b/cmd/fs-helpers.go new file mode 100644 index 000000000..0c457fad2 --- /dev/null +++ b/cmd/fs-helpers.go @@ -0,0 +1,91 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import "io" + +// Reads from the requested local location uses a staging buffer. Restricts +// reads upto requested range of length and offset. If successful staging +// buffer is written to the incoming stream. Returns errors if any. +func fsReadFile(disk StorageAPI, bucket, object string, writer io.Writer, totalLeft, startOffset int64, buf []byte) (err error) { + bufSize := int64(len(buf)) + // Start the read loop until requested range. + for { + // Figure out the right size for the buffer. + curLeft := bufSize + if totalLeft < bufSize { + curLeft = totalLeft + } + // Reads the file at offset. + nr, er := disk.ReadFile(bucket, object, startOffset, buf[:curLeft]) + if nr > 0 { + // Write to response writer. + nw, ew := writer.Write(buf[0:nr]) + if nw > 0 { + // Decrement whats left to write. + totalLeft -= int64(nw) + + // Progress the offset + startOffset += int64(nw) + } + if ew != nil { + err = traceError(ew) + break + } + if nr != int64(nw) { + err = traceError(io.ErrShortWrite) + break + } + } + if er == io.EOF || er == io.ErrUnexpectedEOF { + break + } + if er != nil { + err = traceError(er) + break + } + if totalLeft == 0 { + break + } + } + return err +} + +// Reads from input stream until end of file, takes an input buffer for staging reads. +// The staging buffer is then written to the disk. Returns for any error that occurs +// while reading the stream or writing to disk. Caller should cleanup partial files. +// Upon errors total data written will be 0 and returns error, on success returns +// total data written to disk. +func fsCreateFile(disk StorageAPI, reader io.Reader, buf []byte, bucket, object string) (int64, error) { + bytesWritten := int64(0) + // Read the buffer till io.EOF and appends data to path at bucket/object. 
+ for { + n, rErr := reader.Read(buf) + if rErr != nil && rErr != io.EOF { + return 0, traceError(rErr) + } + bytesWritten += int64(n) + wErr := disk.AppendFile(bucket, object, buf[0:n]) + if wErr != nil { + return 0, traceError(wErr) + } + if rErr == io.EOF { + break + } + } + return bytesWritten, nil +} diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 1efe7d059..f0e77f6eb 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -217,7 +217,65 @@ func (fs fsObjects) DeleteBucket(bucket string) error { /// Object Operations -// GetObject - get an object. +// CopyObject - copy object source object to destination object. +// if source object and destination object are same we only +// update metadata. +func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (ObjectInfo, error) { + // Stat the file to get file size. + fi, err := fs.storage.StatFile(srcBucket, srcObject) + if err != nil { + return ObjectInfo{}, toObjectErr(traceError(err), srcBucket, srcObject) + } + + // Check if this request is only metadata update. + cpMetadataOnly := strings.EqualFold(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject)) + if cpMetadataOnly { + // Save objects' metadata in `fs.json`. + fsMeta := newFSMetaV1() + fsMeta.Meta = metadata + + fsMetaPath := pathJoin(bucketMetaPrefix, dstBucket, dstObject, fsMetaJSONFile) + if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil { + return ObjectInfo{}, toObjectErr(err, dstBucket, dstObject) + } + + // Get object info. + return fs.getObjectInfo(dstBucket, dstObject) + } + + // Length of the file to read. + length := fi.Size + + // Initialize pipe. + pipeReader, pipeWriter := io.Pipe() + + go func() { + startOffset := int64(0) // Read the whole file. 
+ if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil { + errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject) + pipeWriter.CloseWithError(gerr) + return + } + pipeWriter.Close() // Close writer explicitly signalling we wrote all data. + }() + + objInfo, err := fs.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "") + if err != nil { + return ObjectInfo{}, toObjectErr(err, dstBucket, dstObject) + } + + // Explicitly close the reader. + pipeReader.Close() + + return objInfo, nil +} + +// GetObject - reads an object from the disk. +// Supports additional parameters like offset and length +// which are synonymous with HTTP Range requests. +// +// startOffset indicates the starting read location of the object. +// length indicates the total length of the object. func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64, writer io.Writer) (err error) { if err = checkGetObjArgs(bucket, object); err != nil { return err @@ -254,50 +312,14 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64, } // Allocate a staging buffer. buf := make([]byte, int(bufSize)) - for { - // Figure out the right size for the buffer. - curLeft := bufSize - if totalLeft < bufSize { - curLeft = totalLeft - } - // Reads the file at offset. - nr, er := fs.storage.ReadFile(bucket, object, offset, buf[:curLeft]) - if nr > 0 { - // Write to response writer. - nw, ew := writer.Write(buf[0:nr]) - if nw > 0 { - // Decrement whats left to write. 
- totalLeft -= int64(nw) - - // Progress the offset - offset += int64(nw) - } - if ew != nil { - err = traceError(ew) - break - } - if nr != int64(nw) { - err = traceError(io.ErrShortWrite) - break - } - } - if er == io.EOF || er == io.ErrUnexpectedEOF { - break - } - if er != nil { - err = traceError(er) - break - } - if totalLeft == 0 { - break - } + if err = fsReadFile(fs.storage, bucket, object, writer, totalLeft, offset, buf); err != nil { + // Returns any error. + return toObjectErr(err, bucket, object) } - - // Returns any error. - return toObjectErr(err, bucket, object) + return nil } -// getObjectInfo - get object info. +// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo. func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) { fi, err := fs.storage.StatFile(bucket, object) if err != nil { @@ -342,7 +364,7 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) { return objInfo, nil } -// GetObjectInfo - get object info. +// GetObjectInfo - reads object metadata and replies back ObjectInfo. func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) { if err := checkGetObjArgs(bucket, object); err != nil { return ObjectInfo{}, err @@ -350,7 +372,10 @@ func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) { return fs.getObjectInfo(bucket, object) } -// PutObject - create an object. +// PutObject - creates an object upon reading from the input stream +// until EOF, writes data directly to configured filesystem path. +// Additionally writes `fs.json` which carries the necessary metadata +// for future object operations. 
func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) { if err = checkPutObjectArgs(bucket, object, fs); err != nil { return ObjectInfo{}, err diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index 0e2c7dcf7..23d227140 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -65,6 +65,45 @@ var supportedHeaders = []string{ // Add more supported headers here. } +// isMetadataDirectiveValid - check if metadata-directive is valid. +func isMetadataDirectiveValid(h http.Header) bool { + _, ok := h[http.CanonicalHeaderKey("X-Amz-Metadata-Directive")] + if ok { + // Check atleast set metadata-directive is valid. + return (isMetadataCopy(h) || isMetadataReplace(h)) + } + // By default if x-amz-metadata-directive is not we + // treat it as 'COPY' this function returns true. + return true +} + +// Check if the metadata COPY is requested. +func isMetadataCopy(h http.Header) bool { + return h.Get("X-Amz-Metadata-Directive") == "COPY" +} + +// Check if the metadata REPLACE is requested. +func isMetadataReplace(h http.Header) bool { + return h.Get("X-Amz-Metadata-Directive") == "REPLACE" +} + +// Splits an incoming path into bucket and object components. +func path2BucketAndObject(path string) (bucket, object string) { + // Skip the first element if it is '/', split the rest. + path = strings.TrimPrefix(path, "/") + pathComponents := strings.SplitN(path, "/", 2) + + // Save the bucket and object extracted from path. + switch len(pathComponents) { + case 1: + bucket = pathComponents[0] + case 2: + bucket = pathComponents[0] + object = pathComponents[1] + } + return bucket, object +} + // extractMetadataFromHeader extracts metadata from HTTP header. 
func extractMetadataFromHeader(header http.Header) map[string]string { metadata := make(map[string]string) diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index 2c3b1ed7a..04a88061b 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -34,7 +34,8 @@ type ObjectLayer interface { // Object operations. GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) - PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInto ObjectInfo, err error) + PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) + CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) DeleteObject(bucket, object string) error // Multipart operations. diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index f5b4857d3..87382c8a1 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -19,7 +19,6 @@ package cmd import ( "encoding/hex" "encoding/xml" - "io" "io/ioutil" "net/http" "net/url" @@ -228,14 +227,32 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re w.WriteHeader(http.StatusOK) } +// Extract metadata relevant for an CopyObject operation based on conditional +// header values specified in X-Amz-Metadata-Directive. +func getCpObjMetadataFromHeader(header http.Header, defaultMeta map[string]string) map[string]string { + // if x-amz-metadata-directive says REPLACE then + // we extract metadata from the input headers. + if isMetadataReplace(header) { + return extractMetadataFromHeader(header) + } + // if x-amz-metadata-directive says COPY then we + // return the default metadata. 
+ if isMetadataCopy(header) { + return defaultMeta + } + // Copy is default behavior if not x-amz-metadata-directive is set. + return defaultMeta +} + // CopyObjectHandler - Copy Object // ---------- // This implementation of the PUT operation adds an object to a bucket // while reading the object from another source. func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) - bucket := vars["bucket"] - object := vars["object"] + dstBucket := vars["bucket"] + dstObject := vars["object"] + cpDestPath := "/" + path.Join(dstBucket, dstObject) objectAPI := api.ObjectAPI() if objectAPI == nil { @@ -243,51 +260,58 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re return } - if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", serverConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(r, dstBucket, "s3:PutObject", serverConfig.GetRegion()); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } // TODO: Reject requests where body/payload is present, for now we don't even read it. - // objectSource - objectSource, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) if err != nil { // Save unescaped string as is. - objectSource = r.Header.Get("X-Amz-Copy-Source") + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") } - // Skip the first element if it is '/', split the rest. - objectSource = strings.TrimPrefix(objectSource, "/") - splits := strings.SplitN(objectSource, "/", 2) - - // Save sourceBucket and sourceObject extracted from url Path. - var sourceBucket, sourceObject string - if len(splits) == 2 { - sourceBucket = splits[0] - sourceObject = splits[1] - } - // If source object is empty, reply back error. 
- if sourceObject == "" { + srcBucket, srcObject := path2BucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. + if srcObject == "" || srcBucket == "" { writeErrorResponse(w, r, ErrInvalidCopySource, r.URL.Path) return } - // Source and destination objects cannot be same, reply back error. - if sourceObject == object && sourceBucket == bucket { - writeErrorResponse(w, r, ErrInvalidCopyDest, r.URL.Path) + // Check if metadata directive is valid. + if !isMetadataDirectiveValid(r.Header) { + writeErrorResponse(w, r, ErrInvalidMetadataDirective, r.URL.Path) return } - // Lock the object before reading. - objectRLock := globalNSMutex.NewNSLock(sourceBucket, sourceObject) - objectRLock.RLock() - defer objectRLock.RUnlock() + cpSrcDstSame := cpSrcPath == cpDestPath + // Hold write lock on destination since in both cases + // - if source and destination are same + // - if source and destination are different + // it is the sole mutating state. + objectDWLock := globalNSMutex.NewNSLock(dstBucket, dstObject) + objectDWLock.Lock() + defer objectDWLock.Unlock() - objInfo, err := objectAPI.GetObjectInfo(sourceBucket, sourceObject) + // if source and destination are different, we have to hold + // additional read lock as well to protect against writes on + // source. + if !cpSrcDstSame { + // Hold read locks on source object only if we are + // going to read data from source object. 
+ objectSRLock := globalNSMutex.NewNSLock(srcBucket, srcObject) + objectSRLock.RLock() + defer objectSRLock.RUnlock() + + } + + objInfo, err := objectAPI.GetObjectInfo(srcBucket, srcObject) if err != nil { errorIf(err, "Unable to fetch object info.") - writeErrorResponse(w, r, toAPIErrorCode(err), objectSource) + writeErrorResponse(w, r, toAPIErrorCode(err), cpSrcPath) return } @@ -298,50 +322,34 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re /// maximum Upload size for object in a single CopyObject operation. if isMaxObjectSize(objInfo.Size) { - writeErrorResponse(w, r, ErrEntityTooLarge, objectSource) + writeErrorResponse(w, r, ErrEntityTooLarge, cpSrcPath) return } - // Size of object. - size := objInfo.Size + defaultMeta := objInfo.UserDefined - pipeReader, pipeWriter := io.Pipe() - go func() { - startOffset := int64(0) // Read the whole file. - // Get the object. - gErr := objectAPI.GetObject(sourceBucket, sourceObject, startOffset, size, pipeWriter) - if gErr != nil { - errorIf(gErr, "Unable to read an object.") - pipeWriter.CloseWithError(gErr) - return - } - pipeWriter.Close() // Close. - }() + // Make sure to remove saved md5sum, object might have been uploaded + // as multipart which doesn't have a standard md5sum, we just let + // CopyObject calculate a new one. + delete(defaultMeta, "md5Sum") - // Save other metadata if available. - metadata := objInfo.UserDefined + newMetadata := getCpObjMetadataFromHeader(r.Header, defaultMeta) + // Check if x-amz-metadata-directive was not set to REPLACE and source, + // desination are same objects. + if !isMetadataReplace(r.Header) && cpSrcDstSame { + // If x-amz-metadata-directive is not set to REPLACE then we need + // to error out if source and destination are same. 
+ writeErrorResponse(w, r, ErrInvalidCopyDest, r.URL.Path) + return + } - // Remove the etag from source metadata because if it was uploaded as a multipart object - // then its ETag will not be MD5sum of the object. - delete(metadata, "md5Sum") - - sha256sum := "" - - objectWLock := globalNSMutex.NewNSLock(bucket, object) - objectWLock.Lock() - defer objectWLock.Unlock() - - // Create the object. - objInfo, err = objectAPI.PutObject(bucket, object, size, pipeReader, metadata, sha256sum) + // Copy source object to destination, if source and destination + // object is same then only metadata is updated. + objInfo, err = objectAPI.CopyObject(srcBucket, srcObject, dstBucket, dstObject, newMetadata) if err != nil { - // Close the this end of the pipe upon error in PutObject. - pipeReader.CloseWithError(err) - errorIf(err, "Unable to create an object.") writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) return } - // Explicitly close the reader, before fetching object info. - pipeReader.Close() md5Sum := objInfo.MD5Sum response := generateCopyObjectResponse(md5Sum, objInfo.ModTime) @@ -354,7 +362,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re // Notify object created event. eventNotify(eventData{ Type: ObjectCreatedCopy, - Bucket: bucket, + Bucket: dstBucket, ObjInfo: objInfo, ReqParams: map[string]string{ "sourceIPAddress": r.RemoteAddr, diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index 7ea378bdb..ed454fd87 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -981,19 +981,25 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, bucketName string newObjectName string // name of the newly copied object. copySourceHeader string // data for "X-Amz-Copy-Source" header. Contains the object to be copied in the URL. 
+ metadataGarbage bool + metadataReplace bool + metadataCopy bool + metadata map[string]string accessKey string secretKey string // expected output. expectedRespStatus int }{ - // Test case - 1. + // Test case - 1, copy metadata from newObject1, ignore request headers. { bucketName: bucketName, newObjectName: "newObject1", copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - + metadata: map[string]string{ + "Content-Type": "application/json", + }, expectedRespStatus: http.StatusOK, }, @@ -1008,6 +1014,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, expectedRespStatus: http.StatusBadRequest, }, + // Test case - 3. // Test case with new object name is same as object to be copied. { @@ -1019,7 +1026,58 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, expectedRespStatus: http.StatusBadRequest, }, + // Test case - 4. + // Test case with new object name is same as object to be copied + // but metadata is updated. + { + bucketName: bucketName, + newObjectName: objectName, + copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + metadata: map[string]string{ + "Content-Type": "application/json", + }, + metadataReplace: true, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, + + expectedRespStatus: http.StatusOK, + }, + + // Test case - 5. + // Test case with invalid metadata-directive. + { + bucketName: bucketName, + newObjectName: "newObject1", + copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + metadata: map[string]string{ + "Content-Type": "application/json", + }, + metadataGarbage: true, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, + + expectedRespStatus: http.StatusBadRequest, + }, + + // Test case - 6. + // Test case with new object name is same as object to be copied + // fail with BadRequest. 
+ { + bucketName: bucketName, + newObjectName: objectName, + copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + metadata: map[string]string{ + "Content-Type": "application/json", + }, + metadataCopy: true, + accessKey: credentials.AccessKey, + secretKey: credentials.SecretKey, + + expectedRespStatus: http.StatusBadRequest, + }, + + // Test case - 7. // Test case with non-existent source file. // Case for the purpose of failing `api.ObjectAPI.GetObjectInfo`. // Expecting the response status code to http.StatusNotFound (404). @@ -1032,7 +1090,8 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, expectedRespStatus: http.StatusNotFound, }, - // Test case - 5. + + // Test case - 8. // Test case with non-existent source file. // Case for the purpose of failing `api.ObjectAPI.PutObject`. // Expecting the response status code to http.StatusNotFound (404). @@ -1045,7 +1104,8 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, expectedRespStatus: http.StatusNotFound, }, - // Test case - 6. + + // Test case - 9. // Case with invalid AccessKey. { bucketName: bucketName, @@ -1059,7 +1119,8 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, } for i, testCase := range testCases { - var req, reqV2 *http.Request + var req *http.Request + var reqV2 *http.Request // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. rec := httptest.NewRecorder() // construct HTTP request for copy object. @@ -1073,6 +1134,19 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, if testCase.copySourceHeader != "" { req.Header.Set("X-Amz-Copy-Source", testCase.copySourceHeader) } + // Add custom metadata. 
+ for k, v := range testCase.metadata { + req.Header.Set(k, v) + } + if testCase.metadataReplace { + req.Header.Set("X-Amz-Metadata-Directive", "REPLACE") + } + if testCase.metadataCopy { + req.Header.Set("X-Amz-Metadata-Directive", "COPY") + } + if testCase.metadataGarbage { + req.Header.Set("X-Amz-Metadata-Directive", "Unknown") + } // Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler. // Call the ServeHTTP to execute the handler, `func (api objectAPIHandlers) CopyObjectHandler` handles the request. apiRouter.ServeHTTP(rec, req) @@ -1106,6 +1180,20 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, reqV2.Header.Set("X-Amz-Copy-Source", testCase.copySourceHeader) } + // Add custom metadata. + for k, v := range testCase.metadata { + reqV2.Header.Set(k, v+"+x") + } + if testCase.metadataReplace { + reqV2.Header.Set("X-Amz-Metadata-Directive", "REPLACE") + } + if testCase.metadataCopy { + reqV2.Header.Set("X-Amz-Metadata-Directive", "COPY") + } + if testCase.metadataGarbage { + reqV2.Header.Set("X-Amz-Metadata-Directive", "Unknown") + } + err = signRequestV2(reqV2, testCase.accessKey, testCase.secretKey) if err != nil { diff --git a/cmd/xl-v1-metadata.go b/cmd/xl-v1-metadata.go index 8edd7d2ba..f50b083ad 100644 --- a/cmd/xl-v1-metadata.go +++ b/cmd/xl-v1-metadata.go @@ -311,6 +311,14 @@ func deleteAllXLMetadata(disks []StorageAPI, bucket, prefix string, errs []error wg.Wait() } +// Rename `xl.json` content to destination location for each disk in order. +func renameXLMetadata(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, quorum int) error { + isDir := false + srcXLJSON := path.Join(srcEntry, xlMetaJSONFile) + dstXLJSON := path.Join(dstEntry, xlMetaJSONFile) + return rename(disks, srcBucket, srcXLJSON, dstBucket, dstXLJSON, isDir, quorum) +} + // writeUniqueXLMetadata - writes unique `xl.json` content for each disk in order. 
func writeUniqueXLMetadata(disks []StorageAPI, bucket, prefix string, xlMetas []xlMetaV1, quorum int) error { var wg = &sync.WaitGroup{} diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go index 28af752b7..702af34ca 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/xl-v1-object.go @@ -42,13 +42,105 @@ var objectOpIgnoredErrs = []error{ /// Object Operations +// CopyObject - copy object source object to destination object. +// if source object and destination object are same we only +// update metadata. +func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (ObjectInfo, error) { + // Read metadata associated with the object from all disks. + metaArr, errs := readAllXLMetadata(xl.storageDisks, srcBucket, srcObject) + // Do we have read quorum? + if !isDiskQuorum(errs, xl.readQuorum) { + return ObjectInfo{}, traceError(InsufficientReadQuorum{}, errs...) + } + + if reducedErr := reduceReadQuorumErrs(errs, objectOpIgnoredErrs, xl.readQuorum); reducedErr != nil { + return ObjectInfo{}, toObjectErr(reducedErr, srcBucket, srcObject) + } + + // List all online disks. + onlineDisks, modTime := listOnlineDisks(xl.storageDisks, metaArr, errs) + + // Pick latest valid metadata. + xlMeta, err := pickValidXLMeta(metaArr, modTime) + if err != nil { + return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) + } + + // Reorder online disks based on erasure distribution order. + onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks) + + // Length of the file to read. + length := xlMeta.Stat.Size + + // Check if this request is only metadata update. + cpMetadataOnly := strings.EqualFold(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject)) + if cpMetadataOnly { + xlMeta.Meta = metadata + partsMetadata := make([]xlMetaV1, len(xl.storageDisks)) + // Update `xl.json` content on each disks. 
+ for index := range partsMetadata { + partsMetadata[index] = xlMeta + } + + tempObj := mustGetUUID() + + // Write unique `xl.json` for each disk. + if err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, xl.writeQuorum); err != nil { + return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) + } + // Rename atomically `xl.json` from tmp location to destination for each disk. + if err = renameXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, xl.writeQuorum); err != nil { + return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) + } + + objInfo := ObjectInfo{ + IsDir: false, + Bucket: srcBucket, + Name: srcObject, + Size: xlMeta.Stat.Size, + ModTime: xlMeta.Stat.ModTime, + MD5Sum: xlMeta.Meta["md5Sum"], + ContentType: xlMeta.Meta["content-type"], + ContentEncoding: xlMeta.Meta["content-encoding"], + } + // md5Sum has already been extracted into objInfo.MD5Sum. We + // need to remove it from xlMetaMap to avoid it from appearing as + // part of response headers. e.g, X-Minio-* or X-Amz-*. + delete(xlMeta.Meta, "md5Sum") + objInfo.UserDefined = xlMeta.Meta + return objInfo, nil + } + + // Initialize pipe. + pipeReader, pipeWriter := io.Pipe() + + go func() { + startOffset := int64(0) // Read the whole file. + if gerr := xl.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil { + errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject) + pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject)) + return + } + pipeWriter.Close() // Close writer explicitly signalling we wrote all data. + }() + + objInfo, err := xl.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "") + if err != nil { + return ObjectInfo{}, toObjectErr(err, dstBucket, dstObject) + } + + // Explicitly close the reader. + pipeReader.Close() + + return objInfo, nil +} + // GetObject - reads an object erasured coded across multiple // disks. 
Supports additional parameters like offset and length -// which is synonymous with HTTP Range requests. +// which are synonymous with HTTP Range requests. // -// startOffset indicates the location at which the client requested -// object to be read at. length indicates the total length of the -// object requested by client. +// startOffset indicates the starting read location of the object. +// length indicates the total length of the object. func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error { if err := checkGetObjArgs(bucket, object); err != nil { return err @@ -255,13 +347,13 @@ func (xl xlObjects) getObjectInfo(bucket, object string) (objInfo ObjectInfo, er return objInfo, nil } -func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isPart bool, errs []error) { +func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, errs []error) { var wg = &sync.WaitGroup{} // Undo rename object on disks where RenameFile succeeded. // If srcEntry/dstEntry are objects then add a trailing slash to copy // over all the parts inside the object directory - if !isPart { + if isDir { srcEntry = retainSlash(srcEntry) dstEntry = retainSlash(dstEntry) } @@ -284,14 +376,14 @@ func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry str // rename - common function that renamePart and renameObject use to rename // the respective underlying storage layer representations. -func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isPart bool, quorum int) error { +func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, quorum int) error { // Initialize sync waitgroup. var wg = &sync.WaitGroup{} // Initialize list of errors. 
var errs = make([]error, len(disks)) - if !isPart { + if isDir { dstEntry = retainSlash(dstEntry) srcEntry = retainSlash(srcEntry) } @@ -319,7 +411,7 @@ func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, // otherwise return failure. Cleanup successful renames. if !isDiskQuorum(errs, quorum) { // Undo all the partial rename operations. - undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isPart, errs) + undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs) return traceError(errXLWriteQuorum) } return reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, quorum) @@ -330,8 +422,8 @@ func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, // not have a readQuorum partially renamed files are renamed back to // its proper location. func renamePart(disks []StorageAPI, srcBucket, srcPart, dstBucket, dstPart string, quorum int) error { - isPart := true - return rename(disks, srcBucket, srcPart, dstBucket, dstPart, isPart, quorum) + isDir := false + return rename(disks, srcBucket, srcPart, dstBucket, dstPart, isDir, quorum) } // renameObject - renames all source objects to destination object @@ -339,8 +431,8 @@ func renamePart(disks []StorageAPI, srcBucket, srcPart, dstBucket, dstPart strin // not have a readQuorum partially renamed files are renamed back to // its proper location. func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject string, quorum int) error { - isPart := false - return rename(disks, srcBucket, srcObject, dstBucket, dstObject, isPart, quorum) + isDir := true + return rename(disks, srcBucket, srcObject, dstBucket, dstObject, isDir, quorum) } // PutObject - creates an object upon reading from the input stream From ee0172dfe419febe371f21d50835b577338cadfa Mon Sep 17 00:00:00 2001 From: Bala FA Date: Tue, 27 Dec 2016 21:58:10 +0530 Subject: [PATCH 037/100] Have simpler JWT authentication. 
(#3501) --- cmd/admin-rpc-server.go | 4 +- cmd/auth-handler.go | 2 +- cmd/auth-rpc-client.go | 24 ---- cmd/browser-peer-rpc.go | 14 +-- cmd/credential.go | 12 ++ cmd/jwt.go | 116 ++++++++++++++++++ cmd/jwt_test.go | 84 +++++++++++++ cmd/lock-rpc-server-common.go | 2 +- cmd/login-server.go | 10 +- cmd/post-policy_test.go | 4 +- cmd/s3-peer-rpc-handlers.go | 8 +- cmd/signature-jwt.go | 103 ---------------- cmd/signature-jwt_test.go | 214 --------------------------------- cmd/storage-rpc-server.go | 26 ++-- cmd/storage-rpc-server_test.go | 13 +- cmd/test-utils_test.go | 4 +- cmd/web-handlers.go | 132 +++++--------------- 17 files changed, 277 insertions(+), 495 deletions(-) create mode 100644 cmd/jwt.go create mode 100644 cmd/jwt_test.go delete mode 100644 cmd/signature-jwt.go delete mode 100644 cmd/signature-jwt_test.go diff --git a/cmd/admin-rpc-server.go b/cmd/admin-rpc-server.go index 3948901d1..af5cd0467 100644 --- a/cmd/admin-rpc-server.go +++ b/cmd/admin-rpc-server.go @@ -32,7 +32,7 @@ type serviceCmd struct { // Shutdown - Shutdown this instance of minio server. func (s *serviceCmd) Shutdown(args *GenericArgs, reply *GenericReply) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } globalServiceSignalCh <- serviceStop @@ -41,7 +41,7 @@ func (s *serviceCmd) Shutdown(args *GenericArgs, reply *GenericReply) error { // Restart - Restart this instance of minio server. func (s *serviceCmd) Restart(args *GenericArgs, reply *GenericReply) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } globalServiceSignalCh <- serviceRestart diff --git a/cmd/auth-handler.go b/cmd/auth-handler.go index a1708c7ef..c048698bb 100644 --- a/cmd/auth-handler.go +++ b/cmd/auth-handler.go @@ -224,7 +224,7 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } else if aType == authTypeJWT { // Validate Authorization header if its valid for JWT request. 
- if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { w.WriteHeader(http.StatusUnauthorized) return } diff --git a/cmd/auth-rpc-client.go b/cmd/auth-rpc-client.go index 978c9382f..9284e0784 100644 --- a/cmd/auth-rpc-client.go +++ b/cmd/auth-rpc-client.go @@ -17,12 +17,9 @@ package cmd import ( - "fmt" "net/rpc" "sync" "time" - - jwtgo "github.com/dgrijalva/jwt-go" ) // GenericReply represents any generic RPC reply. @@ -63,27 +60,6 @@ type RPCLoginReply struct { ServerVersion string } -// Validates if incoming token is valid. -func isRPCTokenValid(tokenStr string) bool { - jwt, err := newJWT(defaultInterNodeJWTExpiry, serverConfig.GetCredential()) - if err != nil { - errorIf(err, "Unable to initialize JWT") - return false - } - token, err := jwtgo.Parse(tokenStr, func(token *jwtgo.Token) (interface{}, error) { - if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - return []byte(jwt.SecretKey), nil - }) - if err != nil { - errorIf(err, "Unable to parse JWT token string") - return false - } - // Return if token is valid. - return token.Valid -} - // Auth config represents authentication credentials and Login method name to be used // for fetching JWT tokens from the RPC server. type authConfig struct { diff --git a/cmd/browser-peer-rpc.go b/cmd/browser-peer-rpc.go index 3a76e8ff6..17d9254f6 100644 --- a/cmd/browser-peer-rpc.go +++ b/cmd/browser-peer-rpc.go @@ -25,17 +25,11 @@ import ( // Login handler implements JWT login token generator, which upon login request // along with username and password is generated. 
func (br *browserPeerAPIHandlers) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error { - jwt, err := newJWT(defaultInterNodeJWTExpiry, serverConfig.GetCredential()) - if err != nil { - return err - } - if err = jwt.Authenticate(args.Username, args.Password); err != nil { - return err - } - token, err := jwt.GenerateToken(args.Username) + token, err := authenticateWeb(args.Username, args.Password) if err != nil { return err } + reply.Token = token reply.ServerVersion = Version reply.Timestamp = time.Now().UTC() @@ -54,13 +48,13 @@ type SetAuthPeerArgs struct { // SetAuthPeer - Update to new credentials sent from a peer Minio // server. Since credentials are already validated on the sending // peer, here we just persist to file and update in-memory config. All -// subsequently running isRPCTokenValid() calls will fail, and clients +// subsequently running isAuthTokenValid() calls will fail, and clients // will be forced to re-establish connections. Connections will be // re-established only when the sending client has also updated its // credentials. 
func (br *browserPeerAPIHandlers) SetAuthPeer(args SetAuthPeerArgs, reply *GenericReply) error { // Check auth - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } diff --git a/cmd/credential.go b/cmd/credential.go index 3e2c7d576..4d84c9301 100644 --- a/cmd/credential.go +++ b/cmd/credential.go @@ -72,3 +72,15 @@ type credential struct { func newCredential() credential { return credential{mustGetAccessKey(), mustGetSecretKey()} } + +func getCredential(accessKey, secretKey string) (credential, error) { + if !isAccessKeyValid(accessKey) { + return credential{}, errInvalidAccessKeyLength + } + + if !isSecretKeyValid(secretKey) { + return credential{}, errInvalidSecretKeyLength + } + + return credential{accessKey, secretKey}, nil +} diff --git a/cmd/jwt.go b/cmd/jwt.go new file mode 100644 index 000000000..c87e15ffa --- /dev/null +++ b/cmd/jwt.go @@ -0,0 +1,116 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "errors" + "fmt" + "net/http" + "strings" + "time" + + jwtgo "github.com/dgrijalva/jwt-go" + jwtreq "github.com/dgrijalva/jwt-go/request" + "golang.org/x/crypto/bcrypt" +) + +const ( + jwtAlgorithm = "Bearer" + + // Default JWT token for web handlers is one day. + defaultJWTExpiry = 24 * time.Hour + + // Inter-node JWT token expiry is 100 years approx. 
+ defaultInterNodeJWTExpiry = 100 * 365 * 24 * time.Hour +) + +var errInvalidAccessKeyLength = errors.New("Invalid access key, access key should be 5 to 20 characters in length") +var errInvalidSecretKeyLength = errors.New("Invalid secret key, secret key should be 8 to 40 characters in length") + +var errInvalidAccessKeyID = errors.New("The access key ID you provided does not exist in our records") +var errAuthentication = errors.New("Authentication failed, check your access credentials") + +func authenticateJWT(accessKey, secretKey string, expiry time.Duration) (string, error) { + // Trim spaces. + accessKey = strings.TrimSpace(accessKey) + + if !isAccessKeyValid(accessKey) { + return "", errInvalidAccessKeyLength + } + if !isSecretKeyValid(secretKey) { + return "", errInvalidSecretKeyLength + } + + serverCred := serverConfig.GetCredential() + + // Validate access key. + if accessKey != serverCred.AccessKey { + return "", errInvalidAccessKeyID + } + + // Validate secret key. + // Using bcrypt to avoid timing attacks. 
+ hashedSecretKey, _ := bcrypt.GenerateFromPassword([]byte(serverCred.SecretKey), bcrypt.DefaultCost) + if bcrypt.CompareHashAndPassword(hashedSecretKey, []byte(secretKey)) != nil { + return "", errAuthentication + } + + utcNow := time.Now().UTC() + token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims{ + "exp": utcNow.Add(expiry).Unix(), + "iat": utcNow.Unix(), + "sub": accessKey, + }) + + return token.SignedString([]byte(serverCred.SecretKey)) +} + +func authenticateNode(accessKey, secretKey string) (string, error) { + return authenticateJWT(accessKey, secretKey, defaultInterNodeJWTExpiry) +} + +func authenticateWeb(accessKey, secretKey string) (string, error) { + return authenticateJWT(accessKey, secretKey, defaultJWTExpiry) +} + +func keyFuncCallback(jwtToken *jwtgo.Token) (interface{}, error) { + if _, ok := jwtToken.Method.(*jwtgo.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", jwtToken.Header["alg"]) + } + + return []byte(serverConfig.GetCredential().SecretKey), nil +} + +func isAuthTokenValid(tokenString string) bool { + jwtToken, err := jwtgo.Parse(tokenString, keyFuncCallback) + if err != nil { + errorIf(err, "Unable to parse JWT token string") + return false + } + + return jwtToken.Valid +} + +func isHTTPRequestValid(req *http.Request) bool { + jwtToken, err := jwtreq.ParseFromRequest(req, jwtreq.AuthorizationHeaderExtractor, keyFuncCallback) + if err != nil { + errorIf(err, "Unable to parse JWT token string") + return false + } + + return jwtToken.Valid +} diff --git a/cmd/jwt_test.go b/cmd/jwt_test.go new file mode 100644 index 000000000..341ecd591 --- /dev/null +++ b/cmd/jwt_test.go @@ -0,0 +1,84 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import "testing" + +func testAuthenticate(authType string, t *testing.T) { + testPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("unable initialize config file, %s", err) + } + defer removeAll(testPath) + + serverCred := serverConfig.GetCredential() + + // Define test cases. + testCases := []struct { + accessKey string + secretKey string + expectedErr error + }{ + // Access key too small. + {"user", "pass", errInvalidAccessKeyLength}, + // Access key too long. + {"user12345678901234567", "pass", errInvalidAccessKeyLength}, + // Access key contains unsuppported characters. + {"!@#$", "pass", errInvalidAccessKeyLength}, + // Secret key too small. + {"myuser", "pass", errInvalidSecretKeyLength}, + // Secret key too long. + {"myuser", "pass1234567890123456789012345678901234567", errInvalidSecretKeyLength}, + // Authentication error. + {"myuser", "mypassword", errInvalidAccessKeyID}, + // Authentication error. + {serverCred.AccessKey, "mypassword", errAuthentication}, + // Success. + {serverCred.AccessKey, serverCred.SecretKey, nil}, + // Success when access key contains leading/trailing spaces. + {" " + serverCred.AccessKey + " ", serverCred.SecretKey, nil}, + } + + // Run tests. 
+ for _, testCase := range testCases { + var err error + if authType == "node" { + _, err = authenticateNode(testCase.accessKey, testCase.secretKey) + } else if authType == "web" { + _, err = authenticateWeb(testCase.accessKey, testCase.secretKey) + } + + if testCase.expectedErr != nil { + if err == nil { + t.Fatalf("%+v: expected: %s, got: ", testCase, testCase.expectedErr) + } + if testCase.expectedErr.Error() != err.Error() { + t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedErr, err) + } + } else if err != nil { + t.Fatalf("%+v: expected: , got: %s", testCase, err) + } + } +} + +func TestNodeAuthenticate(t *testing.T) { + testAuthenticate("node", t) +} + +func TestWebAuthenticate(t *testing.T) { + testAuthenticate("web", t) +} diff --git a/cmd/lock-rpc-server-common.go b/cmd/lock-rpc-server-common.go index 716028de6..c0e9a11ad 100644 --- a/cmd/lock-rpc-server-common.go +++ b/cmd/lock-rpc-server-common.go @@ -65,7 +65,7 @@ func (l *lockServer) validateLockArgs(args *LockArgs) error { if curTime.Sub(args.Timestamp) > globalMaxSkewTime || args.Timestamp.Sub(curTime) > globalMaxSkewTime { return errServerTimeMismatch } - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } return nil diff --git a/cmd/login-server.go b/cmd/login-server.go index 0de184100..223288609 100644 --- a/cmd/login-server.go +++ b/cmd/login-server.go @@ -23,17 +23,11 @@ type loginServer struct { // LoginHandler - Handles JWT based RPC logic. 
func (b loginServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error { - jwt, err := newJWT(defaultInterNodeJWTExpiry, serverConfig.GetCredential()) - if err != nil { - return err - } - if err = jwt.Authenticate(args.Username, args.Password); err != nil { - return err - } - token, err := jwt.GenerateToken(args.Username) + token, err := authenticateNode(args.Username, args.Password) if err != nil { return err } + reply.Token = token reply.Timestamp = time.Now().UTC() reply.ServerVersion = Version diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 1b9b891d9..52bd96058 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -545,7 +545,7 @@ func buildGenericPolicy(t time.Time, accessKey, bucketName, objectName string, c // Expire the request five minutes from now. expirationTime := t.Add(time.Minute * 5) - credStr := getCredential(accessKey, serverConfig.GetRegion(), t) + credStr := getCredentialString(accessKey, serverConfig.GetRegion(), t) // Create a new post policy. policy := newPostPolicyBytesV4(credStr, bucketName, objectName, expirationTime) if contentLengthRange { @@ -557,7 +557,7 @@ func buildGenericPolicy(t time.Time, accessKey, bucketName, objectName string, c func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) { // Get the user credential. - credStr := getCredential(accessKey, serverConfig.GetRegion(), t) + credStr := getCredentialString(accessKey, serverConfig.GetRegion(), t) // Only need the encoding. 
encodedPolicy := base64.StdEncoding.EncodeToString(policy) diff --git a/cmd/s3-peer-rpc-handlers.go b/cmd/s3-peer-rpc-handlers.go index 46b7d9611..769d84f76 100644 --- a/cmd/s3-peer-rpc-handlers.go +++ b/cmd/s3-peer-rpc-handlers.go @@ -37,7 +37,7 @@ func (s *SetBucketNotificationPeerArgs) BucketUpdate(client BucketMetaState) err func (s3 *s3PeerAPIHandlers) SetBucketNotificationPeer(args *SetBucketNotificationPeerArgs, reply *GenericReply) error { // check auth - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } @@ -64,7 +64,7 @@ func (s *SetBucketListenerPeerArgs) BucketUpdate(client BucketMetaState) error { func (s3 *s3PeerAPIHandlers) SetBucketListenerPeer(args *SetBucketListenerPeerArgs, reply *GenericReply) error { // check auth - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } @@ -86,7 +86,7 @@ type EventArgs struct { // submit an event to the receiving server. func (s3 *s3PeerAPIHandlers) Event(args *EventArgs, reply *GenericReply) error { // check auth - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } @@ -114,7 +114,7 @@ func (s *SetBucketPolicyPeerArgs) BucketUpdate(client BucketMetaState) error { // tell receiving server to update a bucket policy func (s3 *s3PeerAPIHandlers) SetBucketPolicyPeer(args *SetBucketPolicyPeerArgs, reply *GenericReply) error { // check auth - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } diff --git a/cmd/signature-jwt.go b/cmd/signature-jwt.go deleted file mode 100644 index ab2d1b4cd..000000000 --- a/cmd/signature-jwt.go +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "errors" - "strings" - "time" - - jwtgo "github.com/dgrijalva/jwt-go" - "golang.org/x/crypto/bcrypt" -) - -const jwtAlgorithm = "Bearer" - -// JWT - jwt auth backend -type JWT struct { - credential - expiry time.Duration -} - -const ( - // Default JWT token for web handlers is one day. - defaultJWTExpiry time.Duration = time.Hour * 24 - - // Inter-node JWT token expiry is 100 years. - defaultInterNodeJWTExpiry time.Duration = time.Hour * 24 * 365 * 100 -) - -// newJWT - returns new JWT object. -func newJWT(expiry time.Duration, cred credential) (*JWT, error) { - if !isAccessKeyValid(cred.AccessKey) { - return nil, errInvalidAccessKeyLength - } - if !isSecretKeyValid(cred.SecretKey) { - return nil, errInvalidSecretKeyLength - } - return &JWT{cred, expiry}, nil -} - -var errInvalidAccessKeyLength = errors.New("Invalid access key, access key should be 5 to 20 characters in length") -var errInvalidSecretKeyLength = errors.New("Invalid secret key, secret key should be 8 to 40 characters in length") - -// GenerateToken - generates a new Json Web Token based on the incoming access key. -func (jwt *JWT) GenerateToken(accessKey string) (string, error) { - // Trim spaces. - accessKey = strings.TrimSpace(accessKey) - - if !isAccessKeyValid(accessKey) { - return "", errInvalidAccessKeyLength - } - - tUTCNow := time.Now().UTC() - token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims{ - // Token expires in 10hrs. 
- "exp": tUTCNow.Add(jwt.expiry).Unix(), - "iat": tUTCNow.Unix(), - "sub": accessKey, - }) - return token.SignedString([]byte(jwt.SecretKey)) -} - -var errInvalidAccessKeyID = errors.New("The access key ID you provided does not exist in our records") -var errAuthentication = errors.New("Authentication failed, check your access credentials") - -// Authenticate - authenticates incoming access key and secret key. -func (jwt *JWT) Authenticate(accessKey, secretKey string) error { - // Trim spaces. - accessKey = strings.TrimSpace(accessKey) - - if !isAccessKeyValid(accessKey) { - return errInvalidAccessKeyLength - } - if !isSecretKeyValid(secretKey) { - return errInvalidSecretKeyLength - } - - if accessKey != jwt.AccessKey { - return errInvalidAccessKeyID - } - - hashedSecretKey, _ := bcrypt.GenerateFromPassword([]byte(jwt.SecretKey), bcrypt.DefaultCost) - if bcrypt.CompareHashAndPassword(hashedSecretKey, []byte(secretKey)) != nil { - return errAuthentication - } - - // Success. - return nil -} diff --git a/cmd/signature-jwt_test.go b/cmd/signature-jwt_test.go deleted file mode 100644 index b3421deb1..000000000 --- a/cmd/signature-jwt_test.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import ( - "io/ioutil" - "os" - "path" - "testing" -) - -// Tests newJWT() -func TestNewJWT(t *testing.T) { - savedServerConfig := serverConfig - defer func() { - serverConfig = savedServerConfig - }() - serverConfig = nil - - // Test non-existent config directory. - path1, err := ioutil.TempDir(globalTestTmpDir, "minio-") - if err != nil { - t.Fatalf("Unable to create a temporary directory, %s", err) - } - defer removeAll(path1) - - // Test empty config directory. - path2, err := ioutil.TempDir(globalTestTmpDir, "minio-") - if err != nil { - t.Fatalf("Unable to create a temporary directory, %s", err) - } - defer removeAll(path2) - - // Test empty config file. - path3, err := ioutil.TempDir(globalTestTmpDir, "minio-") - if err != nil { - t.Fatalf("Unable to create a temporary directory, %s", err) - } - defer removeAll(path3) - - if err = ioutil.WriteFile(path.Join(path3, "config.json"), []byte{}, os.ModePerm); err != nil { - t.Fatalf("unable to create config file, %s", err) - } - - // Test initialized config file. - path4, err := ioutil.TempDir(globalTestTmpDir, "minio-") - if err != nil { - t.Fatalf("Unable to create a temporary directory, %s", err) - } - defer removeAll(path4) - - // Define test cases. - testCases := []struct { - dirPath string - init bool - cred *credential - expectedErr error - }{ - // Test initialized config file. - {path4, true, nil, nil}, - // Test to read already created config file. - {path4, true, nil, nil}, - // Access key is too small. - {path4, false, &credential{"user", "pass"}, errInvalidAccessKeyLength}, - // Access key is too long. - {path4, false, &credential{"user12345678901234567", "pass"}, errInvalidAccessKeyLength}, - // Secret key is too small. - {path4, false, &credential{"myuser", "pass"}, errInvalidSecretKeyLength}, - // Secret key is too long. - {path4, false, &credential{"myuser", "pass1234567890123456789012345678901234567"}, errInvalidSecretKeyLength}, - // Valid access/secret keys. 
- {path4, false, &credential{"myuser", "mypassword"}, nil}, - } - - // Run tests. - for _, testCase := range testCases { - setGlobalConfigPath(testCase.dirPath) - if testCase.init { - if _, err := initConfig(); err != nil { - t.Fatalf("unable initialize config file, %s", err) - } - } - if testCase.cred != nil { - serverConfig.SetCredential(*testCase.cred) - } - _, err := newJWT(defaultJWTExpiry, serverConfig.GetCredential()) - if testCase.expectedErr != nil { - if err == nil { - t.Fatalf("%+v: expected: %s, got: ", testCase, testCase.expectedErr) - } - if testCase.expectedErr.Error() != err.Error() { - t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedErr, err) - } - } else if err != nil { - t.Fatalf("%+v: expected: , got: %s", testCase, err) - } - } -} - -// Tests JWT.GenerateToken() -func TestGenerateToken(t *testing.T) { - testPath, err := newTestConfig("us-east-1") - if err != nil { - t.Fatalf("unable initialize config file, %s", err) - } - defer removeAll(testPath) - - jwt, err := newJWT(defaultJWTExpiry, serverConfig.GetCredential()) - if err != nil { - t.Fatalf("unable get new JWT, %s", err) - } - - // Define test cases. - testCases := []struct { - accessKey string - expectedErr error - }{ - // Access key is too small. - {"user", errInvalidAccessKeyLength}, - // Access key is too long. - {"user12345678901234567", errInvalidAccessKeyLength}, - // Access key contains unsupported characters. - {"!@#$", errInvalidAccessKeyLength}, - // Valid access key. - {"myuser", nil}, - // Valid access key with leading/trailing spaces. - {" myuser ", nil}, - } - - // Run tests. 
- for _, testCase := range testCases { - _, err := jwt.GenerateToken(testCase.accessKey) - if testCase.expectedErr != nil { - if err == nil { - t.Fatalf("%+v: expected: %s, got: ", testCase, testCase.expectedErr) - } - - if testCase.expectedErr.Error() != err.Error() { - t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedErr, err) - } - } else if err != nil { - t.Fatalf("%+v: expected: , got: %s", testCase, err) - } - } -} - -// Tests JWT.Authenticate() -func TestAuthenticate(t *testing.T) { - testPath, err := newTestConfig("us-east-1") - if err != nil { - t.Fatalf("unable initialize config file, %s", err) - } - defer removeAll(testPath) - - jwt, err := newJWT(defaultJWTExpiry, serverConfig.GetCredential()) - if err != nil { - t.Fatalf("unable get new JWT, %s", err) - } - - // Define test cases. - testCases := []struct { - accessKey string - secretKey string - expectedErr error - }{ - // Access key too small. - {"user", "pass", errInvalidAccessKeyLength}, - // Access key too long. - {"user12345678901234567", "pass", errInvalidAccessKeyLength}, - // Access key contains unsuppported characters. - {"!@#$", "pass", errInvalidAccessKeyLength}, - // Secret key too small. - {"myuser", "pass", errInvalidSecretKeyLength}, - // Secret key too long. - {"myuser", "pass1234567890123456789012345678901234567", errInvalidSecretKeyLength}, - // Authentication error. - {"myuser", "mypassword", errInvalidAccessKeyID}, - // Authentication error. - {serverConfig.GetCredential().AccessKey, "mypassword", errAuthentication}, - // Success. - {serverConfig.GetCredential().AccessKey, serverConfig.GetCredential().SecretKey, nil}, - // Success when access key contains leading/trailing spaces. - {" " + serverConfig.GetCredential().AccessKey + " ", serverConfig.GetCredential().SecretKey, nil}, - } - - // Run tests. 
- for _, testCase := range testCases { - err := jwt.Authenticate(testCase.accessKey, testCase.secretKey) - if testCase.expectedErr != nil { - if err == nil { - t.Fatalf("%+v: expected: %s, got: ", testCase, testCase.expectedErr) - } - if testCase.expectedErr.Error() != err.Error() { - t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedErr, err) - } - } else if err != nil { - t.Fatalf("%+v: expected: , got: %s", testCase, err) - } - } -} diff --git a/cmd/storage-rpc-server.go b/cmd/storage-rpc-server.go index 66dfc294e..0210a4c8b 100644 --- a/cmd/storage-rpc-server.go +++ b/cmd/storage-rpc-server.go @@ -39,7 +39,7 @@ type storageServer struct { // DiskInfoHandler - disk info handler is rpc wrapper for DiskInfo operation. func (s *storageServer) DiskInfoHandler(args *GenericArgs, reply *disk.Info) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } info, err := s.storage.DiskInfo() @@ -51,7 +51,7 @@ func (s *storageServer) DiskInfoHandler(args *GenericArgs, reply *disk.Info) err // MakeVolHandler - make vol handler is rpc wrapper for MakeVol operation. func (s *storageServer) MakeVolHandler(args *GenericVolArgs, reply *GenericReply) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } return s.storage.MakeVol(args.Vol) @@ -59,7 +59,7 @@ func (s *storageServer) MakeVolHandler(args *GenericVolArgs, reply *GenericReply // ListVolsHandler - list vols handler is rpc wrapper for ListVols operation. func (s *storageServer) ListVolsHandler(args *GenericArgs, reply *ListVolsReply) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } vols, err := s.storage.ListVols() @@ -72,7 +72,7 @@ func (s *storageServer) ListVolsHandler(args *GenericArgs, reply *ListVolsReply) // StatVolHandler - stat vol handler is a rpc wrapper for StatVol operation. 
func (s *storageServer) StatVolHandler(args *GenericVolArgs, reply *VolInfo) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } volInfo, err := s.storage.StatVol(args.Vol) @@ -86,7 +86,7 @@ func (s *storageServer) StatVolHandler(args *GenericVolArgs, reply *VolInfo) err // DeleteVolHandler - delete vol handler is a rpc wrapper for // DeleteVol operation. func (s *storageServer) DeleteVolHandler(args *GenericVolArgs, reply *GenericReply) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } return s.storage.DeleteVol(args.Vol) @@ -96,7 +96,7 @@ func (s *storageServer) DeleteVolHandler(args *GenericVolArgs, reply *GenericRep // StatFileHandler - stat file handler is rpc wrapper to stat file. func (s *storageServer) StatFileHandler(args *StatFileArgs, reply *FileInfo) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } fileInfo, err := s.storage.StatFile(args.Vol, args.Path) @@ -109,7 +109,7 @@ func (s *storageServer) StatFileHandler(args *StatFileArgs, reply *FileInfo) err // ListDirHandler - list directory handler is rpc wrapper to list dir. func (s *storageServer) ListDirHandler(args *ListDirArgs, reply *[]string) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } entries, err := s.storage.ListDir(args.Vol, args.Path) @@ -122,7 +122,7 @@ func (s *storageServer) ListDirHandler(args *ListDirArgs, reply *[]string) error // ReadAllHandler - read all handler is rpc wrapper to read all storage API. 
func (s *storageServer) ReadAllHandler(args *ReadFileArgs, reply *[]byte) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } buf, err := s.storage.ReadAll(args.Vol, args.Path) @@ -135,7 +135,7 @@ func (s *storageServer) ReadAllHandler(args *ReadFileArgs, reply *[]byte) error // ReadFileHandler - read file handler is rpc wrapper to read file. func (s *storageServer) ReadFileHandler(args *ReadFileArgs, reply *[]byte) (err error) { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } @@ -154,7 +154,7 @@ func (s *storageServer) ReadFileHandler(args *ReadFileArgs, reply *[]byte) (err // PrepareFileHandler - prepare file handler is rpc wrapper to prepare file. func (s *storageServer) PrepareFileHandler(args *PrepareFileArgs, reply *GenericReply) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } return s.storage.PrepareFile(args.Vol, args.Path, args.Size) @@ -162,7 +162,7 @@ func (s *storageServer) PrepareFileHandler(args *PrepareFileArgs, reply *Generic // AppendFileHandler - append file handler is rpc wrapper to append file. func (s *storageServer) AppendFileHandler(args *AppendFileArgs, reply *GenericReply) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } return s.storage.AppendFile(args.Vol, args.Path, args.Buffer) @@ -170,7 +170,7 @@ func (s *storageServer) AppendFileHandler(args *AppendFileArgs, reply *GenericRe // DeleteFileHandler - delete file handler is rpc wrapper to delete file. 
func (s *storageServer) DeleteFileHandler(args *DeleteFileArgs, reply *GenericReply) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } return s.storage.DeleteFile(args.Vol, args.Path) @@ -178,7 +178,7 @@ func (s *storageServer) DeleteFileHandler(args *DeleteFileArgs, reply *GenericRe // RenameFileHandler - rename file handler is rpc wrapper to rename file. func (s *storageServer) RenameFileHandler(args *RenameFileArgs, reply *GenericReply) error { - if !isRPCTokenValid(args.Token) { + if !isAuthTokenValid(args.Token) { return errInvalidToken } return s.storage.RenameFile(args.SrcVol, args.SrcPath, args.DstVol, args.DstPath) diff --git a/cmd/storage-rpc-server_test.go b/cmd/storage-rpc-server_test.go index 14ce5daa4..a64ebf07c 100644 --- a/cmd/storage-rpc-server_test.go +++ b/cmd/storage-rpc-server_test.go @@ -40,17 +40,8 @@ func createTestStorageServer(t *testing.T) *testStorageRPCServer { t.Fatalf("unable initialize config file, %s", err) } - jwt, err := newJWT(defaultInterNodeJWTExpiry, serverConfig.GetCredential()) - if err != nil { - t.Fatalf("unable to get new JWT, %s", err) - } - - err = jwt.Authenticate(serverConfig.GetCredential().AccessKey, serverConfig.GetCredential().SecretKey) - if err != nil { - t.Fatalf("unable for JWT to authenticate, %s", err) - } - - token, err := jwt.GenerateToken(serverConfig.GetCredential().AccessKey) + serverCred := serverConfig.GetCredential() + token, err := authenticateNode(serverCred.AccessKey, serverCred.SecretKey) if err != nil { t.Fatalf("unable for JWT to generate token, %s", err) } diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index b427a6ae2..c6c7a530f 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -1077,8 +1077,8 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error { return nil } -// getCredential generate a credential string. 
-func getCredential(accessKeyID, location string, t time.Time) string { +// getCredentialString generate a credential string. +func getCredentialString(accessKeyID, location string, t time.Time) string { return accessKeyID + "/" + getScope(t, location) } diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index 2f4324566..881abf7fc 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -29,8 +29,6 @@ import ( "strings" "time" - jwtgo "github.com/dgrijalva/jwt-go" - jwtreq "github.com/dgrijalva/jwt-go/request" "github.com/dustin/go-humanize" "github.com/gorilla/mux" "github.com/gorilla/rpc/v2/json2" @@ -38,30 +36,6 @@ import ( "github.com/minio/miniobrowser" ) -// isJWTReqAuthenticated validates if any incoming request to be a -// valid JWT authenticated request. -func isJWTReqAuthenticated(req *http.Request) bool { - jwt, err := newJWT(defaultJWTExpiry, serverConfig.GetCredential()) - if err != nil { - errorIf(err, "unable to initialize a new JWT") - return false - } - - var reqCallback jwtgo.Keyfunc - reqCallback = func(token *jwtgo.Token) (interface{}, error) { - if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - return []byte(jwt.SecretKey), nil - } - token, err := jwtreq.ParseFromRequest(req, jwtreq.AuthorizationHeaderExtractor, reqCallback) - if err != nil { - errorIf(err, "token parsing failed") - return false - } - return token.Valid -} - // WebGenericArgs - empty struct for calls that don't accept arguments // for ex. ServerInfo, GenerateAuth type WebGenericArgs struct{} @@ -84,7 +58,7 @@ type ServerInfoRep struct { // ServerInfo - get server info. 
func (web *webAPIHandlers) ServerInfo(r *http.Request, args *WebGenericArgs, reply *ServerInfoRep) error { - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } host, err := os.Hostname() @@ -125,7 +99,7 @@ func (web *webAPIHandlers) StorageInfo(r *http.Request, args *GenericArgs, reply if objectAPI == nil { return toJSONError(errServerNotInitialized) } - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } reply.StorageInfo = objectAPI.StorageInfo() @@ -144,7 +118,7 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep if objectAPI == nil { return toJSONError(errServerNotInitialized) } - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } bucketLock := globalNSMutex.NewNSLock(args.BucketName, "") @@ -177,7 +151,7 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re if objectAPI == nil { return toJSONError(errServerNotInitialized) } - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } buckets, err := objectAPI.ListBuckets() @@ -227,7 +201,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r if objectAPI == nil { return toJSONError(errServerNotInitialized) } - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } marker := "" @@ -271,7 +245,7 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs, if objectAPI == nil { return toJSONError(errServerNotInitialized) } - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } @@ -318,19 +292,11 @@ type LoginRep struct { // Login - user login handler. 
func (web *webAPIHandlers) Login(r *http.Request, args *LoginArgs, reply *LoginRep) error { - jwt, err := newJWT(defaultJWTExpiry, serverConfig.GetCredential()) + token, err := authenticateWeb(args.Username, args.Password) if err != nil { return toJSONError(err) } - if err = jwt.Authenticate(args.Username, args.Password); err != nil { - return toJSONError(err) - } - - token, err := jwt.GenerateToken(args.Username) - if err != nil { - return toJSONError(err) - } reply.Token = token reply.UIVersion = miniobrowser.UIVersion return nil @@ -344,7 +310,7 @@ type GenerateAuthReply struct { } func (web webAPIHandlers) GenerateAuth(r *http.Request, args *WebGenericArgs, reply *GenerateAuthReply) error { - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } cred := newCredential() @@ -369,41 +335,16 @@ type SetAuthReply struct { // SetAuth - Set accessKey and secretKey credentials. func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *SetAuthReply) error { - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } - // Initialize jwt with the new access keys, fail if not possible. - jwt, err := newJWT(defaultJWTExpiry, credential{ - AccessKey: args.AccessKey, - SecretKey: args.SecretKey, - }) // JWT Expiry set to 24Hrs. + // As we already validated the authentication, we save given access/secret keys. + cred, err := getCredential(args.AccessKey, args.SecretKey) if err != nil { return toJSONError(err) } - // Authenticate the secret key properly. - if err = jwt.Authenticate(args.AccessKey, args.SecretKey); err != nil { - return toJSONError(err) - - } - - unexpErrsMsg := "Unexpected error(s) occurred - please check minio server logs." - gaveUpMsg := func(errMsg error, moreErrors bool) *json2.Error { - msg := fmt.Sprintf( - "We gave up due to: '%s', but there were more errors. 
Please check minio server logs.", - errMsg.Error(), - ) - var err *json2.Error - if moreErrors { - err = toJSONError(errors.New(msg)) - } else { - err = toJSONError(errMsg) - } - return err - } - - cred := credential{args.AccessKey, args.SecretKey} // Notify all other Minio peers to update credentials errsMap := updateCredsOnPeers(cred) @@ -427,19 +368,21 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se // Since the error message may be very long to display // on the browser, we tell the user to check the // server logs. - return toJSONError(errors.New(unexpErrsMsg)) + return toJSONError(errors.New("unexpected error(s) occurred - please check minio server logs")) } - // Did we have peer errors? - var moreErrors bool - if len(errsMap) > 0 { - moreErrors = true - } - - // Generate a JWT token. - token, err := jwt.GenerateToken(args.AccessKey) + // As we have updated access/secret key, generate new auth token. + token, err := authenticateWeb(args.AccessKey, args.SecretKey) if err != nil { - return gaveUpMsg(err, moreErrors) + // Did we have peer errors? + if len(errsMap) > 0 { + err = fmt.Errorf( + "we gave up due to: '%s', but there were more errors. Please check minio server logs", + err.Error(), + ) + } + + return toJSONError(err) } reply.Token = token @@ -456,7 +399,7 @@ type GetAuthReply struct { // GetAuth - return accessKey and secretKey credentials. 
func (web *webAPIHandlers) GetAuth(r *http.Request, args *WebGenericArgs, reply *GetAuthReply) error { - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } creds := serverConfig.GetCredential() @@ -474,7 +417,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { return } - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { writeWebErrorResponse(w, errAuthentication) return } @@ -519,24 +462,13 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) bucket := vars["bucket"] object := vars["object"] - tokenStr := r.URL.Query().Get("token") + token := r.URL.Query().Get("token") - jwt, err := newJWT(defaultJWTExpiry, serverConfig.GetCredential()) // Expiry set to 24Hrs. - if err != nil { - errorIf(err, "error in getting new JWT") - return - } - - token, e := jwtgo.Parse(tokenStr, func(token *jwtgo.Token) (interface{}, error) { - if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - return []byte(jwt.SecretKey), nil - }) - if e != nil || !token.Valid { + if !isAuthTokenValid(token) { writeWebErrorResponse(w, errAuthentication) return } + // Add content disposition. 
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(object))) @@ -601,7 +533,7 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic return toJSONError(errServerNotInitialized) } - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } @@ -640,7 +572,7 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB return toJSONError(errServerNotInitialized) } - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } @@ -673,7 +605,7 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic return toJSONError(errServerNotInitialized) } - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } @@ -741,7 +673,7 @@ type PresignedGetRep struct { // PresignedGET - returns presigned-Get url. func (web *webAPIHandlers) PresignedGet(r *http.Request, args *PresignedGetArgs, reply *PresignedGetRep) error { - if !isJWTReqAuthenticated(r) { + if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } From f9025652df54fd00c197fec99b744226c05d7818 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 27 Dec 2016 17:52:24 -0800 Subject: [PATCH 038/100] Support building specific architectures. 
--- buildscripts/build.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/buildscripts/build.sh b/buildscripts/build.sh index f5be940f4..5a7af2352 100755 --- a/buildscripts/build.sh +++ b/buildscripts/build.sh @@ -70,12 +70,14 @@ main() { done read -p "If you want to build for all, Just press Enter: " chosen_osarch - if [ "$chosen_osarch" = "" ]; then + if [ "$chosen_osarch" = "" ] || [ "$chosen_osarch" = "all" ]; then for each_osarch in ${SUPPORTED_OSARCH}; do go_build ${each_osarch} done else - go_build ${chosen_osarch} + for each_osarch in $(echo $chosen_osarch | sed 's/,/ /g'); do + go_build ${each_osarch} + done fi } From 00baec17467e54252e4762cd958c4c341039f048 Mon Sep 17 00:00:00 2001 From: Dee Koder Date: Wed, 28 Dec 2016 16:07:05 -0800 Subject: [PATCH 039/100] syntax highlighting : Remove sh from code block which incorrectly highlights shell commands. (#3504) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index cc24414c2..4f9c0c172 100644 --- a/README.md +++ b/README.md @@ -6,13 +6,13 @@ Minio server is light enough to be bundled with the application stack, similar t ## Docker Container ### Stable -```sh +``` docker pull minio/minio docker run -p 9000:9000 minio/minio server /export ``` ### Edge -```sh +``` docker pull minio/minio:edge docker run -p 9000:9000 minio/minio:edge server /export ``` From 41cf580bb1f713674c497f44fc5ca32e8d91f944 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 29 Dec 2016 03:13:51 -0800 Subject: [PATCH 040/100] Improve reconnection logic, allow jitters. (#3502) Attempt a reconnect also if disk not found. This is needed since any network operation error is converted to disk not found but we also need to make sure if disk is really not available. Additionally we also need to retry more than once because the server might be in startup sequence which would render other servers to wrongly think that the server is offline. 
--- cmd/globals.go | 7 +++ cmd/retry-storage.go | 86 ++++++++++++++++++++-------------- cmd/retry-storage_test.go | 35 +++++++------- cmd/retry.go | 2 +- cmd/storage-rpc-client.go | 36 ++++++++------ cmd/storage-rpc-client_test.go | 2 +- cmd/test-utils_test.go | 9 ++++ 7 files changed, 106 insertions(+), 71 deletions(-) diff --git a/cmd/globals.go b/cmd/globals.go index 8e039597f..613d359a3 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -79,13 +79,20 @@ var ( globalMinioPort = "9000" // Holds the host that was passed using --address globalMinioHost = "" + // Peer communication struct globalS3Peers = s3Peers{} // CA root certificates, a nil value means system certs pool will be used globalRootCAs *x509.CertPool + // List of admin peers. globalAdminPeers = adminPeers{} + + // Attempt to retry only this many number of times before + // giving up on the remote disk entirely. + globalMaxStorageRetryThreshold = 3 + // Add new variable global values here. ) diff --git a/cmd/retry-storage.go b/cmd/retry-storage.go index 7712bb31b..9bc8ab1a1 100644 --- a/cmd/retry-storage.go +++ b/cmd/retry-storage.go @@ -17,7 +17,7 @@ package cmd import ( - "net/rpc" + "time" "github.com/minio/minio/pkg/disk" ) @@ -48,7 +48,7 @@ func (f retryStorage) Close() (err error) { // DiskInfo - a retryable implementation of disk info. func (f retryStorage) DiskInfo() (info disk.Info, err error) { info, err = f.remoteStorage.DiskInfo() - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.DiskInfo() @@ -60,7 +60,7 @@ func (f retryStorage) DiskInfo() (info disk.Info, err error) { // MakeVol - a retryable implementation of creating a volume. 
func (f retryStorage) MakeVol(volume string) (err error) { err = f.remoteStorage.MakeVol(volume) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.MakeVol(volume) @@ -72,7 +72,7 @@ func (f retryStorage) MakeVol(volume string) (err error) { // ListVols - a retryable implementation of listing all the volumes. func (f retryStorage) ListVols() (vols []VolInfo, err error) { vols, err = f.remoteStorage.ListVols() - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.ListVols() @@ -84,7 +84,7 @@ func (f retryStorage) ListVols() (vols []VolInfo, err error) { // StatVol - a retryable implementation of stating a volume. func (f retryStorage) StatVol(volume string) (vol VolInfo, err error) { vol, err = f.remoteStorage.StatVol(volume) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.StatVol(volume) @@ -96,7 +96,7 @@ func (f retryStorage) StatVol(volume string) (vol VolInfo, err error) { // DeleteVol - a retryable implementation of deleting a volume. func (f retryStorage) DeleteVol(volume string) (err error) { err = f.remoteStorage.DeleteVol(volume) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.DeleteVol(volume) @@ -108,7 +108,7 @@ func (f retryStorage) DeleteVol(volume string) (err error) { // PrepareFile - a retryable implementation of preparing a file. func (f retryStorage) PrepareFile(volume, path string, length int64) (err error) { err = f.remoteStorage.PrepareFile(volume, path, length) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.PrepareFile(volume, path, length) @@ -120,7 +120,7 @@ func (f retryStorage) PrepareFile(volume, path string, length int64) (err error) // AppendFile - a retryable implementation of append to a file. 
func (f retryStorage) AppendFile(volume, path string, buffer []byte) (err error) { err = f.remoteStorage.AppendFile(volume, path, buffer) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.AppendFile(volume, path, buffer) @@ -132,7 +132,7 @@ func (f retryStorage) AppendFile(volume, path string, buffer []byte) (err error) // StatFile - a retryable implementation of stating a file. func (f retryStorage) StatFile(volume, path string) (fileInfo FileInfo, err error) { fileInfo, err = f.remoteStorage.StatFile(volume, path) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.StatFile(volume, path) @@ -144,7 +144,7 @@ func (f retryStorage) StatFile(volume, path string) (fileInfo FileInfo, err erro // ReadAll - a retryable implementation of reading all the content from a file. func (f retryStorage) ReadAll(volume, path string) (buf []byte, err error) { buf, err = f.remoteStorage.ReadAll(volume, path) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.ReadAll(volume, path) @@ -156,7 +156,7 @@ func (f retryStorage) ReadAll(volume, path string) (buf []byte, err error) { // ReadFile - a retryable implementation of reading at offset from a file. func (f retryStorage) ReadFile(volume, path string, offset int64, buffer []byte) (m int64, err error) { m, err = f.remoteStorage.ReadFile(volume, path, offset, buffer) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.ReadFile(volume, path, offset, buffer) @@ -168,7 +168,7 @@ func (f retryStorage) ReadFile(volume, path string, offset int64, buffer []byte) // ListDir - a retryable implementation of listing directory entries. 
func (f retryStorage) ListDir(volume, path string) (entries []string, err error) { entries, err = f.remoteStorage.ListDir(volume, path) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.ListDir(volume, path) @@ -180,7 +180,7 @@ func (f retryStorage) ListDir(volume, path string) (entries []string, err error) // DeleteFile - a retryable implementation of deleting a file. func (f retryStorage) DeleteFile(volume, path string) (err error) { err = f.remoteStorage.DeleteFile(volume, path) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.DeleteFile(volume, path) @@ -189,32 +189,10 @@ func (f retryStorage) DeleteFile(volume, path string) (err error) { return err } -// Connect and attempt to load the format from a disconnected node. -func (f retryStorage) reInit() (err error) { - err = f.remoteStorage.Close() - if err != nil { - return err - } - err = f.remoteStorage.Init() - if err == nil { - _, err = loadFormat(f.remoteStorage) - // For load format returning network shutdown - // we now treat it like disk not available. - if err == rpc.ErrShutdown { - err = errDiskNotFound - } - return err - } - if err == rpc.ErrShutdown { - err = errDiskNotFound - } - return err -} - // RenameFile - a retryable implementation of renaming a file. func (f retryStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) { err = f.remoteStorage.RenameFile(srcVolume, srcPath, dstVolume, dstPath) - if err == rpc.ErrShutdown { + if err == errDiskNotFound { err = f.reInit() if err == nil { return f.remoteStorage.RenameFile(srcVolume, srcPath, dstVolume, dstPath) @@ -222,3 +200,39 @@ func (f retryStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) } return err } + +// Connect and attempt to load the format from a disconnected node, +// attempts three times before giving up. 
+func (f retryStorage) reInit() (err error) { + // Close the underlying connection. + f.remoteStorage.Close() // Error here is purposefully ignored. + + doneCh := make(chan struct{}) + defer close(doneCh) + for i := range newRetryTimer(time.Second, time.Second*30, MaxJitter, doneCh) { + // Initialize and make a new login attempt. + err = f.remoteStorage.Init() + if err != nil { + // No need to return error until the retry count + // threshold has reached. + if i < globalMaxStorageRetryThreshold { + continue + } + return err + } + // Attempt to load format to see if the disk is really + // a formatted disk and part of the cluster. + _, err = loadFormat(f.remoteStorage) + if err != nil { + // No need to return error until the retry count + // threshold has reached. + if i < globalMaxStorageRetryThreshold { + continue + } + return err + } + // Login and loading format was a success, break and proceed forward. + break + } + return err +} diff --git a/cmd/retry-storage_test.go b/cmd/retry-storage_test.go index 84d79135d..67315a8f2 100644 --- a/cmd/retry-storage_test.go +++ b/cmd/retry-storage_test.go @@ -18,7 +18,6 @@ package cmd import ( "bytes" - "net/rpc" "reflect" "testing" ) @@ -41,7 +40,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -54,14 +53,14 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } for _, disk := range storageDisks { err = disk.Init() - if err != rpc.ErrShutdown { - t.Fatal("Expected rpc.ErrShutdown, got", err) + if err != errDiskNotFound { + t.Fatal("Expected errDiskNotFound, got", err) } } @@ -79,7 +78,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } 
storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -99,7 +98,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -116,7 +115,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -133,7 +132,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -153,7 +152,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -170,7 +169,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -187,7 +186,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -204,7 +203,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -225,7 +224,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 
1: errDiskNotFound, }, nil)} } @@ -250,7 +249,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -270,7 +269,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -287,7 +286,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } @@ -308,7 +307,7 @@ func TestRetryStorage(t *testing.T) { t.Fatal("storage disk is not *retryStorage type") } storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: rpc.ErrShutdown, + 1: errDiskNotFound, }, nil)} } diff --git a/cmd/retry.go b/cmd/retry.go index b7df77d9a..364212af1 100644 --- a/cmd/retry.go +++ b/cmd/retry.go @@ -90,7 +90,7 @@ func newRetryTimer(unit time.Duration, cap time.Duration, jitter float64, doneCh go func() { defer close(attemptCh) - var nextBackoff int + nextBackoff := 1 for { select { // Attempts starts. diff --git a/cmd/storage-rpc-client.go b/cmd/storage-rpc-client.go index 3f969d497..fadf66b0c 100644 --- a/cmd/storage-rpc-client.go +++ b/cmd/storage-rpc-client.go @@ -52,6 +52,10 @@ func toStorageErr(err error) error { return errDiskNotFound } + if err == rpc.ErrShutdown { + return errDiskNotFound + } + switch err.Error() { case io.EOF.Error(): return io.EOF @@ -143,19 +147,21 @@ const maxAllowedNetworkIOError = 1024 // Initializes the remote RPC connection by attempting a login attempt. func (n *networkStorage) Init() (err error) { // Attempt a login to reconnect. 
- return n.rpcClient.Login() + err = n.rpcClient.Login() + return toStorageErr(err) } // Closes the underlying RPC connection. func (n *networkStorage) Close() (err error) { // Close the underlying connection. - return n.rpcClient.Close() + err = n.rpcClient.Close() + return toStorageErr(err) } // DiskInfo - fetch disk information for a remote disk. func (n *networkStorage) DiskInfo() (info disk.Info, err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -176,7 +182,7 @@ func (n *networkStorage) DiskInfo() (info disk.Info, err error) { // MakeVol - create a volume on a remote disk. func (n *networkStorage) MakeVol(volume string) (err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -198,7 +204,7 @@ func (n *networkStorage) MakeVol(volume string) (err error) { // ListVols - List all volumes on a remote disk. func (n *networkStorage) ListVols() (vols []VolInfo, err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -220,7 +226,7 @@ func (n *networkStorage) ListVols() (vols []VolInfo, err error) { // StatVol - get volume info over the network. func (n *networkStorage) StatVol(volume string) (volInfo VolInfo, err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -241,7 +247,7 @@ func (n *networkStorage) StatVol(volume string) (volInfo VolInfo, err error) { // DeleteVol - Deletes a volume over the network. 
func (n *networkStorage) DeleteVol(volume string) (err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -264,7 +270,7 @@ func (n *networkStorage) DeleteVol(volume string) (err error) { func (n *networkStorage) PrepareFile(volume, path string, length int64) (err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -288,7 +294,7 @@ func (n *networkStorage) PrepareFile(volume, path string, length int64) (err err // AppendFile - append file writes buffer to a remote network path. func (n *networkStorage) AppendFile(volume, path string, buffer []byte) (err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -313,7 +319,7 @@ func (n *networkStorage) AppendFile(volume, path string, buffer []byte) (err err // StatFile - get latest Stat information for a file at path. func (n *networkStorage) StatFile(volume, path string) (fileInfo FileInfo, err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -339,7 +345,7 @@ func (n *networkStorage) StatFile(volume, path string) (fileInfo FileInfo, err e // not use this on large files as it would cause server to crash. func (n *networkStorage) ReadAll(volume, path string) (buf []byte, err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -362,7 +368,7 @@ func (n *networkStorage) ReadAll(volume, path string) (buf []byte, err error) { // ReadFile - reads a file at remote path and fills the buffer. 
func (n *networkStorage) ReadFile(volume string, path string, offset int64, buffer []byte) (m int64, err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -398,7 +404,7 @@ func (n *networkStorage) ReadFile(volume string, path string, offset int64, buff // ListDir - list all entries at prefix. func (n *networkStorage) ListDir(volume, path string) (entries []string, err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -422,7 +428,7 @@ func (n *networkStorage) ListDir(volume, path string) (entries []string, err err // DeleteFile - Delete a file at path. func (n *networkStorage) DeleteFile(volume, path string) (err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() @@ -446,7 +452,7 @@ func (n *networkStorage) DeleteFile(volume, path string) (err error) { // RenameFile - rename a remote file from source to destination. func (n *networkStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) { defer func() { - if err == errDiskNotFound || err == rpc.ErrShutdown { + if err == errDiskNotFound { atomic.AddInt32(&n.networkIOErrCount, 1) } }() diff --git a/cmd/storage-rpc-client_test.go b/cmd/storage-rpc-client_test.go index e414b1b18..56970ca79 100644 --- a/cmd/storage-rpc-client_test.go +++ b/cmd/storage-rpc-client_test.go @@ -52,7 +52,7 @@ func TestStorageErr(t *testing.T) { err: &net.OpError{}, }, { - expectedErr: rpc.ErrShutdown, + expectedErr: errDiskNotFound, err: rpc.ErrShutdown, }, { diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index c6c7a530f..39aeb9cc7 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -65,6 +65,9 @@ func init() { // Enable caching. 
setMaxMemory() + + // Tests don't need to retry. + globalMaxStorageRetryThreshold = 1 } func prepareFS() (ObjectLayer, string, error) { @@ -1945,6 +1948,12 @@ func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) { // ExecObjectLayerDiskAlteredTest - executes object layer tests while altering // disks in between tests. Creates XL ObjectLayer instance and runs test for XL layer. func ExecObjectLayerDiskAlteredTest(t *testing.T, objTest objTestDiskNotFoundType) { + configPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatal("Failed to create config directory", err) + } + defer removeAll(configPath) + objLayer, fsDirs, err := prepareXL() if err != nil { t.Fatalf("Initialization of object layer failed for XL setup: %s", err) From dd68cdd802e5fb61611ac2516f5e08c6d2ca1732 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 29 Dec 2016 19:42:02 -0800 Subject: [PATCH 041/100] Auto-reconnect for regular authRPC client. (#3506) Implement a storage rpc specific rpc client, which does not reconnect unnecessarily. Instead reconnect is handled at a different layer for storage alone. Rest of the calls using AuthRPC automatically reconnect, i.e upon an error equal to `rpc.ErrShutdown` they dial again and call the requested method again. 
--- cmd/admin-rpc-client.go | 13 +--- cmd/auth-rpc-client.go | 47 ++++++++------ cmd/bucket-metadata.go | 49 ++------------ cmd/globals.go | 4 ++ cmd/storage-rpc-client.go | 132 +++++++++++++++++++++++++++++++++----- 5 files changed, 155 insertions(+), 90 deletions(-) diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go index d454cc05a..43c9c3ae5 100644 --- a/cmd/admin-rpc-client.go +++ b/cmd/admin-rpc-client.go @@ -17,7 +17,6 @@ package cmd import ( - "net/rpc" "net/url" "path" "sync" @@ -58,22 +57,14 @@ func (lc localAdminClient) Restart() error { func (rc remoteAdminClient) Stop() error { args := GenericArgs{} reply := GenericReply{} - err := rc.Call("Service.Shutdown", &args, &reply) - if err != nil && err == rpc.ErrShutdown { - rc.Close() - } - return err + return rc.Call("Service.Shutdown", &args, &reply) } // Restart - Sends restart command to remote server via RPC. func (rc remoteAdminClient) Restart() error { args := GenericArgs{} reply := GenericReply{} - err := rc.Call("Service.Restart", &args, &reply) - if err != nil && err == rpc.ErrShutdown { - rc.Close() - } - return err + return rc.Call("Service.Restart", &args, &reply) } // adminPeer - represents an entity that implements Stop and Restart methods. diff --git a/cmd/auth-rpc-client.go b/cmd/auth-rpc-client.go index 9284e0784..2e15ed8f1 100644 --- a/cmd/auth-rpc-client.go +++ b/cmd/auth-rpc-client.go @@ -76,7 +76,6 @@ type AuthRPCClient struct { mu sync.Mutex config *authConfig rpc *RPCClient // reconnect'able rpc client built on top of net/rpc Client - isLoggedIn bool // Indicates if the auth client has been logged in and token is valid. serverToken string // Disk rpc JWT based token. serverVersion string // Server version exchanged by the RPC. } @@ -88,8 +87,6 @@ func newAuthClient(cfg *authConfig) *AuthRPCClient { config: cfg, // Initialize a new reconnectable rpc client. rpc: newRPCClient(cfg.address, cfg.path, cfg.secureConn), - // Allocated auth client not logged in yet. 
- isLoggedIn: false, } } @@ -97,7 +94,7 @@ func newAuthClient(cfg *authConfig) *AuthRPCClient { func (authClient *AuthRPCClient) Close() error { authClient.mu.Lock() // reset token on closing a connection - authClient.isLoggedIn = false + authClient.serverToken = "" authClient.mu.Unlock() return authClient.rpc.Close() } @@ -109,7 +106,7 @@ func (authClient *AuthRPCClient) Login() (err error) { defer authClient.mu.Unlock() // Return if already logged in. - if authClient.isLoggedIn { + if authClient.serverToken != "" { return nil } @@ -135,7 +132,6 @@ func (authClient *AuthRPCClient) Login() (err error) { // Set token, time stamp as received from a successful login call. authClient.serverToken = reply.Token authClient.serverVersion = reply.ServerVersion - authClient.isLoggedIn = true return nil } @@ -146,21 +142,34 @@ func (authClient *AuthRPCClient) Call(serviceMethod string, args interface { SetToken(token string) SetTimestamp(tstamp time.Time) }, reply interface{}) (err error) { - // On successful login, attempt the call. - if err = authClient.Login(); err == nil { - // Set token and timestamp before the rpc call. - args.SetToken(authClient.serverToken) - args.SetTimestamp(time.Now().UTC()) + loginAndCallFn := func() error { + // On successful login, proceed to attempt the requested service method. + if err = authClient.Login(); err == nil { + // Set token and timestamp before the rpc call. + args.SetToken(authClient.serverToken) + args.SetTimestamp(time.Now().UTC()) - // Call the underlying rpc. - err = authClient.rpc.Call(serviceMethod, args, reply) - - // Invalidate token, and mark it for re-login on subsequent reconnect. - if err == rpc.ErrShutdown { - authClient.mu.Lock() - authClient.isLoggedIn = false - authClient.mu.Unlock() + // Finally make the network call using net/rpc client. 
+ err = authClient.rpc.Call(serviceMethod, args, reply) } + return err + } + doneCh := make(chan struct{}) + defer close(doneCh) + for i := range newRetryTimer(time.Second, time.Second*30, MaxJitter, doneCh) { + // Invalidate token, and mark it for re-login and + // reconnect upon rpc shutdown. + if err = loginAndCallFn(); err == rpc.ErrShutdown { + // Close the underlying connection, and proceed to reconnect + // if we haven't reached the retry threshold. + authClient.Close() + + // No need to return error until the retry count threshold has reached. + if i < globalMaxAuthRPCRetryThreshold { + continue + } + } + break } return err } diff --git a/cmd/bucket-metadata.go b/cmd/bucket-metadata.go index 9339c553f..01784e6e0 100644 --- a/cmd/bucket-metadata.go +++ b/cmd/bucket-metadata.go @@ -16,10 +16,7 @@ package cmd -import ( - "encoding/json" - "net/rpc" -) +import "encoding/json" // BucketMetaState - Interface to update bucket metadata in-memory // state. @@ -112,62 +109,26 @@ type remoteBucketMetaState struct { // change to remote peer via RPC call. func (rc *remoteBucketMetaState) UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error { reply := GenericReply{} - err := rc.Call("S3.SetBucketNotificationPeer", args, &reply) - // Check for network error and retry once. - if err != nil && err == rpc.ErrShutdown { - // Close the underlying connection to attempt once more. - rc.Close() - - // Attempt again and proceed. - err = rc.Call("S3.SetBucketNotificationPeer", args, &reply) - } - return err + return rc.Call("S3.SetBucketNotificationPeer", args, &reply) } // remoteBucketMetaState.UpdateBucketListener - sends bucket listener change to // remote peer via RPC call. func (rc *remoteBucketMetaState) UpdateBucketListener(args *SetBucketListenerPeerArgs) error { reply := GenericReply{} - err := rc.Call("S3.SetBucketListenerPeer", args, &reply) - // Check for network error and retry once. 
- if err != nil && err == rpc.ErrShutdown { - // Close the underlying connection to attempt once more. - rc.Close() - - // Attempt again and proceed. - err = rc.Call("S3.SetBucketListenerPeer", args, &reply) - } - return err + return rc.Call("S3.SetBucketListenerPeer", args, &reply) } // remoteBucketMetaState.UpdateBucketPolicy - sends bucket policy change to remote // peer via RPC call. func (rc *remoteBucketMetaState) UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error { reply := GenericReply{} - err := rc.Call("S3.SetBucketPolicyPeer", args, &reply) - // Check for network error and retry once. - if err != nil && err == rpc.ErrShutdown { - // Close the underlying connection to attempt once more. - rc.Close() - - // Attempt again and proceed. - err = rc.Call("S3.SetBucketPolicyPeer", args, &reply) - } - return err + return rc.Call("S3.SetBucketPolicyPeer", args, &reply) } // remoteBucketMetaState.SendEvent - sends event for bucket listener to remote // peer via RPC call. func (rc *remoteBucketMetaState) SendEvent(args *EventArgs) error { reply := GenericReply{} - err := rc.Call("S3.Event", args, &reply) - // Check for network error and retry once. - if err != nil && err == rpc.ErrShutdown { - // Close the underlying connection to attempt once more. - rc.Close() - - // Attempt again and proceed. - err = rc.Call("S3.Event", args, &reply) - } - return err + return rc.Call("S3.Event", args, &reply) } diff --git a/cmd/globals.go b/cmd/globals.go index 613d359a3..fad83d694 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -93,6 +93,10 @@ var ( // giving up on the remote disk entirely. globalMaxStorageRetryThreshold = 3 + // Attempt to retry only this many number of times before + // giving up on the remote RPC entirely. + globalMaxAuthRPCRetryThreshold = 1 + // Add new variable global values here. 
) diff --git a/cmd/storage-rpc-client.go b/cmd/storage-rpc-client.go index fadf66b0c..4fd0c85f9 100644 --- a/cmd/storage-rpc-client.go +++ b/cmd/storage-rpc-client.go @@ -23,7 +23,9 @@ import ( "net/rpc" "net/url" "path" + "sync" "sync/atomic" + "time" "github.com/minio/minio/pkg/disk" ) @@ -32,7 +34,7 @@ type networkStorage struct { networkIOErrCount int32 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG netAddr string netPath string - rpcClient *AuthRPCClient + rpcClient *storageRPCClient } const ( @@ -97,6 +99,104 @@ func toStorageErr(err error) error { return err } +// storageRPCClient is a wrapper type for RPCClient which provides JWT based authentication across reconnects. +type storageRPCClient struct { + sync.Mutex + cfg storageConfig + rpc *RPCClient // reconnect'able rpc client built on top of net/rpc Client + serverToken string // Disk rpc JWT based token. + serverVersion string // Server version exchanged by the RPC. +} + +// Storage config represents authentication credentials and Login +// method name to be used for fetching JWT tokens from the storage +// server. +type storageConfig struct { + addr string // Network address path of storage RPC server. + path string // Network storage path for HTTP dial. + secureConn bool // Indicates if this storage RPC is on a secured connection. + creds credential +} + +// newStorageClient - returns a jwt based authenticated (go) storage rpc client. +func newStorageClient(storageCfg storageConfig) *storageRPCClient { + return &storageRPCClient{ + // Save the config. + cfg: storageCfg, + rpc: newRPCClient(storageCfg.addr, storageCfg.path, storageCfg.secureConn), + } +} + +// Close - closes underlying rpc connection. +func (storageClient *storageRPCClient) Close() error { + storageClient.Lock() + // reset token on closing a connection + storageClient.serverToken = "" + storageClient.Unlock() + return storageClient.rpc.Close() +} + +// Login - a jwt based authentication is performed with rpc server. 
+func (storageClient *storageRPCClient) Login() (err error) { + storageClient.Lock() + // As soon as the function returns unlock, + defer storageClient.Unlock() + + // Return if token is already set. + if storageClient.serverToken != "" { + return nil + } + + reply := RPCLoginReply{} + if err = storageClient.rpc.Call("Storage.LoginHandler", RPCLoginArgs{ + Username: storageClient.cfg.creds.AccessKey, + Password: storageClient.cfg.creds.SecretKey, + }, &reply); err != nil { + return err + } + + // Validate if version do indeed match. + if reply.ServerVersion != Version { + return errServerVersionMismatch + } + + // Validate if server timestamp is skewed. + curTime := time.Now().UTC() + if curTime.Sub(reply.Timestamp) > globalMaxSkewTime { + return errServerTimeMismatch + } + + // Set token, time stamp as received from a successful login call. + storageClient.serverToken = reply.Token + storageClient.serverVersion = reply.ServerVersion + return nil +} + +// Call - If rpc connection isn't established yet since previous disconnect, +// connection is established, a jwt authenticated login is performed and then +// the call is performed. +func (storageClient *storageRPCClient) Call(serviceMethod string, args interface { + SetToken(token string) + SetTimestamp(tstamp time.Time) +}, reply interface{}) (err error) { + // On successful login, attempt the call. + if err = storageClient.Login(); err != nil { + return err + } + // Set token and timestamp before the rpc call. + args.SetToken(storageClient.serverToken) + args.SetTimestamp(time.Now().UTC()) + + // Call the underlying rpc. + err = storageClient.rpc.Call(serviceMethod, args, reply) + + // Invalidate token, and mark it for re-login. + if err == rpc.ErrShutdown { + storageClient.Close() + } + return err +} + // Initialize new storage rpc client. 
func newStorageRPC(ep *url.URL) (StorageAPI, error) { if ep == nil { @@ -108,28 +208,28 @@ func newStorageRPC(ep *url.URL) (StorageAPI, error) { rpcAddr := ep.Host // Initialize rpc client with network address and rpc path. - accessKeyID := serverConfig.GetCredential().AccessKey - secretAccessKey := serverConfig.GetCredential().SecretKey + accessKey := serverConfig.GetCredential().AccessKey + secretKey := serverConfig.GetCredential().SecretKey if ep.User != nil { - accessKeyID = ep.User.Username() + accessKey = ep.User.Username() if key, set := ep.User.Password(); set { - secretAccessKey = key + secretKey = key } } - rpcClient := newAuthClient(&authConfig{ - accessKey: accessKeyID, - secretKey: secretAccessKey, - secureConn: isSSL(), - address: rpcAddr, - path: rpcPath, - loginMethod: "Storage.LoginHandler", - }) // Initialize network storage. ndisk := &networkStorage{ - netAddr: ep.Host, - netPath: getPath(ep), - rpcClient: rpcClient, + netAddr: ep.Host, + netPath: getPath(ep), + rpcClient: newStorageClient(storageConfig{ + addr: rpcAddr, + path: rpcPath, + creds: credential{ + AccessKey: accessKey, + SecretKey: secretKey, + }, + secureConn: isSSL(), + }), } // Returns successfully here. From 8562b2282332ed028a7695318ac22b0490958f08 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Fri, 30 Dec 2016 17:08:02 -0800 Subject: [PATCH 042/100] Fix delays and iterim fix for the partial fix in #3502 (#3511) This patch uses a technique where in a retryable storage before object layer initialization has a higher delay and waits for longer period upto 4 times with time unit of seconds. And uses another set of configuration after the disks have been formatted, i.e use a lower retry backoff rate and retrying only once per 5 millisecond. Network IO error count is reduced to a lower value i.e 256 before we reject the disk completely. 
This is done so that combination of retry logic and total error count roughly come to around 2.5secs which is when we basically take the disk offline completely. NOTE: This patch doesn't fix the issue of what if the disk is completely dead and comes back again after the initialization. Such a mutating state requires a change in our startup sequence which will be done subsequently. This is an interim fix to alleviate users from these issues. --- cmd/auth-rpc-client.go | 6 +- cmd/fs-v1_test.go | 8 +- cmd/globals.go | 8 -- cmd/prepare-storage.go | 28 ++++++- cmd/retry-storage.go | 21 ++++- cmd/retry-storage_test.go | 166 +++++++++++++++++++++++++++----------- cmd/retry.go | 2 +- cmd/storage-rpc-client.go | 10 ++- cmd/test-utils_test.go | 10 ++- 9 files changed, 191 insertions(+), 68 deletions(-) diff --git a/cmd/auth-rpc-client.go b/cmd/auth-rpc-client.go index 2e15ed8f1..2dec0f58f 100644 --- a/cmd/auth-rpc-client.go +++ b/cmd/auth-rpc-client.go @@ -22,6 +22,10 @@ import ( "time" ) +// Attempt to retry only this many number of times before +// giving up on the remote RPC entirely. +const globalAuthRPCRetryThreshold = 1 + // GenericReply represents any generic RPC reply. type GenericReply struct{} @@ -165,7 +169,7 @@ func (authClient *AuthRPCClient) Call(serviceMethod string, args interface { authClient.Close() // No need to return error until the retry count threshold has reached. 
- if i < globalMaxAuthRPCRetryThreshold { + if i < globalAuthRPCRetryThreshold { continue } } diff --git a/cmd/fs-v1_test.go b/cmd/fs-v1_test.go index 9b82d862e..7624e2106 100644 --- a/cmd/fs-v1_test.go +++ b/cmd/fs-v1_test.go @@ -20,6 +20,7 @@ import ( "bytes" "path/filepath" "testing" + "time" ) // TestNewFS - tests initialization of all input disks @@ -86,7 +87,12 @@ func TestNewFS(t *testing.T) { if err != errInvalidArgument { t.Errorf("Expecting error invalid argument, got %s", err) } - _, err = newFSObjects(&retryStorage{xlStorageDisks[0]}) + _, err = newFSObjects(&retryStorage{ + remoteStorage: xlStorageDisks[0], + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + }) if err != nil { errMsg := "Unable to recognize backend format, Disk is not in FS format." if err.Error() == errMsg { diff --git a/cmd/globals.go b/cmd/globals.go index fad83d694..3eadd1902 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -89,14 +89,6 @@ var ( // List of admin peers. globalAdminPeers = adminPeers{} - // Attempt to retry only this many number of times before - // giving up on the remote disk entirely. - globalMaxStorageRetryThreshold = 3 - - // Attempt to retry only this many number of times before - // giving up on the remote RPC entirely. - globalMaxAuthRPCRetryThreshold = 1 - // Add new variable global values here. ) diff --git a/cmd/prepare-storage.go b/cmd/prepare-storage.go index aebf8bc0a..d922db647 100644 --- a/cmd/prepare-storage.go +++ b/cmd/prepare-storage.go @@ -311,16 +311,40 @@ func waitForFormatDisks(firstDisk bool, endpoints []*url.URL, storageDisks []Sto if storageDisks == nil { return nil, errInvalidArgument } + + // Retryable disks before formatting, we need to have a larger + // retry window so that we wait enough amount of time before + // the disks come online. 
+ retryDisks := make([]StorageAPI, len(storageDisks)) + for i, storage := range storageDisks { + retryDisks[i] = &retryStorage{ + remoteStorage: storage, + maxRetryAttempts: globalStorageInitRetryThreshold, + retryUnit: time.Second, + retryCap: time.Second * 30, // 30 seconds. + } + } + // Start retry loop retrying until disks are formatted properly, until we have reached // a conditional quorum of formatted disks. - err = retryFormattingDisks(firstDisk, endpoints, storageDisks) + err = retryFormattingDisks(firstDisk, endpoints, retryDisks) if err != nil { return nil, err } + // Initialize the disk into a formatted disks wrapper. formattedDisks = make([]StorageAPI, len(storageDisks)) for i, storage := range storageDisks { - formattedDisks[i] = &retryStorage{storage} + // After formatting is done we need a smaller time + // window and lower retry value before formatting. + formattedDisks[i] = &retryStorage{ + remoteStorage: storage, + maxRetryAttempts: globalStorageRetryThreshold, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 5, // 5 milliseconds. + } } + + // Success. return formattedDisks, nil } diff --git a/cmd/retry-storage.go b/cmd/retry-storage.go index 9bc8ab1a1..c30c152a3 100644 --- a/cmd/retry-storage.go +++ b/cmd/retry-storage.go @@ -22,12 +22,25 @@ import ( "github.com/minio/minio/pkg/disk" ) +const ( + // Attempt to retry only this many number of times before + // giving up on the remote disk entirely during initialization. + globalStorageInitRetryThreshold = 4 + + // Attempt to retry only this many number of times before + // giving up on the remote disk entirely after initialization. + globalStorageRetryThreshold = 1 +) + // Retry storage is an instance of StorageAPI which // additionally verifies upon network shutdown if the // underlying storage is available and is really // formatted. 
type retryStorage struct { - remoteStorage StorageAPI + remoteStorage StorageAPI + maxRetryAttempts int + retryUnit time.Duration + retryCap time.Duration } // String representation of remoteStorage. @@ -209,13 +222,13 @@ func (f retryStorage) reInit() (err error) { doneCh := make(chan struct{}) defer close(doneCh) - for i := range newRetryTimer(time.Second, time.Second*30, MaxJitter, doneCh) { + for i := range newRetryTimer(f.retryUnit, f.retryCap, MaxJitter, doneCh) { // Initialize and make a new login attempt. err = f.remoteStorage.Init() if err != nil { // No need to return error until the retry count // threshold has reached. - if i < globalMaxStorageRetryThreshold { + if i < f.maxRetryAttempts { continue } return err @@ -226,7 +239,7 @@ func (f retryStorage) reInit() (err error) { if err != nil { // No need to return error until the retry count // threshold has reached. - if i < globalMaxStorageRetryThreshold { + if i < f.maxRetryAttempts { continue } return err diff --git a/cmd/retry-storage_test.go b/cmd/retry-storage_test.go index 67315a8f2..04556323b 100644 --- a/cmd/retry-storage_test.go +++ b/cmd/retry-storage_test.go @@ -20,6 +20,7 @@ import ( "bytes" "reflect" "testing" + "time" ) // Tests retry storage. @@ -39,9 +40,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } // Validate all the conditions for retrying calls. 
@@ -52,9 +58,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -77,9 +88,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -97,9 +113,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -114,9 +135,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -131,9 +157,14 @@ func TestRetryStorage(t *testing.T) { if 
!ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -151,9 +182,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -168,9 +204,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -185,9 +226,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -202,9 +248,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - 
storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -223,9 +274,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -248,9 +304,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -268,9 +329,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -285,9 +351,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, 
map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { @@ -306,9 +377,14 @@ func TestRetryStorage(t *testing.T) { if !ok { t.Fatal("storage disk is not *retryStorage type") } - storageDisks[i] = &retryStorage{newNaughtyDisk(retryDisk, map[int]error{ - 1: errDiskNotFound, - }, nil)} + storageDisks[i] = &retryStorage{ + remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ + 1: errDiskNotFound, + }, nil), + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + } } for _, disk := range storageDisks { diff --git a/cmd/retry.go b/cmd/retry.go index 364212af1..d3ad59e20 100644 --- a/cmd/retry.go +++ b/cmd/retry.go @@ -90,7 +90,7 @@ func newRetryTimer(unit time.Duration, cap time.Duration, jitter float64, doneCh go func() { defer close(attemptCh) - nextBackoff := 1 + nextBackoff := 0 for { select { // Attempts starts. diff --git a/cmd/storage-rpc-client.go b/cmd/storage-rpc-client.go index 4fd0c85f9..ebe639dd1 100644 --- a/cmd/storage-rpc-client.go +++ b/cmd/storage-rpc-client.go @@ -241,8 +241,14 @@ func (n *networkStorage) String() string { return n.netAddr + ":" + n.netPath } -// maximum allowed network IOError. -const maxAllowedNetworkIOError = 1024 +// Network IO error count is kept at 256 with some simple +// math. Before we reject the disk completely. The combination +// of retry logic and total error count roughly comes around +// 2.5secs ( 2 * 5 * time.Millisecond * 256) which is when we +// basically take the disk offline completely. This is considered +// sufficient time tradeoff to avoid large delays in-terms of +// incoming i/o. +const maxAllowedNetworkIOError = 256 // maximum allowed network IOError. // Initializes the remote RPC connection by attempting a login attempt. 
func (n *networkStorage) Init() (err error) { diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 39aeb9cc7..ba783bcbe 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -65,9 +65,6 @@ func init() { // Enable caching. setMaxMemory() - - // Tests don't need to retry. - globalMaxStorageRetryThreshold = 1 } func prepareFS() (ObjectLayer, string, error) { @@ -1656,7 +1653,12 @@ func prepareNErroredDisks(storageDisks []StorageAPI, offline int, err error, t * } for i := 0; i < offline; i++ { - storageDisks[i] = &naughtyDisk{disk: &retryStorage{storageDisks[i]}, defaultErr: err} + storageDisks[i] = &naughtyDisk{disk: &retryStorage{ + remoteStorage: storageDisks[i], + maxRetryAttempts: 1, + retryUnit: time.Millisecond, + retryCap: time.Millisecond * 10, + }, defaultErr: err} } return storageDisks } From cde6496172268083dfed58c26f4f0770567ffa25 Mon Sep 17 00:00:00 2001 From: Matthew Hall Date: Sat, 31 Dec 2016 19:57:33 -0800 Subject: [PATCH 043/100] checkdeps.sh: support unusual git version strings (for OS X et. al.) (#3512) --- buildscripts/checkdeps.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/buildscripts/checkdeps.sh b/buildscripts/checkdeps.sh index 7bcbd1120..057f17b9d 100644 --- a/buildscripts/checkdeps.sh +++ b/buildscripts/checkdeps.sh @@ -144,7 +144,8 @@ assert_check_golang_env() { } assert_check_deps() { - installed_git_version=$(git version | awk '{print $NF}') + # support unusual Git versions such as: 2.7.4 (Apple Git-66) + installed_git_version=$(git version | perl -ne '$_ =~ m/git version (.*?)( |$)/; print "$1\n";') if ! check_minimum_version "${GIT_VERSION}" "${installed_git_version}"; then echo "ERROR" echo "Git version '${installed_git_version}' not supported." From 6d10f4c19af6861e4de1b22ac20a3e5136f69d67 Mon Sep 17 00:00:00 2001 From: "Bala.FA" Date: Fri, 23 Dec 2016 20:42:19 +0530 Subject: [PATCH 044/100] Adopt dsync interface changes and major cleanup on RPC server/client. 
* Rename GenericArgs to AuthRPCArgs * Rename GenericReply to AuthRPCReply * Remove authConfig.loginMethod and add authConfig.ServiceName * Rename loginServer to AuthRPCServer * Rename RPCLoginArgs to LoginRPCArgs * Rename RPCLoginReply to LoginRPCReply * Version and RequestTime are added to LoginRPCArgs and verified by server side, not client side. * Fix data race in lockMaintainence loop. --- cmd/admin-rpc-client.go | 23 +- cmd/admin-rpc-server.go | 16 +- cmd/admin-rpc-server_test.go | 52 ++--- cmd/auth-rpc-client.go | 202 ++++++---------- cmd/auth-rpc-client_test.go | 44 ++-- cmd/{login-server.go => auth-rpc-server.go} | 22 +- cmd/auth-rpc-server_test.go | 117 ++++++++++ cmd/browser-peer-rpc.go | 42 ++-- cmd/browser-peer-rpc_test.go | 56 ++--- cmd/browser-rpc-router.go | 4 +- cmd/bucket-metadata.go | 8 +- cmd/bucket-notification-handlers.go | 3 +- cmd/event-notifier_test.go | 10 + cmd/lock-rpc-client.go | 71 ++++++ cmd/lock-rpc-server-common.go | 14 -- cmd/lock-rpc-server.go | 187 +++++++-------- cmd/lock-rpc-server_test.go | 216 ++++++++++-------- cmd/login-server_test.go | 67 ------ cmd/namespace-lock.go | 21 +- cmd/net-rpc-client.go | 88 ++++--- cmd/retry-storage.go | 2 + cmd/rpc-common.go | 111 +++++++++ cmd/s3-peer-client.go | 15 +- cmd/s3-peer-router.go | 4 +- cmd/s3-peer-rpc-handlers.go | 36 ++- cmd/s3-peer-rpc-handlers_test.go | 30 +-- cmd/storage-rpc-client.go | 162 +++---------- cmd/storage-rpc-server-datatypes.go | 18 +- cmd/storage-rpc-server.go | 82 ++++--- cmd/storage-rpc-server_test.go | 63 ++--- cmd/test-utils_test.go | 9 +- cmd/utils_test.go | 6 + cmd/web-handlers.go | 2 +- cmd/web-handlers_test.go | 14 +- vendor/github.com/minio/dsync/README.md | 2 +- vendor/github.com/minio/dsync/drwmutex.go | 121 +++++----- vendor/github.com/minio/dsync/dsync.go | 30 ++- .../minio/dsync/rpc-client-interface.go | 56 ++++- vendor/vendor.json | 6 +- 39 files changed, 1083 insertions(+), 949 deletions(-) rename cmd/{login-server.go => auth-rpc-server.go} (61%) 
create mode 100644 cmd/auth-rpc-server_test.go create mode 100644 cmd/lock-rpc-client.go delete mode 100644 cmd/login-server_test.go create mode 100644 cmd/rpc-common.go diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go index 43c9c3ae5..14f65626d 100644 --- a/cmd/admin-rpc-client.go +++ b/cmd/admin-rpc-client.go @@ -55,15 +55,15 @@ func (lc localAdminClient) Restart() error { // Stop - Sends stop command to remote server via RPC. func (rc remoteAdminClient) Stop() error { - args := GenericArgs{} - reply := GenericReply{} + args := AuthRPCArgs{} + reply := AuthRPCReply{} return rc.Call("Service.Shutdown", &args, &reply) } // Restart - Sends restart command to remote server via RPC. func (rc remoteAdminClient) Restart() error { - args := GenericArgs{} - reply := GenericReply{} + args := AuthRPCArgs{} + reply := AuthRPCReply{} return rc.Call("Service.Restart", &args, &reply) } @@ -90,6 +90,7 @@ func makeAdminPeers(eps []*url.URL) adminPeers { }) seenAddr[globalMinioAddr] = true + serverCred := serverConfig.GetCredential() // iterate over endpoints to find new remote peers and add // them to ret. 
for _, ep := range eps { @@ -100,17 +101,17 @@ func makeAdminPeers(eps []*url.URL) adminPeers { // Check if the remote host has been added already if !seenAddr[ep.Host] { cfg := authConfig{ - accessKey: serverConfig.GetCredential().AccessKey, - secretKey: serverConfig.GetCredential().SecretKey, - address: ep.Host, - secureConn: isSSL(), - path: path.Join(reservedBucket, servicePath), - loginMethod: "Service.LoginHandler", + accessKey: serverCred.AccessKey, + secretKey: serverCred.SecretKey, + serverAddr: ep.Host, + secureConn: isSSL(), + serviceEndpoint: path.Join(reservedBucket, servicePath), + serviceName: "Service", } servicePeers = append(servicePeers, adminPeer{ addr: ep.Host, - svcClnt: &remoteAdminClient{newAuthClient(&cfg)}, + svcClnt: &remoteAdminClient{newAuthRPCClient(cfg)}, }) seenAddr[ep.Host] = true } diff --git a/cmd/admin-rpc-server.go b/cmd/admin-rpc-server.go index af5cd0467..62751d114 100644 --- a/cmd/admin-rpc-server.go +++ b/cmd/admin-rpc-server.go @@ -27,23 +27,25 @@ const servicePath = "/admin/service" // serviceCmd - exports RPC methods for service status, stop and // restart commands. type serviceCmd struct { - loginServer + AuthRPCServer } // Shutdown - Shutdown this instance of minio server. -func (s *serviceCmd) Shutdown(args *GenericArgs, reply *GenericReply) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s *serviceCmd) Shutdown(args *AuthRPCArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } + globalServiceSignalCh <- serviceStop return nil } // Restart - Restart this instance of minio server. 
-func (s *serviceCmd) Restart(args *GenericArgs, reply *GenericReply) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s *serviceCmd) Restart(args *AuthRPCArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } + globalServiceSignalCh <- serviceRestart return nil } diff --git a/cmd/admin-rpc-server_test.go b/cmd/admin-rpc-server_test.go index a18f91547..1ebc24568 100644 --- a/cmd/admin-rpc-server_test.go +++ b/cmd/admin-rpc-server_test.go @@ -30,9 +30,14 @@ func testAdminCmd(cmd cmdType, t *testing.T) { adminServer := serviceCmd{} creds := serverConfig.GetCredential() - reply := RPCLoginReply{} - args := RPCLoginArgs{Username: creds.AccessKey, Password: creds.SecretKey} - err = adminServer.LoginHandler(&args, &reply) + args := LoginRPCArgs{ + Username: creds.AccessKey, + Password: creds.SecretKey, + Version: Version, + RequestTime: time.Now().UTC(), + } + reply := LoginRPCReply{} + err = adminServer.Login(&args, &reply) if err != nil { t.Fatalf("Failed to login to admin server - %v", err) } @@ -42,37 +47,16 @@ func testAdminCmd(cmd cmdType, t *testing.T) { <-globalServiceSignalCh }() - validToken := reply.Token - timeNow := time.Now().UTC() - testCases := []struct { - ga GenericArgs - expectedErr error - }{ - // Valid case - { - ga: GenericArgs{Token: validToken, Timestamp: timeNow}, - expectedErr: nil, - }, - // Invalid token - { - ga: GenericArgs{Token: "invalidToken", Timestamp: timeNow}, - expectedErr: errInvalidToken, - }, - } - - genReply := GenericReply{} - for i, test := range testCases { - switch cmd { - case stopCmd: - err = adminServer.Shutdown(&test.ga, &genReply) - if err != test.expectedErr { - t.Errorf("Test %d: Expected error %v but received %v", i+1, test.expectedErr, err) - } - case restartCmd: - err = adminServer.Restart(&test.ga, &genReply) - if err != test.expectedErr { - t.Errorf("Test %d: Expected error %v but received %v", i+1, test.expectedErr, err) - } + ga := 
AuthRPCArgs{AuthToken: reply.AuthToken, RequestTime: time.Now().UTC()} + genReply := AuthRPCReply{} + switch cmd { + case stopCmd: + if err = adminServer.Shutdown(&ga, &genReply); err != nil { + t.Errorf("stopCmd: Expected: , got: %v", err) + } + case restartCmd: + if err = adminServer.Restart(&ga, &genReply); err != nil { + t.Errorf("restartCmd: Expected: , got: %v", err) } } } diff --git a/cmd/auth-rpc-client.go b/cmd/auth-rpc-client.go index 2dec0f58f..0df2f5286 100644 --- a/cmd/auth-rpc-client.go +++ b/cmd/auth-rpc-client.go @@ -26,151 +26,98 @@ import ( // giving up on the remote RPC entirely. const globalAuthRPCRetryThreshold = 1 -// GenericReply represents any generic RPC reply. -type GenericReply struct{} - -// GenericArgs represents any generic RPC arguments. -type GenericArgs struct { - Token string // Used to authenticate every RPC call. - // Used to verify if the RPC call was issued between - // the same Login() and disconnect event pair. - Timestamp time.Time - - // Indicates if args should be sent to remote peers as well. - Remote bool -} - -// SetToken - sets the token to the supplied value. -func (ga *GenericArgs) SetToken(token string) { - ga.Token = token -} - -// SetTimestamp - sets the timestamp to the supplied value. -func (ga *GenericArgs) SetTimestamp(tstamp time.Time) { - ga.Timestamp = tstamp -} - -// RPCLoginArgs - login username and password for RPC. -type RPCLoginArgs struct { - Username string - Password string -} - -// RPCLoginReply - login reply provides generated token to be used -// with subsequent requests. -type RPCLoginReply struct { - Token string - Timestamp time.Time - ServerVersion string -} - -// Auth config represents authentication credentials and Login method name to be used -// for fetching JWT tokens from the RPC server. +// authConfig requires to make new AuthRPCClient. type authConfig struct { - accessKey string // Username for the server. - secretKey string // Password for the server. 
- secureConn bool // Ask for a secured connection - address string // Network address path of RPC server. - path string // Network path for HTTP dial. - loginMethod string // RPC service name for authenticating using JWT + accessKey string // Access key (like username) for authentication. + secretKey string // Secret key (like Password) for authentication. + serverAddr string // RPC server address. + serviceEndpoint string // Endpoint on the server to make any RPC call. + secureConn bool // Make TLS connection to RPC server or not. + serviceName string // Service name of auth server. + disableReconnect bool // Disable reconnect on failure or not. } -// AuthRPCClient is a wrapper type for RPCClient which provides JWT based authentication across reconnects. +// AuthRPCClient is a authenticated RPC client which does authentication before doing Call(). type AuthRPCClient struct { - mu sync.Mutex - config *authConfig - rpc *RPCClient // reconnect'able rpc client built on top of net/rpc Client - serverToken string // Disk rpc JWT based token. - serverVersion string // Server version exchanged by the RPC. + sync.Mutex // Mutex to lock this object. + rpcClient *RPCClient // Reconnectable RPC client to make any RPC call. + config authConfig // Authentication configuration information. + authToken string // Authentication token. } -// newAuthClient - returns a jwt based authenticated (go) rpc client, which does automatic reconnect. -func newAuthClient(cfg *authConfig) *AuthRPCClient { +// newAuthRPCClient - returns a JWT based authenticated (go) rpc client, which does automatic reconnect. +func newAuthRPCClient(config authConfig) *AuthRPCClient { return &AuthRPCClient{ - // Save the config. - config: cfg, - // Initialize a new reconnectable rpc client. - rpc: newRPCClient(cfg.address, cfg.path, cfg.secureConn), + rpcClient: newRPCClient(config.serverAddr, config.serviceEndpoint, config.secureConn), + config: config, } } -// Close - closes underlying rpc connection. 
-func (authClient *AuthRPCClient) Close() error { - authClient.mu.Lock() - // reset token on closing a connection - authClient.serverToken = "" - authClient.mu.Unlock() - return authClient.rpc.Close() -} - // Login - a jwt based authentication is performed with rpc server. func (authClient *AuthRPCClient) Login() (err error) { - authClient.mu.Lock() - // As soon as the function returns unlock, - defer authClient.mu.Unlock() + authClient.Lock() + defer authClient.Unlock() // Return if already logged in. - if authClient.serverToken != "" { + if authClient.authToken != "" { return nil } - reply := RPCLoginReply{} - if err = authClient.rpc.Call(authClient.config.loginMethod, RPCLoginArgs{ - Username: authClient.config.accessKey, - Password: authClient.config.secretKey, - }, &reply); err != nil { + // Call login. + args := LoginRPCArgs{ + Username: authClient.config.accessKey, + Password: authClient.config.secretKey, + Version: Version, + RequestTime: time.Now().UTC(), + } + + reply := LoginRPCReply{} + serviceMethod := authClient.config.serviceName + loginMethodName + if err = authClient.rpcClient.Call(serviceMethod, &args, &reply); err != nil { return err } - // Validate if version do indeed match. - if reply.ServerVersion != Version { - return errServerVersionMismatch - } + // Logged in successfully. + authClient.authToken = reply.AuthToken - // Validate if server timestamp is skewed. - curTime := time.Now().UTC() - if curTime.Sub(reply.Timestamp) > globalMaxSkewTime { - return errServerTimeMismatch - } - - // Set token, time stamp as received from a successful login call. - authClient.serverToken = reply.Token - authClient.serverVersion = reply.ServerVersion return nil } -// Call - If rpc connection isn't established yet since previous disconnect, -// connection is established, a jwt authenticated login is performed and then -// the call is performed. 
-func (authClient *AuthRPCClient) Call(serviceMethod string, args interface { - SetToken(token string) - SetTimestamp(tstamp time.Time) +// call makes a RPC call after logs into the server. +func (authClient *AuthRPCClient) call(serviceMethod string, args interface { + SetAuthToken(authToken string) + SetRequestTime(requestTime time.Time) }, reply interface{}) (err error) { - loginAndCallFn := func() error { - // On successful login, proceed to attempt the requested service method. - if err = authClient.Login(); err == nil { - // Set token and timestamp before the rpc call. - args.SetToken(authClient.serverToken) - args.SetTimestamp(time.Now().UTC()) + // On successful login, execute RPC call. + if err = authClient.Login(); err == nil { + // Set token and timestamp before the rpc call. + args.SetAuthToken(authClient.authToken) + args.SetRequestTime(time.Now().UTC()) - // Finally make the network call using net/rpc client. - err = authClient.rpc.Call(serviceMethod, args, reply) - } - return err + // Do RPC call. + err = authClient.rpcClient.Call(serviceMethod, args, reply) } + return err +} + +// Call executes RPC call till success or globalAuthRPCRetryThreshold on ErrShutdown. +func (authClient *AuthRPCClient) Call(serviceMethod string, args interface { + SetAuthToken(authToken string) + SetRequestTime(requestTime time.Time) +}, reply interface{}) (err error) { doneCh := make(chan struct{}) defer close(doneCh) - for i := range newRetryTimer(time.Second, time.Second*30, MaxJitter, doneCh) { - // Invalidate token, and mark it for re-login and - // reconnect upon rpc shutdown. - if err = loginAndCallFn(); err == rpc.ErrShutdown { - // Close the underlying connection, and proceed to reconnect - // if we haven't reached the retry threshold. + for i := range newRetryTimer(time.Second, 30*time.Second, MaxJitter, doneCh) { + if err = authClient.call(serviceMethod, args, reply); err == rpc.ErrShutdown { + // As connection at server side is closed, close the rpc client. 
authClient.Close() - // No need to return error until the retry count threshold has reached. - if i < globalAuthRPCRetryThreshold { - continue + // Retry if reconnect is not disabled. + if !authClient.config.disableReconnect { + // Retry until threshold reaches. + if i < globalAuthRPCRetryThreshold { + continue + } } } break @@ -178,18 +125,21 @@ func (authClient *AuthRPCClient) Call(serviceMethod string, args interface { return err } -// Node returns the node (network address) of the connection -func (authClient *AuthRPCClient) Node() (node string) { - if authClient.rpc != nil { - node = authClient.rpc.node - } - return node +// Close closes underlying RPC Client. +func (authClient *AuthRPCClient) Close() error { + authClient.Lock() + defer authClient.Unlock() + + authClient.authToken = "" + return authClient.rpcClient.Close() } -// RPCPath returns the RPC path of the connection -func (authClient *AuthRPCClient) RPCPath() (rpcPath string) { - if authClient.rpc != nil { - rpcPath = authClient.rpc.rpcPath - } - return rpcPath +// ServerAddr returns the serverAddr (network address) of the connection. +func (authClient *AuthRPCClient) ServerAddr() string { + return authClient.config.serverAddr +} + +// ServiceEndpoint returns the RPC service endpoint of the connection. +func (authClient *AuthRPCClient) ServiceEndpoint() string { + return authClient.config.serviceEndpoint } diff --git a/cmd/auth-rpc-client_test.go b/cmd/auth-rpc-client_test.go index fb27d4822..9b9076d7f 100644 --- a/cmd/auth-rpc-client_test.go +++ b/cmd/auth-rpc-client_test.go @@ -20,32 +20,32 @@ import "testing" // Tests authorized RPC client. 
func TestAuthRPCClient(t *testing.T) { - authCfg := &authConfig{ + authCfg := authConfig{ + accessKey: "123", + secretKey: "123", + serverAddr: "localhost:9000", + serviceEndpoint: "/rpc/disk", + secureConn: false, + serviceName: "MyPackage", + } + authRPC := newAuthRPCClient(authCfg) + if authRPC.ServerAddr() != authCfg.serverAddr { + t.Fatalf("Unexpected node value %s, but expected %s", authRPC.ServerAddr(), authCfg.serverAddr) + } + if authRPC.ServiceEndpoint() != authCfg.serviceEndpoint { + t.Fatalf("Unexpected node value %s, but expected %s", authRPC.ServiceEndpoint(), authCfg.serviceEndpoint) + } + authCfg = authConfig{ accessKey: "123", secretKey: "123", secureConn: false, - address: "localhost:9000", - path: "/rpc/disk", - loginMethod: "MyPackage.LoginHandler", + serviceName: "MyPackage", } - authRPC := newAuthClient(authCfg) - if authRPC.Node() != authCfg.address { - t.Fatalf("Unexpected node value %s, but expected %s", authRPC.Node(), authCfg.address) + authRPC = newAuthRPCClient(authCfg) + if authRPC.ServerAddr() != authCfg.serverAddr { + t.Fatalf("Unexpected node value %s, but expected %s", authRPC.ServerAddr(), authCfg.serverAddr) } - if authRPC.RPCPath() != authCfg.path { - t.Fatalf("Unexpected node value %s, but expected %s", authRPC.RPCPath(), authCfg.path) - } - authCfg = &authConfig{ - accessKey: "123", - secretKey: "123", - secureConn: false, - loginMethod: "MyPackage.LoginHandler", - } - authRPC = newAuthClient(authCfg) - if authRPC.Node() != authCfg.address { - t.Fatalf("Unexpected node value %s, but expected %s", authRPC.Node(), authCfg.address) - } - if authRPC.RPCPath() != authCfg.path { - t.Fatalf("Unexpected node value %s, but expected %s", authRPC.RPCPath(), authCfg.path) + if authRPC.ServiceEndpoint() != authCfg.serviceEndpoint { + t.Fatalf("Unexpected node value %s, but expected %s", authRPC.ServiceEndpoint(), authCfg.serviceEndpoint) } } diff --git a/cmd/login-server.go b/cmd/auth-rpc-server.go similarity index 61% rename from 
cmd/login-server.go rename to cmd/auth-rpc-server.go index 223288609..05a62b826 100644 --- a/cmd/login-server.go +++ b/cmd/auth-rpc-server.go @@ -16,20 +16,28 @@ package cmd -import "time" +// Base login method name. It should be used along with service name. +const loginMethodName = ".Login" -type loginServer struct { +// AuthRPCServer RPC server authenticates using JWT. +type AuthRPCServer struct { } -// LoginHandler - Handles JWT based RPC logic. -func (b loginServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error { +// Login - Handles JWT based RPC login. +func (b AuthRPCServer) Login(args *LoginRPCArgs, reply *LoginRPCReply) error { + // Validate LoginRPCArgs + if err := args.IsValid(); err != nil { + return err + } + + // Authenticate using JWT. token, err := authenticateNode(args.Username, args.Password) if err != nil { return err } - reply.Token = token - reply.Timestamp = time.Now().UTC() - reply.ServerVersion = Version + // Return the token. + reply.AuthToken = token + return nil } diff --git a/cmd/auth-rpc-server_test.go b/cmd/auth-rpc-server_test.go new file mode 100644 index 000000000..e95c98340 --- /dev/null +++ b/cmd/auth-rpc-server_test.go @@ -0,0 +1,117 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cmd + +import ( + "testing" + "time" +) + +func TestLogin(t *testing.T) { + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Failed to create test config - %v", err) + } + defer removeAll(rootPath) + creds := serverConfig.GetCredential() + ls := AuthRPCServer{} + testCases := []struct { + args LoginRPCArgs + skewTime time.Duration + expectedErr error + }{ + // Valid case. + { + args: LoginRPCArgs{ + Username: creds.AccessKey, + Password: creds.SecretKey, + Version: Version, + }, + skewTime: 0, + expectedErr: nil, + }, + // Valid username, password and request time, not version. + { + args: LoginRPCArgs{ + Username: creds.AccessKey, + Password: creds.SecretKey, + Version: "INVALID-" + Version, + }, + skewTime: 0, + expectedErr: errServerVersionMismatch, + }, + // Valid username, password and version, not request time + { + args: LoginRPCArgs{ + Username: creds.AccessKey, + Password: creds.SecretKey, + Version: Version, + }, + skewTime: 20 * time.Minute, + expectedErr: errServerTimeMismatch, + }, + // Invalid username length + { + args: LoginRPCArgs{ + Username: "aaa", + Password: "minio123", + Version: Version, + }, + skewTime: 0, + expectedErr: errInvalidAccessKeyLength, + }, + // Invalid password length + { + args: LoginRPCArgs{ + Username: "minio", + Password: "aaa", + Version: Version, + }, + skewTime: 0, + expectedErr: errInvalidSecretKeyLength, + }, + // Invalid username + { + args: LoginRPCArgs{ + Username: "aaaaa", + Password: creds.SecretKey, + Version: Version, + }, + skewTime: 0, + expectedErr: errInvalidAccessKeyID, + }, + // Invalid password + { + args: LoginRPCArgs{ + Username: creds.AccessKey, + Password: "aaaaaaaa", + Version: Version, + }, + skewTime: 0, + expectedErr: errAuthentication, + }, + } + for i, test := range testCases { + reply := LoginRPCReply{} + test.args.RequestTime = time.Now().Add(test.skewTime).UTC() + err := ls.Login(&test.args, &reply) + if err != test.expectedErr { + t.Errorf("Test %d: 
Expected error %v but received %v", + i+1, test.expectedErr, err) + } + } +} diff --git a/cmd/browser-peer-rpc.go b/cmd/browser-peer-rpc.go index 17d9254f6..3af174d06 100644 --- a/cmd/browser-peer-rpc.go +++ b/cmd/browser-peer-rpc.go @@ -24,22 +24,28 @@ import ( // Login handler implements JWT login token generator, which upon login request // along with username and password is generated. -func (br *browserPeerAPIHandlers) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error { +func (br *browserPeerAPIHandlers) Login(args *LoginRPCArgs, reply *LoginRPCReply) error { + // Validate LoginRPCArgs + if err := args.IsValid(); err != nil { + return err + } + + // Authenticate using JWT. token, err := authenticateWeb(args.Username, args.Password) if err != nil { return err } - reply.Token = token - reply.ServerVersion = Version - reply.Timestamp = time.Now().UTC() + // Return the token. + reply.AuthToken = token + return nil } // SetAuthPeerArgs - Arguments collection for SetAuth RPC call type SetAuthPeerArgs struct { // For Auth - GenericArgs + AuthRPCArgs // New credentials that receiving peer should update to. Creds credential @@ -52,10 +58,9 @@ type SetAuthPeerArgs struct { // will be forced to re-establish connections. Connections will be // re-established only when the sending client has also updated its // credentials. -func (br *browserPeerAPIHandlers) SetAuthPeer(args SetAuthPeerArgs, reply *GenericReply) error { - // Check auth - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (br *browserPeerAPIHandlers) SetAuthPeer(args SetAuthPeerArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } // Update credentials in memory @@ -82,6 +87,7 @@ func updateCredsOnPeers(creds credential) map[string]error { errs := make([]error, len(peers)) var wg sync.WaitGroup + serverCred := serverConfig.GetCredential() // Launch go routines to send request to each peer in parallel. 
for ix := range peers { wg.Add(1) @@ -96,13 +102,13 @@ func updateCredsOnPeers(creds credential) map[string]error { } // Initialize client - client := newAuthClient(&authConfig{ - accessKey: serverConfig.GetCredential().AccessKey, - secretKey: serverConfig.GetCredential().SecretKey, - address: peers[ix], - secureConn: isSSL(), - path: path.Join(reservedBucket, browserPeerPath), - loginMethod: "Browser.LoginHandler", + client := newAuthRPCClient(authConfig{ + accessKey: serverCred.AccessKey, + secretKey: serverCred.SecretKey, + serverAddr: peers[ix], + secureConn: isSSL(), + serviceEndpoint: path.Join(reservedBucket, browserPeerPath), + serviceName: "Browser", }) // Construct RPC call arguments. @@ -110,14 +116,14 @@ func updateCredsOnPeers(creds credential) map[string]error { // Make RPC call - we only care about error // response and not the reply. - err := client.Call("Browser.SetAuthPeer", &args, &GenericReply{}) + err := client.Call("Browser.SetAuthPeer", &args, &AuthRPCReply{}) // We try a bit hard (3 attempts with 1 second delay) // to set creds on peers in case of failure. if err != nil { for i := 0; i < 2; i++ { time.Sleep(1 * time.Second) // 1 second delay. - err = client.Call("Browser.SetAuthPeer", &args, &GenericReply{}) + err = client.Call("Browser.SetAuthPeer", &args, &AuthRPCReply{}) if err == nil { break } diff --git a/cmd/browser-peer-rpc_test.go b/cmd/browser-peer-rpc_test.go index 4adfd4abf..5554c4f32 100644 --- a/cmd/browser-peer-rpc_test.go +++ b/cmd/browser-peer-rpc_test.go @@ -19,24 +19,25 @@ package cmd import ( "path" "testing" + "time" ) // API suite container common to both FS and XL. type TestRPCBrowserPeerSuite struct { serverType string testServer TestServer - testAuthConf *authConfig + testAuthConf authConfig } // Setting up the test suite and starting the Test server. 
func (s *TestRPCBrowserPeerSuite) SetUpSuite(c *testing.T) { s.testServer = StartTestBrowserPeerRPCServer(c, s.serverType) - s.testAuthConf = &authConfig{ - address: s.testServer.Server.Listener.Addr().String(), - accessKey: s.testServer.AccessKey, - secretKey: s.testServer.SecretKey, - path: path.Join(reservedBucket, browserPeerPath), - loginMethod: "BrowserPeer.LoginHandler", + s.testAuthConf = authConfig{ + serverAddr: s.testServer.Server.Listener.Addr().String(), + accessKey: s.testServer.AccessKey, + secretKey: s.testServer.SecretKey, + serviceEndpoint: path.Join(reservedBucket, browserPeerPath), + serviceName: "BrowserPeer", } } @@ -69,10 +70,10 @@ func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) { // Validate for invalid token. args := SetAuthPeerArgs{Creds: creds} - args.Token = "garbage" - rclient := newRPCClient(s.testAuthConf.address, s.testAuthConf.path, false) + args.AuthToken = "garbage" + rclient := newRPCClient(s.testAuthConf.serverAddr, s.testAuthConf.serviceEndpoint, false) defer rclient.Close() - err := rclient.Call("BrowserPeer.SetAuthPeer", &args, &GenericReply{}) + err := rclient.Call("BrowserPeer.SetAuthPeer", &args, &AuthRPCReply{}) if err != nil { if err.Error() != errInvalidToken.Error() { t.Fatal(err) @@ -81,22 +82,24 @@ func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) { // Validate for successful Peer update. args = SetAuthPeerArgs{Creds: creds} - client := newAuthClient(s.testAuthConf) + client := newAuthRPCClient(s.testAuthConf) defer client.Close() - err = client.Call("BrowserPeer.SetAuthPeer", &args, &GenericReply{}) + err = client.Call("BrowserPeer.SetAuthPeer", &args, &AuthRPCReply{}) if err != nil { t.Fatal(err) } // Validate for failure in login handler with previous credentials. 
- rclient = newRPCClient(s.testAuthConf.address, s.testAuthConf.path, false) + rclient = newRPCClient(s.testAuthConf.serverAddr, s.testAuthConf.serviceEndpoint, false) defer rclient.Close() - rargs := &RPCLoginArgs{ - Username: s.testAuthConf.accessKey, - Password: s.testAuthConf.secretKey, + rargs := &LoginRPCArgs{ + Username: creds.AccessKey, + Password: creds.SecretKey, + Version: Version, + RequestTime: time.Now().UTC(), } - rreply := &RPCLoginReply{} - err = rclient.Call("BrowserPeer.LoginHandler", rargs, rreply) + rreply := &LoginRPCReply{} + err = rclient.Call("BrowserPeer"+loginMethodName, rargs, rreply) if err != nil { if err.Error() != errInvalidAccessKeyID.Error() { t.Fatal(err) @@ -104,20 +107,19 @@ func (s *TestRPCBrowserPeerSuite) testBrowserPeerRPC(t *testing.T) { } // Validate for success in loing handled with valid credetnails. - rargs = &RPCLoginArgs{ - Username: creds.AccessKey, - Password: creds.SecretKey, + rargs = &LoginRPCArgs{ + Username: creds.AccessKey, + Password: creds.SecretKey, + Version: Version, + RequestTime: time.Now().UTC(), } - rreply = &RPCLoginReply{} - err = rclient.Call("BrowserPeer.LoginHandler", rargs, rreply) + rreply = &LoginRPCReply{} + err = rclient.Call("BrowserPeer"+loginMethodName, rargs, rreply) if err != nil { t.Fatal(err) } // Validate all the replied fields after successful login. - if rreply.Token == "" { + if rreply.AuthToken == "" { t.Fatalf("Generated token cannot be empty %s", errInvalidToken) } - if rreply.Timestamp.IsZero() { - t.Fatal("Time stamp returned cannot be zero") - } } diff --git a/cmd/browser-rpc-router.go b/cmd/browser-rpc-router.go index 74007f92e..f7b762441 100644 --- a/cmd/browser-rpc-router.go +++ b/cmd/browser-rpc-router.go @@ -31,7 +31,9 @@ const ( ) // The Type exporting methods exposed for RPC calls. 
-type browserPeerAPIHandlers struct{} +type browserPeerAPIHandlers struct { + AuthRPCServer +} // Register RPC router func registerBrowserPeerRPCRouter(mux *router.Router) error { diff --git a/cmd/bucket-metadata.go b/cmd/bucket-metadata.go index 01784e6e0..fb23b9211 100644 --- a/cmd/bucket-metadata.go +++ b/cmd/bucket-metadata.go @@ -108,27 +108,27 @@ type remoteBucketMetaState struct { // remoteBucketMetaState.UpdateBucketNotification - sends bucket notification // change to remote peer via RPC call. func (rc *remoteBucketMetaState) UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error { - reply := GenericReply{} + reply := AuthRPCReply{} return rc.Call("S3.SetBucketNotificationPeer", args, &reply) } // remoteBucketMetaState.UpdateBucketListener - sends bucket listener change to // remote peer via RPC call. func (rc *remoteBucketMetaState) UpdateBucketListener(args *SetBucketListenerPeerArgs) error { - reply := GenericReply{} + reply := AuthRPCReply{} return rc.Call("S3.SetBucketListenerPeer", args, &reply) } // remoteBucketMetaState.UpdateBucketPolicy - sends bucket policy change to remote // peer via RPC call. func (rc *remoteBucketMetaState) UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error { - reply := GenericReply{} + reply := AuthRPCReply{} return rc.Call("S3.SetBucketPolicyPeer", args, &reply) } // remoteBucketMetaState.SendEvent - sends event for bucket listener to remote // peer via RPC call. 
func (rc *remoteBucketMetaState) SendEvent(args *EventArgs) error { - reply := GenericReply{} + reply := AuthRPCReply{} return rc.Call("S3.Event", args, &reply) } diff --git a/cmd/bucket-notification-handlers.go b/cmd/bucket-notification-handlers.go index 0a3b9e8d8..b7d7414e5 100644 --- a/cmd/bucket-notification-handlers.go +++ b/cmd/bucket-notification-handlers.go @@ -404,8 +404,7 @@ func AddBucketListenerConfig(bucket string, lcfg *listenerConfig, objAPI ObjectL func RemoveBucketListenerConfig(bucket string, lcfg *listenerConfig, objAPI ObjectLayer) { listenerCfgs := globalEventNotifier.GetBucketListenerConfig(bucket) - // remove listener with matching ARN - if not found ignore and - // exit. + // remove listener with matching ARN - if not found ignore and exit. var updatedLcfgs []listenerConfig found := false for k, configuredLcfg := range listenerCfgs { diff --git a/cmd/event-notifier_test.go b/cmd/event-notifier_test.go index b255a56d1..ee842676a 100644 --- a/cmd/event-notifier_test.go +++ b/cmd/event-notifier_test.go @@ -227,6 +227,11 @@ func TestSetNGetBucketNotification(t *testing.T) { } func TestInitEventNotifier(t *testing.T) { + currentIsDistXL := globalIsDistXL + defer func() { + globalIsDistXL = currentIsDistXL + }() + s := TestPeerRPCServerData{serverType: "XL"} // setup and teardown @@ -323,6 +328,11 @@ func TestInitEventNotifier(t *testing.T) { } func TestListenBucketNotification(t *testing.T) { + currentIsDistXL := globalIsDistXL + defer func() { + globalIsDistXL = currentIsDistXL + }() + s := TestPeerRPCServerData{serverType: "XL"} // setup and teardown s.Setup(t) diff --git a/cmd/lock-rpc-client.go b/cmd/lock-rpc-client.go new file mode 100644 index 000000000..ab302e3cf --- /dev/null +++ b/cmd/lock-rpc-client.go @@ -0,0 +1,71 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import "github.com/minio/dsync" + +// LockRPCClient is authenticable lock RPC client compatible to dsync.NetLocker +type LockRPCClient struct { + *AuthRPCClient +} + +// newLockRPCClient returns new lock RPC client object. +func newLockRPCClient(config authConfig) *LockRPCClient { + return &LockRPCClient{newAuthRPCClient(config)} +} + +// RLock calls read lock RPC. +func (lockRPCClient *LockRPCClient) RLock(args dsync.LockArgs) (reply bool, err error) { + lockArgs := newLockArgs(args) + err = lockRPCClient.AuthRPCClient.Call("Dsync.RLock", &lockArgs, &reply) + return reply, err +} + +// Lock calls write lock RPC. +func (lockRPCClient *LockRPCClient) Lock(args dsync.LockArgs) (reply bool, err error) { + lockArgs := newLockArgs(args) + err = lockRPCClient.AuthRPCClient.Call("Dsync.Lock", &lockArgs, &reply) + return reply, err +} + +// RUnlock calls read unlock RPC. +func (lockRPCClient *LockRPCClient) RUnlock(args dsync.LockArgs) (reply bool, err error) { + lockArgs := newLockArgs(args) + err = lockRPCClient.AuthRPCClient.Call("Dsync.RUnlock", &lockArgs, &reply) + return reply, err +} + +// Unlock calls write unlock RPC. +func (lockRPCClient *LockRPCClient) Unlock(args dsync.LockArgs) (reply bool, err error) { + lockArgs := newLockArgs(args) + err = lockRPCClient.AuthRPCClient.Call("Dsync.Unlock", &lockArgs, &reply) + return reply, err +} + +// ForceUnlock calls force unlock RPC. 
+func (lockRPCClient *LockRPCClient) ForceUnlock(args dsync.LockArgs) (reply bool, err error) { + lockArgs := newLockArgs(args) + err = lockRPCClient.AuthRPCClient.Call("Dsync.ForceUnlock", &lockArgs, &reply) + return reply, err +} + +// Expired calls expired RPC. +func (lockRPCClient *LockRPCClient) Expired(args dsync.LockArgs) (reply bool, err error) { + lockArgs := newLockArgs(args) + err = lockRPCClient.AuthRPCClient.Call("Dsync.Expired", &lockArgs, &reply) + return reply, err +} diff --git a/cmd/lock-rpc-server-common.go b/cmd/lock-rpc-server-common.go index c0e9a11ad..41856172c 100644 --- a/cmd/lock-rpc-server-common.go +++ b/cmd/lock-rpc-server-common.go @@ -57,20 +57,6 @@ func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) boo return false } -// Validate lock args. -// - validate time stamp. -// - validate jwt token. -func (l *lockServer) validateLockArgs(args *LockArgs) error { - curTime := time.Now().UTC() - if curTime.Sub(args.Timestamp) > globalMaxSkewTime || args.Timestamp.Sub(curTime) > globalMaxSkewTime { - return errServerTimeMismatch - } - if !isAuthTokenValid(args.Token) { - return errInvalidToken - } - return nil -} - // getLongLivedLocks returns locks that are older than a certain time and // have not been 'checked' for validity too soon enough func getLongLivedLocks(m map[string][]lockRequesterInfo, interval time.Duration) []nameLockRequesterInfoPair { diff --git a/cmd/lock-rpc-server.go b/cmd/lock-rpc-server.go index 6e362557c..63873d4ac 100644 --- a/cmd/lock-rpc-server.go +++ b/cmd/lock-rpc-server.go @@ -25,32 +25,19 @@ import ( "time" router "github.com/gorilla/mux" + "github.com/minio/dsync" ) -const lockRPCPath = "/minio/lock" -const lockMaintenanceLoop = 1 * time.Minute -const lockCheckValidityInterval = 2 * time.Minute +const ( + // Lock rpc server endpoint. + lockRPCPath = "/minio/lock" -// LockArgs besides lock name, holds Token and Timestamp for session -// authentication and validation server restart. 
-type LockArgs struct { - Name string - Token string - Timestamp time.Time - Node string - RPCPath string - UID string -} + // Lock maintenance interval. + lockMaintenanceInterval = 1 * time.Minute // 1 minute. -// SetToken - sets the token to the supplied value. -func (l *LockArgs) SetToken(token string) { - l.Token = token -} - -// SetTimestamp - sets the timestamp to the supplied value. -func (l *LockArgs) SetTimestamp(tstamp time.Time) { - l.Timestamp = tstamp -} + // Lock validity check interval. + lockValidityCheckInterval = 2 * time.Minute // 2 minutes. +) // lockRequesterInfo stores various info from the client for each lock that is requested type lockRequesterInfo struct { @@ -69,43 +56,61 @@ func isWriteLock(lri []lockRequesterInfo) bool { // lockServer is type for RPC handlers type lockServer struct { - loginServer + AuthRPCServer rpcPath string mutex sync.Mutex lockMap map[string][]lockRequesterInfo } +// Start lock maintenance from all lock servers. +func startLockMaintainence(lockServers []*lockServer) { + for _, locker := range lockServers { + // Start loop for stale lock maintenance + go func(lk *lockServer) { + // Initialize a new ticker with a minute between each ticks. + ticker := time.NewTicker(lockMaintenanceInterval) + + // Start with random sleep time, so as to avoid "synchronous checks" between servers + time.Sleep(time.Duration(rand.Float64() * float64(lockMaintenanceInterval))) + for { + // Verifies every minute for locks held more than 2minutes. + select { + case <-ticker.C: + lk.lockMaintenance(lockValidityCheckInterval) + case <-globalServiceDoneCh: + // Stop the timer. + ticker.Stop() + } + } + }(locker) + } +} + // Register distributed NS lock handlers. func registerDistNSLockRouter(mux *router.Router, serverConfig serverCmdConfig) error { + // Initialize a new set of lock servers. lockServers := newLockServers(serverConfig) + + // Start lock maintenance from all lock servers. 
+ startLockMaintainence(lockServers) + + // Register initialized lock servers to their respective rpc endpoints. return registerStorageLockers(mux, lockServers) } // Create one lock server for every local storage rpc server. func newLockServers(srvConfig serverCmdConfig) (lockServers []*lockServer) { for _, ep := range srvConfig.endpoints { - // Not local storage move to the next node. - if !isLocalStorage(ep) { - continue - } - - // Create handler for lock RPCs - locker := &lockServer{ - rpcPath: getPath(ep), - mutex: sync.Mutex{}, - lockMap: make(map[string][]lockRequesterInfo), - } - - // Start loop for stale lock maintenance - go func() { - // Start with random sleep time, so as to avoid "synchronous checks" between servers - time.Sleep(time.Duration(rand.Float64() * float64(lockMaintenanceLoop))) - for { - time.Sleep(lockMaintenanceLoop) - locker.lockMaintenance(lockCheckValidityInterval) + // Initialize new lock server for each local node. + if isLocalStorage(ep) { + // Create handler for lock RPCs + locker := &lockServer{ + rpcPath: getPath(ep), + mutex: sync.Mutex{}, + lockMap: make(map[string][]lockRequesterInfo), } - }() - lockServers = append(lockServers, locker) + lockServers = append(lockServers, locker) + } } return lockServers } @@ -114,8 +119,7 @@ func newLockServers(srvConfig serverCmdConfig) (lockServers []*lockServer) { func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error { for _, lockServer := range lockServers { lockRPCServer := rpc.NewServer() - err := lockRPCServer.RegisterName("Dsync", lockServer) - if err != nil { + if err := lockRPCServer.RegisterName("Dsync", lockServer); err != nil { return traceError(err) } lockRouter := mux.PathPrefix(reservedBucket).Subrouter() @@ -130,17 +134,17 @@ func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error func (l *lockServer) Lock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() - if err := l.validateLockArgs(args); err != 
nil { + if err := args.IsAuthenticated(); err != nil { return err } - _, *reply = l.lockMap[args.Name] + _, *reply = l.lockMap[args.dsyncLockArgs.Resource] if !*reply { // No locks held on the given name, so claim write lock - l.lockMap[args.Name] = []lockRequesterInfo{ + l.lockMap[args.dsyncLockArgs.Resource] = []lockRequesterInfo{ { writer: true, - node: args.Node, - rpcPath: args.RPCPath, - uid: args.UID, + node: args.dsyncLockArgs.ServerAddr, + rpcPath: args.dsyncLockArgs.ServiceEndpoint, + uid: args.dsyncLockArgs.UID, timestamp: time.Now().UTC(), timeLastCheck: time.Now().UTC(), }, @@ -154,18 +158,18 @@ func (l *lockServer) Lock(args *LockArgs, reply *bool) error { func (l *lockServer) Unlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() - if err := l.validateLockArgs(args); err != nil { + if err := args.IsAuthenticated(); err != nil { return err } var lri []lockRequesterInfo - if lri, *reply = l.lockMap[args.Name]; !*reply { // No lock is held on the given name - return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Name) + if lri, *reply = l.lockMap[args.dsyncLockArgs.Resource]; !*reply { // No lock is held on the given name + return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.dsyncLockArgs.Resource) } if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock - return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Name, len(lri)) + return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.dsyncLockArgs.Resource, len(lri)) } - if !l.removeEntry(args.Name, args.UID, &lri) { - return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID) + if !l.removeEntry(args.dsyncLockArgs.Resource, args.dsyncLockArgs.UID, &lri) { + return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.dsyncLockArgs.UID) } return nil } @@ -174,23 +178,23 @@ func (l *lockServer) Unlock(args 
*LockArgs, reply *bool) error { func (l *lockServer) RLock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() - if err := l.validateLockArgs(args); err != nil { + if err := args.IsAuthenticated(); err != nil { return err } lrInfo := lockRequesterInfo{ writer: false, - node: args.Node, - rpcPath: args.RPCPath, - uid: args.UID, + node: args.dsyncLockArgs.ServerAddr, + rpcPath: args.dsyncLockArgs.ServiceEndpoint, + uid: args.dsyncLockArgs.UID, timestamp: time.Now().UTC(), timeLastCheck: time.Now().UTC(), } - if lri, ok := l.lockMap[args.Name]; ok { + if lri, ok := l.lockMap[args.dsyncLockArgs.Resource]; ok { if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock - l.lockMap[args.Name] = append(l.lockMap[args.Name], lrInfo) + l.lockMap[args.dsyncLockArgs.Resource] = append(l.lockMap[args.dsyncLockArgs.Resource], lrInfo) } } else { // No locks held on the given name, so claim (first) read lock - l.lockMap[args.Name] = []lockRequesterInfo{lrInfo} + l.lockMap[args.dsyncLockArgs.Resource] = []lockRequesterInfo{lrInfo} *reply = true } return nil @@ -200,18 +204,18 @@ func (l *lockServer) RLock(args *LockArgs, reply *bool) error { func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() - if err := l.validateLockArgs(args); err != nil { + if err := args.IsAuthenticated(); err != nil { return err } var lri []lockRequesterInfo - if lri, *reply = l.lockMap[args.Name]; !*reply { // No lock is held on the given name - return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Name) + if lri, *reply = l.lockMap[args.dsyncLockArgs.Resource]; !*reply { // No lock is held on the given name + return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.dsyncLockArgs.Resource) } if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock - return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Name) + return fmt.Errorf("RUnlock 
attempted on a write locked entity: %s", args.dsyncLockArgs.Resource) } - if !l.removeEntry(args.Name, args.UID, &lri) { - return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID) + if !l.removeEntry(args.dsyncLockArgs.Resource, args.dsyncLockArgs.UID, &lri) { + return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.dsyncLockArgs.UID) } return nil } @@ -220,14 +224,14 @@ func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error { func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() - if err := l.validateLockArgs(args); err != nil { + if err := args.IsAuthenticated(); err != nil { return err } - if len(args.UID) != 0 { - return fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID) + if len(args.dsyncLockArgs.UID) != 0 { + return fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.dsyncLockArgs.UID) } - if _, ok := l.lockMap[args.Name]; ok { // Only clear lock when set - delete(l.lockMap, args.Name) // Remove the lock (irrespective of write or read lock) + if _, ok := l.lockMap[args.dsyncLockArgs.Resource]; ok { // Only clear lock when set + delete(l.lockMap, args.dsyncLockArgs.Resource) // Remove the lock (irrespective of write or read lock) } *reply = true return nil @@ -237,21 +241,21 @@ func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) error { func (l *lockServer) Expired(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() - if err := l.validateLockArgs(args); err != nil { + if err := args.IsAuthenticated(); err != nil { return err } // Lock found, proceed to verify if belongs to given uid. 
- if lri, ok := l.lockMap[args.Name]; ok { + if lri, ok := l.lockMap[args.dsyncLockArgs.Resource]; ok { // Check whether uid is still active for _, entry := range lri { - if entry.uid == args.UID { + if entry.uid == args.dsyncLockArgs.UID { *reply = false // When uid found, lock is still active so return not expired. return nil // When uid found *reply is set to true. } } } - // When we get here lock is no longer active due to either args.Name - // being absent from map or uid not found for given args.Name + // When we get here lock is no longer active due to either args.dsyncLockArgs.Resource + // being absent from map or uid not found for given args.dsyncLockArgs.Resource *reply = true return nil } @@ -276,19 +280,24 @@ func (l *lockServer) lockMaintenance(interval time.Duration) { nlripLongLived := getLongLivedLocks(l.lockMap, interval) l.mutex.Unlock() + serverCred := serverConfig.GetCredential() // Validate if long lived locks are indeed clean. for _, nlrip := range nlripLongLived { // Initialize client based on the long live locks. - c := newRPCClient(nlrip.lri.node, nlrip.lri.rpcPath, isSSL()) - - var expired bool + c := newLockRPCClient(authConfig{ + accessKey: serverCred.AccessKey, + secretKey: serverCred.SecretKey, + serverAddr: nlrip.lri.node, + serviceEndpoint: nlrip.lri.rpcPath, + secureConn: isSSL(), + serviceName: "Dsync", + }) // Call back to original server verify whether the lock is still active (based on name & uid) - c.Call("Dsync.Expired", &LockArgs{ - Name: nlrip.name, - UID: nlrip.lri.uid, - }, &expired) - c.Close() // Close the connection regardless of the call response. + expired, _ := c.Expired(dsync.LockArgs{UID: nlrip.lri.uid, Resource: nlrip.name}) + + // Close the connection regardless of the call response. + c.rpcClient.Close() // For successful response, verify if lock is indeed active or stale. 
if expired { diff --git a/cmd/lock-rpc-server_test.go b/cmd/lock-rpc-server_test.go index 27098309e..eb8bebd31 100644 --- a/cmd/lock-rpc-server_test.go +++ b/cmd/lock-rpc-server_test.go @@ -22,6 +22,8 @@ import ( "sync" "testing" "time" + + "github.com/minio/dsync" ) // Helper function to test equality of locks (without taking timing info into account) @@ -49,38 +51,41 @@ func createLockTestServer(t *testing.T) (string, *lockServer, string) { } locker := &lockServer{ - loginServer: loginServer{}, - rpcPath: "rpc-path", - mutex: sync.Mutex{}, - lockMap: make(map[string][]lockRequesterInfo), + AuthRPCServer: AuthRPCServer{}, + rpcPath: "rpc-path", + mutex: sync.Mutex{}, + lockMap: make(map[string][]lockRequesterInfo), } creds := serverConfig.GetCredential() - loginArgs := RPCLoginArgs{Username: creds.AccessKey, Password: creds.SecretKey} - loginReply := RPCLoginReply{} - err = locker.LoginHandler(&loginArgs, &loginReply) + loginArgs := LoginRPCArgs{ + Username: creds.AccessKey, + Password: creds.SecretKey, + Version: Version, + RequestTime: time.Now().UTC(), + } + loginReply := LoginRPCReply{} + err = locker.Login(&loginArgs, &loginReply) if err != nil { t.Fatalf("Failed to login to lock server - %v", err) } - token := loginReply.Token + token := loginReply.AuthToken return testPath, locker, token } // Test Lock functionality func TestLockRpcServerLock(t *testing.T) { - - timestamp := time.Now().UTC() testPath, locker, token := createLockTestServer(t) defer removeAll(testPath) - la := LockArgs{ - Name: "name", - Token: token, - Timestamp: timestamp, - Node: "node", - RPCPath: "rpc-path", - UID: "0123-4567", - } + la := newLockArgs(dsync.LockArgs{ + UID: "0123-4567", + Resource: "name", + ServerAddr: "node", + ServiceEndpoint: "rpc-path", + }) + la.SetAuthToken(token) + la.SetRequestTime(time.Now().UTC()) // Claim a lock var result bool @@ -107,14 +112,15 @@ func TestLockRpcServerLock(t *testing.T) { } // Try to claim same lock again (will fail) - la2 := LockArgs{ - 
Name: "name", - Token: token, - Timestamp: timestamp, - Node: "node", - RPCPath: "rpc-path", - UID: "89ab-cdef", - } + la2 := newLockArgs(dsync.LockArgs{ + UID: "89ab-cdef", + Resource: "name", + ServerAddr: "node", + ServiceEndpoint: "rpc-path", + }) + la2.SetAuthToken(token) + la2.SetRequestTime(time.Now().UTC()) + err = locker.Lock(&la2, &result) if err != nil { t.Errorf("Expected %#v, got %#v", nil, err) @@ -127,19 +133,17 @@ func TestLockRpcServerLock(t *testing.T) { // Test Unlock functionality func TestLockRpcServerUnlock(t *testing.T) { - - timestamp := time.Now().UTC() testPath, locker, token := createLockTestServer(t) defer removeAll(testPath) - la := LockArgs{ - Name: "name", - Token: token, - Timestamp: timestamp, - Node: "node", - RPCPath: "rpc-path", - UID: "0123-4567", - } + la := newLockArgs(dsync.LockArgs{ + UID: "0123-4567", + Resource: "name", + ServerAddr: "node", + ServiceEndpoint: "rpc-path", + }) + la.SetAuthToken(token) + la.SetRequestTime(time.Now().UTC()) // First test return of error when attempting to unlock a lock that does not exist var result bool @@ -149,6 +153,7 @@ func TestLockRpcServerUnlock(t *testing.T) { } // Create lock (so that we can release) + la.SetRequestTime(time.Now().UTC()) err = locker.Lock(&la, &result) if err != nil { t.Errorf("Expected %#v, got %#v", nil, err) @@ -157,6 +162,7 @@ func TestLockRpcServerUnlock(t *testing.T) { } // Finally test successful release of lock + la.SetRequestTime(time.Now().UTC()) err = locker.Unlock(&la, &result) if err != nil { t.Errorf("Expected %#v, got %#v", nil, err) @@ -175,19 +181,17 @@ func TestLockRpcServerUnlock(t *testing.T) { // Test RLock functionality func TestLockRpcServerRLock(t *testing.T) { - - timestamp := time.Now().UTC() testPath, locker, token := createLockTestServer(t) defer removeAll(testPath) - la := LockArgs{ - Name: "name", - Token: token, - Timestamp: timestamp, - Node: "node", - RPCPath: "rpc-path", - UID: "0123-4567", - } + la := newLockArgs(dsync.LockArgs{ + 
UID: "0123-4567", + Resource: "name", + ServerAddr: "node", + ServiceEndpoint: "rpc-path", + }) + la.SetAuthToken(token) + la.SetRequestTime(time.Now().UTC()) // Claim a lock var result bool @@ -214,14 +218,15 @@ func TestLockRpcServerRLock(t *testing.T) { } // Try to claim same again (will succeed) - la2 := LockArgs{ - Name: "name", - Token: token, - Timestamp: timestamp, - Node: "node", - RPCPath: "rpc-path", - UID: "89ab-cdef", - } + la2 := newLockArgs(dsync.LockArgs{ + UID: "89ab-cdef", + Resource: "name", + ServerAddr: "node", + ServiceEndpoint: "rpc-path", + }) + la2.SetAuthToken(token) + la2.SetRequestTime(time.Now().UTC()) + err = locker.RLock(&la2, &result) if err != nil { t.Errorf("Expected %#v, got %#v", nil, err) @@ -234,19 +239,17 @@ func TestLockRpcServerRLock(t *testing.T) { // Test RUnlock functionality func TestLockRpcServerRUnlock(t *testing.T) { - - timestamp := time.Now().UTC() testPath, locker, token := createLockTestServer(t) defer removeAll(testPath) - la := LockArgs{ - Name: "name", - Token: token, - Timestamp: timestamp, - Node: "node", - RPCPath: "rpc-path", - UID: "0123-4567", - } + la := newLockArgs(dsync.LockArgs{ + UID: "0123-4567", + Resource: "name", + ServerAddr: "node", + ServiceEndpoint: "rpc-path", + }) + la.SetAuthToken(token) + la.SetRequestTime(time.Now().UTC()) // First test return of error when attempting to unlock a read-lock that does not exist var result bool @@ -256,6 +259,7 @@ func TestLockRpcServerRUnlock(t *testing.T) { } // Create first lock ... 
(so that we can release) + la.SetRequestTime(time.Now().UTC()) err = locker.RLock(&la, &result) if err != nil { t.Errorf("Expected %#v, got %#v", nil, err) @@ -263,14 +267,15 @@ func TestLockRpcServerRUnlock(t *testing.T) { t.Errorf("Expected %#v, got %#v", true, result) } - la2 := LockArgs{ - Name: "name", - Token: token, - Timestamp: timestamp, - Node: "node", - RPCPath: "rpc-path", - UID: "89ab-cdef", - } + // Try to claim same again (will succeed) + la2 := newLockArgs(dsync.LockArgs{ + UID: "89ab-cdef", + Resource: "name", + ServerAddr: "node", + ServiceEndpoint: "rpc-path", + }) + la2.SetAuthToken(token) + la2.SetRequestTime(time.Now().UTC()) // ... and create a second lock on same resource err = locker.RLock(&la2, &result) @@ -281,6 +286,7 @@ func TestLockRpcServerRUnlock(t *testing.T) { } // Test successful release of first read lock + la.SetRequestTime(time.Now().UTC()) err = locker.RUnlock(&la, &result) if err != nil { t.Errorf("Expected %#v, got %#v", nil, err) @@ -305,6 +311,7 @@ func TestLockRpcServerRUnlock(t *testing.T) { } // Finally test successful release of second (and last) read lock + la2.SetRequestTime(time.Now().UTC()) err = locker.RUnlock(&la2, &result) if err != nil { t.Errorf("Expected %#v, got %#v", nil, err) @@ -323,19 +330,17 @@ func TestLockRpcServerRUnlock(t *testing.T) { // Test ForceUnlock functionality func TestLockRpcServerForceUnlock(t *testing.T) { - - timestamp := time.Now().UTC() testPath, locker, token := createLockTestServer(t) defer removeAll(testPath) - laForce := LockArgs{ - Name: "name", - Token: token, - Timestamp: timestamp, - Node: "node", - RPCPath: "rpc-path", - UID: "1234-5678", - } + laForce := newLockArgs(dsync.LockArgs{ + UID: "1234-5678", + Resource: "name", + ServerAddr: "node", + ServiceEndpoint: "rpc-path", + }) + laForce.SetAuthToken(token) + laForce.SetRequestTime(time.Now().UTC()) // First test that UID should be empty var result bool @@ -345,20 +350,21 @@ func TestLockRpcServerForceUnlock(t *testing.T) { 
} // Then test force unlock of a lock that does not exist (not returning an error) - laForce.UID = "" + laForce.dsyncLockArgs.UID = "" + laForce.SetRequestTime(time.Now().UTC()) err = locker.ForceUnlock(&laForce, &result) if err != nil { t.Errorf("Expected no error, got %#v", err) } - la := LockArgs{ - Name: "name", - Token: token, - Timestamp: timestamp, - Node: "node", - RPCPath: "rpc-path", - UID: "0123-4567", - } + la := newLockArgs(dsync.LockArgs{ + UID: "0123-4567", + Resource: "name", + ServerAddr: "node", + ServiceEndpoint: "rpc-path", + }) + la.SetAuthToken(token) + la.SetRequestTime(time.Now().UTC()) // Create lock ... (so that we can force unlock) err = locker.Lock(&la, &result) @@ -369,12 +375,14 @@ func TestLockRpcServerForceUnlock(t *testing.T) { } // Forcefully unlock the lock (not returning an error) + laForce.SetRequestTime(time.Now().UTC()) err = locker.ForceUnlock(&laForce, &result) if err != nil { t.Errorf("Expected no error, got %#v", err) } // Try to get lock again (should be granted) + la.SetRequestTime(time.Now().UTC()) err = locker.Lock(&la, &result) if err != nil { t.Errorf("Expected %#v, got %#v", nil, err) @@ -383,6 +391,7 @@ func TestLockRpcServerForceUnlock(t *testing.T) { } // Finally forcefully unlock the lock once again + laForce.SetRequestTime(time.Now().UTC()) err = locker.ForceUnlock(&laForce, &result) if err != nil { t.Errorf("Expected no error, got %#v", err) @@ -391,18 +400,17 @@ func TestLockRpcServerForceUnlock(t *testing.T) { // Test Expired functionality func TestLockRpcServerExpired(t *testing.T) { - timestamp := time.Now().UTC() testPath, locker, token := createLockTestServer(t) defer removeAll(testPath) - la := LockArgs{ - Name: "name", - Token: token, - Timestamp: timestamp, - Node: "node", - RPCPath: "rpc-path", - UID: "0123-4567", - } + la := newLockArgs(dsync.LockArgs{ + UID: "0123-4567", + Resource: "name", + ServerAddr: "node", + ServiceEndpoint: "rpc-path", + }) + la.SetAuthToken(token) + 
la.SetRequestTime(time.Now().UTC()) // Unknown lock at server will return expired = true var expired bool @@ -417,6 +425,7 @@ func TestLockRpcServerExpired(t *testing.T) { // Create lock (so that we can test that it is not expired) var result bool + la.SetRequestTime(time.Now().UTC()) err = locker.Lock(&la, &result) if err != nil { t.Errorf("Expected %#v, got %#v", nil, err) @@ -424,6 +433,7 @@ func TestLockRpcServerExpired(t *testing.T) { t.Errorf("Expected %#v, got %#v", true, result) } + la.SetRequestTime(time.Now().UTC()) err = locker.Expired(&la, &expired) if err != nil { t.Errorf("Expected no error, got %#v", err) @@ -439,6 +449,12 @@ func TestLockServers(t *testing.T) { if runtime.GOOS == "windows" { return } + + currentIsDistXL := globalIsDistXL + defer func() { + globalIsDistXL = currentIsDistXL + }() + globalMinioHost = "" testCases := []struct { isDistXL bool diff --git a/cmd/login-server_test.go b/cmd/login-server_test.go deleted file mode 100644 index 3d8f66a97..000000000 --- a/cmd/login-server_test.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import "testing" - -func TestLoginHandler(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") - if err != nil { - t.Fatalf("Failed to create test config - %v", err) - } - defer removeAll(rootPath) - creds := serverConfig.GetCredential() - ls := loginServer{} - testCases := []struct { - args RPCLoginArgs - expectedErr error - }{ - // Valid username and password - { - args: RPCLoginArgs{Username: creds.AccessKey, Password: creds.SecretKey}, - expectedErr: nil, - }, - // Invalid username length - { - args: RPCLoginArgs{Username: "aaa", Password: "minio123"}, - expectedErr: errInvalidAccessKeyLength, - }, - // Invalid password length - { - args: RPCLoginArgs{Username: "minio", Password: "aaa"}, - expectedErr: errInvalidSecretKeyLength, - }, - // Invalid username - { - args: RPCLoginArgs{Username: "aaaaa", Password: creds.SecretKey}, - expectedErr: errInvalidAccessKeyID, - }, - // Invalid password - { - args: RPCLoginArgs{Username: creds.AccessKey, Password: "aaaaaaaa"}, - expectedErr: errAuthentication, - }, - } - for i, test := range testCases { - reply := RPCLoginReply{} - err := ls.LoginHandler(&test.args, &reply) - if err != test.expectedErr { - t.Errorf("Test %d: Expected error %v but received %v", - i+1, test.expectedErr, err) - } - } -} diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go index b59261408..cdb4f0f5c 100644 --- a/cmd/namespace-lock.go +++ b/cmd/namespace-lock.go @@ -33,27 +33,26 @@ var globalNSMutex *nsLockMap func initDsyncNodes(eps []*url.URL) error { cred := serverConfig.GetCredential() // Initialize rpc lock client information only if this instance is a distributed setup. - clnts := make([]dsync.RPC, len(eps)) + clnts := make([]dsync.NetLocker, len(eps)) myNode := -1 for index, ep := range eps { if ep == nil { return errInvalidArgument } - clnts[index] = newAuthClient(&authConfig{ - accessKey: cred.AccessKey, - secretKey: cred.SecretKey, - // Construct a new dsync server addr. 
- secureConn: isSSL(), - address: ep.Host, - // Construct a new rpc path for the endpoint. - path: pathutil.Join(lockRPCPath, getPath(ep)), - loginMethod: "Dsync.LoginHandler", + clnts[index] = newLockRPCClient(authConfig{ + accessKey: cred.AccessKey, + secretKey: cred.SecretKey, + serverAddr: ep.Host, + serviceEndpoint: pathutil.Join(lockRPCPath, getPath(ep)), + secureConn: isSSL(), + serviceName: "Dsync", }) if isLocalStorage(ep) && myNode == -1 { myNode = index } } - return dsync.SetNodesWithClients(clnts, myNode) + + return dsync.Init(clnts, myNode) } // initNSLock - initialize name space lock map. diff --git a/cmd/net-rpc-client.go b/cmd/net-rpc-client.go index 1ba1bb8e5..ad50bd10d 100644 --- a/cmd/net-rpc-client.go +++ b/cmd/net-rpc-client.go @@ -33,79 +33,83 @@ import ( // defaultDialTimeout is used for non-secure connection. const defaultDialTimeout = 3 * time.Second -// RPCClient is a wrapper type for rpc.Client which provides reconnect on first failure. +// RPCClient is a reconnectable RPC client on Call(). type RPCClient struct { - mu sync.Mutex - netRPCClient *rpc.Client - node string - rpcPath string - secureConn bool + sync.Mutex // Mutex to lock net rpc client. + netRPCClient *rpc.Client // Base RPC client to make any RPC call. + serverAddr string // RPC server address. + serviceEndpoint string // Endpoint on the server to make any RPC call. + secureConn bool // Make TLS connection to RPC server or not. } -// newClient constructs a RPCClient object with node and rpcPath initialized. +// newRPCClient returns new RPCClient object with given serverAddr and serviceEndpoint. // It does lazy connect to the remote endpoint on Call(). 
-func newRPCClient(node, rpcPath string, secureConn bool) *RPCClient { +func newRPCClient(serverAddr, serviceEndpoint string, secureConn bool) *RPCClient { return &RPCClient{ - node: node, - rpcPath: rpcPath, - secureConn: secureConn, + serverAddr: serverAddr, + serviceEndpoint: serviceEndpoint, + secureConn: secureConn, } } -// dial tries to establish a connection to the server in a safe manner. +// dial tries to establish a connection to serverAddr in a safe manner. // If there is a valid rpc.Cliemt, it returns that else creates a new one. -func (rpcClient *RPCClient) dial() (*rpc.Client, error) { - rpcClient.mu.Lock() - defer rpcClient.mu.Unlock() +func (rpcClient *RPCClient) dial() (netRPCClient *rpc.Client, err error) { + rpcClient.Lock() + defer rpcClient.Unlock() // Nothing to do as we already have valid connection. if rpcClient.netRPCClient != nil { return rpcClient.netRPCClient, nil } - var err error var conn net.Conn if rpcClient.secureConn { - hostname, _, splitErr := net.SplitHostPort(rpcClient.node) - if splitErr != nil { - err = errors.New("Unable to parse RPC address <" + rpcClient.node + "> : " + splitErr.Error()) - return nil, &net.OpError{ + var hostname string + if hostname, _, err = net.SplitHostPort(rpcClient.serverAddr); err != nil { + err = &net.OpError{ Op: "dial-http", - Net: rpcClient.node + " " + rpcClient.rpcPath, + Net: rpcClient.serverAddr + rpcClient.serviceEndpoint, Addr: nil, - Err: err, + Err: fmt.Errorf("Unable to parse server address <%s>: %s", rpcClient.serverAddr, err.Error()), } + + return nil, err } - // ServerName in tls.Config needs to be specified to support SNI certificates - conn, err = tls.Dial("tcp", rpcClient.node, &tls.Config{ServerName: hostname, RootCAs: globalRootCAs}) + + // ServerName in tls.Config needs to be specified to support SNI certificates. + conn, err = tls.Dial("tcp", rpcClient.serverAddr, &tls.Config{ServerName: hostname, RootCAs: globalRootCAs}) } else { - // Dial with 3 seconds timeout. 
- conn, err = net.DialTimeout("tcp", rpcClient.node, defaultDialTimeout) + // Dial with a timeout. + conn, err = net.DialTimeout("tcp", rpcClient.serverAddr, defaultDialTimeout) } + if err != nil { - // Print RPC connection errors that are worthy to display in log + // Print RPC connection errors that are worthy to display in log. switch err.(type) { case x509.HostnameError: - errorIf(err, "Unable to establish secure connection to %s", rpcClient.node) + errorIf(err, "Unable to establish secure connection to %s", rpcClient.serverAddr) } + return nil, &net.OpError{ Op: "dial-http", - Net: rpcClient.node + " " + rpcClient.rpcPath, + Net: rpcClient.serverAddr + rpcClient.serviceEndpoint, Addr: nil, Err: err, } } - io.WriteString(conn, "CONNECT "+rpcClient.rpcPath+" HTTP/1.0\n\n") + io.WriteString(conn, "CONNECT "+rpcClient.serviceEndpoint+" HTTP/1.0\n\n") // Require successful HTTP response before switching to RPC protocol. resp, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: "CONNECT"}) if err == nil && resp.Status == "200 Connected to Go RPC" { netRPCClient := rpc.NewClient(conn) + if netRPCClient == nil { return nil, &net.OpError{ Op: "dial-http", - Net: rpcClient.node + " " + rpcClient.rpcPath, + Net: rpcClient.serverAddr + rpcClient.serviceEndpoint, Addr: nil, Err: fmt.Errorf("Unable to initialize new rpc.Client, %s", errUnexpected), } @@ -116,13 +120,15 @@ func (rpcClient *RPCClient) dial() (*rpc.Client, error) { return netRPCClient, nil } + conn.Close() + if err == nil { err = errors.New("unexpected HTTP response: " + resp.Status) } - conn.Close() + return nil, &net.OpError{ Op: "dial-http", - Net: rpcClient.node + " " + rpcClient.rpcPath, + Net: rpcClient.serverAddr + rpcClient.serviceEndpoint, Addr: nil, Err: err, } @@ -141,28 +147,18 @@ func (rpcClient *RPCClient) Call(serviceMethod string, args interface{}, reply i // Close closes underlying rpc.Client. 
func (rpcClient *RPCClient) Close() error { - rpcClient.mu.Lock() + rpcClient.Lock() if rpcClient.netRPCClient != nil { // We make a copy of rpc.Client and unlock it immediately so that another // goroutine could try to dial or close in parallel. netRPCClient := rpcClient.netRPCClient rpcClient.netRPCClient = nil - rpcClient.mu.Unlock() + rpcClient.Unlock() return netRPCClient.Close() } - rpcClient.mu.Unlock() + rpcClient.Unlock() return nil } - -// Node returns the node (network address) of the connection -func (rpcClient *RPCClient) Node() string { - return rpcClient.node -} - -// RPCPath returns the RPC path of the connection -func (rpcClient *RPCClient) RPCPath() string { - return rpcClient.rpcPath -} diff --git a/cmd/retry-storage.go b/cmd/retry-storage.go index c30c152a3..23a7526a3 100644 --- a/cmd/retry-storage.go +++ b/cmd/retry-storage.go @@ -233,6 +233,7 @@ func (f retryStorage) reInit() (err error) { } return err } + // Attempt to load format to see if the disk is really // a formatted disk and part of the cluster. _, err = loadFormat(f.remoteStorage) @@ -244,6 +245,7 @@ func (f retryStorage) reInit() (err error) { } return err } + // Login and loading format was a success, break and proceed forward. break } diff --git a/cmd/rpc-common.go b/cmd/rpc-common.go new file mode 100644 index 000000000..8180aad45 --- /dev/null +++ b/cmd/rpc-common.go @@ -0,0 +1,111 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "time" + + "github.com/minio/dsync" +) + +// Allow any RPC call request time should be no more/less than 3 seconds. +// 3 seconds is chosen arbitrarily. +const rpcSkewTimeAllowed = 3 * time.Second + +func isRequestTimeAllowed(requestTime time.Time) bool { + // Check whether request time is within acceptable skew time. + utcNow := time.Now().UTC() + return !(requestTime.Sub(utcNow) > rpcSkewTimeAllowed || + utcNow.Sub(requestTime) > rpcSkewTimeAllowed) +} + +// AuthRPCArgs represents minimum required arguments to make any authenticated RPC call. +type AuthRPCArgs struct { + // Authentication token to be verified by the server for every RPC call. + AuthToken string + + // Request time to be verified by the server for every RPC call. + // This is an addition check over Authentication token for time drifting. + RequestTime time.Time +} + +// SetAuthToken - sets the token to the supplied value. +func (args *AuthRPCArgs) SetAuthToken(authToken string) { + args.AuthToken = authToken +} + +// SetRequestTime - sets the requestTime to the supplied value. +func (args *AuthRPCArgs) SetRequestTime(requestTime time.Time) { + args.RequestTime = requestTime +} + +// IsAuthenticated - validated whether this auth RPC args are already authenticated or not. +func (args AuthRPCArgs) IsAuthenticated() error { + // Check whether the token is valid + if !isAuthTokenValid(args.AuthToken) { + return errInvalidToken + } + + // Check if the request time is within the allowed skew limit. + if !isRequestTimeAllowed(args.RequestTime) { + return errServerTimeMismatch + } + + // Good to go. + return nil +} + +// AuthRPCReply represents minimum required reply for any authenticated RPC call. +type AuthRPCReply struct{} + +// LoginRPCArgs - login username and password for RPC. 
+type LoginRPCArgs struct { + Username string + Password string + Version string + RequestTime time.Time +} + +// IsValid - validates whether this LoginRPCArgs are valid for authentication. +func (args LoginRPCArgs) IsValid() error { + // Check if version matches. + if args.Version != Version { + return errServerVersionMismatch + } + + if !isRequestTimeAllowed(args.RequestTime) { + return errServerTimeMismatch + } + + return nil +} + +// LoginRPCReply - login reply provides generated token to be used +// with subsequent requests. +type LoginRPCReply struct { + AuthToken string +} + +// LockArgs represents arguments for any authenticated lock RPC call. +type LockArgs struct { + AuthRPCArgs + dsyncLockArgs dsync.LockArgs +} + +func newLockArgs(args dsync.LockArgs) LockArgs { + return LockArgs{dsyncLockArgs: args} +} diff --git a/cmd/s3-peer-client.go b/cmd/s3-peer-client.go index 670eb14bf..0f0cba687 100644 --- a/cmd/s3-peer-client.go +++ b/cmd/s3-peer-client.go @@ -52,6 +52,7 @@ func makeS3Peers(eps []*url.URL) s3Peers { }) seenAddr[globalMinioAddr] = true + serverCred := serverConfig.GetCredential() // iterate over endpoints to find new remote peers and add // them to ret. 
for _, ep := range eps { @@ -62,17 +63,17 @@ func makeS3Peers(eps []*url.URL) s3Peers { // Check if the remote host has been added already if !seenAddr[ep.Host] { cfg := authConfig{ - accessKey: serverConfig.GetCredential().AccessKey, - secretKey: serverConfig.GetCredential().SecretKey, - address: ep.Host, - secureConn: isSSL(), - path: path.Join(reservedBucket, s3Path), - loginMethod: "S3.LoginHandler", + accessKey: serverCred.AccessKey, + secretKey: serverCred.SecretKey, + serverAddr: ep.Host, + serviceEndpoint: path.Join(reservedBucket, s3Path), + secureConn: isSSL(), + serviceName: "S3", } ret = append(ret, s3Peer{ addr: ep.Host, - bmsClient: &remoteBucketMetaState{newAuthClient(&cfg)}, + bmsClient: &remoteBucketMetaState{newAuthRPCClient(cfg)}, }) seenAddr[ep.Host] = true } diff --git a/cmd/s3-peer-router.go b/cmd/s3-peer-router.go index e6575f967..e843c17bb 100644 --- a/cmd/s3-peer-router.go +++ b/cmd/s3-peer-router.go @@ -27,13 +27,13 @@ const ( ) type s3PeerAPIHandlers struct { - loginServer + AuthRPCServer bms BucketMetaState } func registerS3PeerRPCRouter(mux *router.Router) error { s3PeerHandlers := &s3PeerAPIHandlers{ - loginServer{}, + AuthRPCServer{}, &localBucketMetaState{ ObjectAPI: newObjectLayerFn, }, diff --git a/cmd/s3-peer-rpc-handlers.go b/cmd/s3-peer-rpc-handlers.go index 769d84f76..1f568875a 100644 --- a/cmd/s3-peer-rpc-handlers.go +++ b/cmd/s3-peer-rpc-handlers.go @@ -20,7 +20,7 @@ package cmd // call type SetBucketNotificationPeerArgs struct { // For Auth - GenericArgs + AuthRPCArgs Bucket string @@ -35,10 +35,9 @@ func (s *SetBucketNotificationPeerArgs) BucketUpdate(client BucketMetaState) err return client.UpdateBucketNotification(s) } -func (s3 *s3PeerAPIHandlers) SetBucketNotificationPeer(args *SetBucketNotificationPeerArgs, reply *GenericReply) error { - // check auth - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s3 *s3PeerAPIHandlers) SetBucketNotificationPeer(args *SetBucketNotificationPeerArgs, reply 
*AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } return s3.bms.UpdateBucketNotification(args) @@ -47,7 +46,7 @@ func (s3 *s3PeerAPIHandlers) SetBucketNotificationPeer(args *SetBucketNotificati // SetBucketListenerPeerArgs - Arguments collection to SetBucketListenerPeer RPC call type SetBucketListenerPeerArgs struct { // For Auth - GenericArgs + AuthRPCArgs Bucket string @@ -62,10 +61,9 @@ func (s *SetBucketListenerPeerArgs) BucketUpdate(client BucketMetaState) error { return client.UpdateBucketListener(s) } -func (s3 *s3PeerAPIHandlers) SetBucketListenerPeer(args *SetBucketListenerPeerArgs, reply *GenericReply) error { - // check auth - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s3 *s3PeerAPIHandlers) SetBucketListenerPeer(args *SetBucketListenerPeerArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } return s3.bms.UpdateBucketListener(args) @@ -74,7 +72,7 @@ func (s3 *s3PeerAPIHandlers) SetBucketListenerPeer(args *SetBucketListenerPeerAr // EventArgs - Arguments collection for Event RPC call type EventArgs struct { // For Auth - GenericArgs + AuthRPCArgs // event being sent Event []NotificationEvent @@ -84,10 +82,9 @@ type EventArgs struct { } // submit an event to the receiving server. 
-func (s3 *s3PeerAPIHandlers) Event(args *EventArgs, reply *GenericReply) error { - // check auth - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s3 *s3PeerAPIHandlers) Event(args *EventArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } return s3.bms.SendEvent(args) @@ -96,7 +93,7 @@ func (s3 *s3PeerAPIHandlers) Event(args *EventArgs, reply *GenericReply) error { // SetBucketPolicyPeerArgs - Arguments collection for SetBucketPolicyPeer RPC call type SetBucketPolicyPeerArgs struct { // For Auth - GenericArgs + AuthRPCArgs Bucket string @@ -112,10 +109,9 @@ func (s *SetBucketPolicyPeerArgs) BucketUpdate(client BucketMetaState) error { } // tell receiving server to update a bucket policy -func (s3 *s3PeerAPIHandlers) SetBucketPolicyPeer(args *SetBucketPolicyPeerArgs, reply *GenericReply) error { - // check auth - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s3 *s3PeerAPIHandlers) SetBucketPolicyPeer(args *SetBucketPolicyPeerArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } return s3.bms.UpdateBucketPolicy(args) diff --git a/cmd/s3-peer-rpc-handlers_test.go b/cmd/s3-peer-rpc-handlers_test.go index ff46414bc..a9e813cb7 100644 --- a/cmd/s3-peer-rpc-handlers_test.go +++ b/cmd/s3-peer-rpc-handlers_test.go @@ -25,19 +25,19 @@ import ( type TestRPCS3PeerSuite struct { testServer TestServer - testAuthConf *authConfig + testAuthConf authConfig disks []string } // Set up the suite and start the test server. 
func (s *TestRPCS3PeerSuite) SetUpSuite(t *testing.T) { s.testServer, s.disks = StartTestS3PeerRPCServer(t) - s.testAuthConf = &authConfig{ - address: s.testServer.Server.Listener.Addr().String(), - accessKey: s.testServer.AccessKey, - secretKey: s.testServer.SecretKey, - path: path.Join(reservedBucket, s3Path), - loginMethod: "S3.LoginHandler", + s.testAuthConf = authConfig{ + serverAddr: s.testServer.Server.Listener.Addr().String(), + accessKey: s.testServer.AccessKey, + secretKey: s.testServer.SecretKey, + serviceEndpoint: path.Join(reservedBucket, s3Path), + serviceName: "S3", } } @@ -62,10 +62,10 @@ func TestS3PeerRPC(t *testing.T) { // Test S3 RPC handlers func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) { // Validate for invalid token. - args := GenericArgs{Token: "garbage", Timestamp: time.Now().UTC()} - rclient := newRPCClient(s.testAuthConf.address, s.testAuthConf.path, false) + args := AuthRPCArgs{AuthToken: "garbage", RequestTime: time.Now().UTC()} + rclient := newRPCClient(s.testAuthConf.serverAddr, s.testAuthConf.serviceEndpoint, false) defer rclient.Close() - err := rclient.Call("S3.SetBucketNotificationPeer", &args, &GenericReply{}) + err := rclient.Call("S3.SetBucketNotificationPeer", &args, &AuthRPCReply{}) if err != nil { if err.Error() != errInvalidToken.Error() { t.Fatal(err) @@ -74,16 +74,16 @@ func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) { // Check bucket notification call works. BNPArgs := SetBucketNotificationPeerArgs{Bucket: "bucket", NCfg: ¬ificationConfig{}} - client := newAuthClient(s.testAuthConf) + client := newAuthRPCClient(s.testAuthConf) defer client.Close() - err = client.Call("S3.SetBucketNotificationPeer", &BNPArgs, &GenericReply{}) + err = client.Call("S3.SetBucketNotificationPeer", &BNPArgs, &AuthRPCReply{}) if err != nil { t.Fatal(err) } // Check bucket listener update call works. 
BLPArgs := SetBucketListenerPeerArgs{Bucket: "bucket", LCfg: nil} - err = client.Call("S3.SetBucketListenerPeer", &BLPArgs, &GenericReply{}) + err = client.Call("S3.SetBucketListenerPeer", &BLPArgs, &AuthRPCReply{}) if err != nil { t.Fatal(err) } @@ -95,14 +95,14 @@ func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) { t.Fatal(err) } BPPArgs := SetBucketPolicyPeerArgs{Bucket: "bucket", PChBytes: pChBytes} - err = client.Call("S3.SetBucketPolicyPeer", &BPPArgs, &GenericReply{}) + err = client.Call("S3.SetBucketPolicyPeer", &BPPArgs, &AuthRPCReply{}) if err != nil { t.Fatal(err) } // Check event send event call works. evArgs := EventArgs{Event: nil, Arn: "localhost:9000"} - err = client.Call("S3.Event", &evArgs, &GenericReply{}) + err = client.Call("S3.Event", &evArgs, &AuthRPCReply{}) if err != nil { t.Fatal(err) } diff --git a/cmd/storage-rpc-client.go b/cmd/storage-rpc-client.go index ebe639dd1..846b17096 100644 --- a/cmd/storage-rpc-client.go +++ b/cmd/storage-rpc-client.go @@ -23,18 +23,14 @@ import ( "net/rpc" "net/url" "path" - "sync" "sync/atomic" - "time" "github.com/minio/minio/pkg/disk" ) type networkStorage struct { networkIOErrCount int32 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG - netAddr string - netPath string - rpcClient *storageRPCClient + rpcClient *AuthRPCClient } const ( @@ -99,104 +95,6 @@ func toStorageErr(err error) error { return err } -// storageRPCClient is a wrapper type for RPCClient which provides JWT based authentication across reconnects. -type storageRPCClient struct { - sync.Mutex - cfg storageConfig - rpc *RPCClient // reconnect'able rpc client built on top of net/rpc Client - serverToken string // Disk rpc JWT based token. - serverVersion string // Server version exchanged by the RPC. -} - -// Storage config represents authentication credentials and Login -// method name to be used for fetching JWT tokens from the storage -// server. 
-type storageConfig struct { - addr string // Network address path of storage RPC server. - path string // Network storage path for HTTP dial. - secureConn bool // Indicates if this storage RPC is on a secured connection. - creds credential -} - -// newStorageClient - returns a jwt based authenticated (go) storage rpc client. -func newStorageClient(storageCfg storageConfig) *storageRPCClient { - return &storageRPCClient{ - // Save the config. - cfg: storageCfg, - rpc: newRPCClient(storageCfg.addr, storageCfg.path, storageCfg.secureConn), - } -} - -// Close - closes underlying rpc connection. -func (storageClient *storageRPCClient) Close() error { - storageClient.Lock() - // reset token on closing a connection - storageClient.serverToken = "" - storageClient.Unlock() - return storageClient.rpc.Close() -} - -// Login - a jwt based authentication is performed with rpc server. -func (storageClient *storageRPCClient) Login() (err error) { - storageClient.Lock() - // As soon as the function returns unlock, - defer storageClient.Unlock() - - // Return if token is already set. - if storageClient.serverToken != "" { - return nil - } - - reply := RPCLoginReply{} - if err = storageClient.rpc.Call("Storage.LoginHandler", RPCLoginArgs{ - Username: storageClient.cfg.creds.AccessKey, - Password: storageClient.cfg.creds.SecretKey, - }, &reply); err != nil { - return err - } - - // Validate if version do indeed match. - if reply.ServerVersion != Version { - return errServerVersionMismatch - } - - // Validate if server timestamp is skewed. - curTime := time.Now().UTC() - if curTime.Sub(reply.Timestamp) > globalMaxSkewTime { - return errServerTimeMismatch - } - - // Set token, time stamp as received from a successful login call. 
- storageClient.serverToken = reply.Token - storageClient.serverVersion = reply.ServerVersion - return nil -} - -// Call - If rpc connection isn't established yet since previous disconnect, -// connection is established, a jwt authenticated login is performed and then -// the call is performed. -func (storageClient *storageRPCClient) Call(serviceMethod string, args interface { - SetToken(token string) - SetTimestamp(tstamp time.Time) -}, reply interface{}) (err error) { - // On successful login, attempt the call. - if err = storageClient.Login(); err != nil { - return err - } - // Set token and timestamp before the rpc call. - args.SetToken(storageClient.serverToken) - args.SetTimestamp(time.Now().UTC()) - - // Call the underlying rpc. - err = storageClient.rpc.Call(serviceMethod, args, reply) - - // Invalidate token, and mark it for re-login. - if err == rpc.ErrShutdown { - storageClient.Close() - } - return err -} - // Initialize new storage rpc client. func newStorageRPC(ep *url.URL) (StorageAPI, error) { if ep == nil { @@ -207,38 +105,35 @@ func newStorageRPC(ep *url.URL) (StorageAPI, error) { rpcPath := path.Join(storageRPCPath, getPath(ep)) rpcAddr := ep.Host - // Initialize rpc client with network address and rpc path. - accessKey := serverConfig.GetCredential().AccessKey - secretKey := serverConfig.GetCredential().SecretKey + serverCred := serverConfig.GetCredential() + accessKey := serverCred.AccessKey + secretKey := serverCred.SecretKey if ep.User != nil { accessKey = ep.User.Username() - if key, set := ep.User.Password(); set { - secretKey = key + if password, ok := ep.User.Password(); ok { + secretKey = password } } - // Initialize network storage. 
- ndisk := &networkStorage{ - netAddr: ep.Host, - netPath: getPath(ep), - rpcClient: newStorageClient(storageConfig{ - addr: rpcAddr, - path: rpcPath, - creds: credential{ - AccessKey: accessKey, - SecretKey: secretKey, - }, - secureConn: isSSL(), + storageAPI := &networkStorage{ + rpcClient: newAuthRPCClient(authConfig{ + accessKey: accessKey, + secretKey: secretKey, + serverAddr: rpcAddr, + serviceEndpoint: rpcPath, + secureConn: isSSL(), + serviceName: "Storage", + disableReconnect: true, }), } // Returns successfully here. - return ndisk, nil + return storageAPI, nil } // Stringer interface compatible representation of network device. func (n *networkStorage) String() string { - return n.netAddr + ":" + n.netPath + return n.rpcClient.ServerAddr() + ":" + n.rpcClient.ServiceEndpoint() } // Network IO error count is kept at 256 with some simple @@ -250,10 +145,9 @@ func (n *networkStorage) String() string { // incoming i/o. const maxAllowedNetworkIOError = 256 // maximum allowed network IOError. -// Initializes the remote RPC connection by attempting a login attempt. -func (n *networkStorage) Init() (err error) { - // Attempt a login to reconnect. - err = n.rpcClient.Login() +// Init - attempts a login to reconnect. 
+func (n *networkStorage) Init() error { + err := n.rpcClient.Login() return toStorageErr(err) } @@ -278,7 +172,7 @@ func (n *networkStorage) DiskInfo() (info disk.Info, err error) { return disk.Info{}, errFaultyRemoteDisk } - args := GenericArgs{} + args := AuthRPCArgs{} if err = n.rpcClient.Call("Storage.DiskInfoHandler", &args, &info); err != nil { return disk.Info{}, toStorageErr(err) } @@ -299,7 +193,7 @@ func (n *networkStorage) MakeVol(volume string) (err error) { return errFaultyRemoteDisk } - reply := GenericReply{} + reply := AuthRPCReply{} args := GenericVolArgs{Vol: volume} if err := n.rpcClient.Call("Storage.MakeVolHandler", &args, &reply); err != nil { return toStorageErr(err) @@ -322,7 +216,7 @@ func (n *networkStorage) ListVols() (vols []VolInfo, err error) { } ListVols := ListVolsReply{} - err = n.rpcClient.Call("Storage.ListVolsHandler", &GenericArgs{}, &ListVols) + err = n.rpcClient.Call("Storage.ListVolsHandler", &AuthRPCArgs{}, &ListVols) if err != nil { return nil, toStorageErr(err) } @@ -364,7 +258,7 @@ func (n *networkStorage) DeleteVol(volume string) (err error) { return errFaultyRemoteDisk } - reply := GenericReply{} + reply := AuthRPCReply{} args := GenericVolArgs{Vol: volume} if err := n.rpcClient.Call("Storage.DeleteVolHandler", &args, &reply); err != nil { return toStorageErr(err) @@ -386,7 +280,7 @@ func (n *networkStorage) PrepareFile(volume, path string, length int64) (err err return errFaultyRemoteDisk } - reply := GenericReply{} + reply := AuthRPCReply{} if err = n.rpcClient.Call("Storage.PrepareFileHandler", &PrepareFileArgs{ Vol: volume, Path: path, @@ -411,7 +305,7 @@ func (n *networkStorage) AppendFile(volume, path string, buffer []byte) (err err return errFaultyRemoteDisk } - reply := GenericReply{} + reply := AuthRPCReply{} if err = n.rpcClient.Call("Storage.AppendFileHandler", &AppendFileArgs{ Vol: volume, Path: path, @@ -545,7 +439,7 @@ func (n *networkStorage) DeleteFile(volume, path string) (err error) { return 
errFaultyRemoteDisk } - reply := GenericReply{} + reply := AuthRPCReply{} if err = n.rpcClient.Call("Storage.DeleteFileHandler", &DeleteFileArgs{ Vol: volume, Path: path, @@ -569,7 +463,7 @@ func (n *networkStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath strin return errFaultyRemoteDisk } - reply := GenericReply{} + reply := AuthRPCReply{} if err = n.rpcClient.Call("Storage.RenameFileHandler", &RenameFileArgs{ SrcVol: srcVolume, SrcPath: srcPath, diff --git a/cmd/storage-rpc-server-datatypes.go b/cmd/storage-rpc-server-datatypes.go index 8f474feff..e3dd9bf9a 100644 --- a/cmd/storage-rpc-server-datatypes.go +++ b/cmd/storage-rpc-server-datatypes.go @@ -19,7 +19,7 @@ package cmd // GenericVolArgs - generic volume args. type GenericVolArgs struct { // Authentication token generated by Login. - GenericArgs + AuthRPCArgs // Name of the volume. Vol string @@ -34,7 +34,7 @@ type ListVolsReply struct { // ReadAllArgs represents read all RPC arguments. type ReadAllArgs struct { // Authentication token generated by Login. - GenericArgs + AuthRPCArgs // Name of the volume. Vol string @@ -46,7 +46,7 @@ type ReadAllArgs struct { // ReadFileArgs represents read file RPC arguments. type ReadFileArgs struct { // Authentication token generated by Login. - GenericArgs + AuthRPCArgs // Name of the volume. Vol string @@ -64,7 +64,7 @@ type ReadFileArgs struct { // PrepareFileArgs represents append file RPC arguments. type PrepareFileArgs struct { // Authentication token generated by Login. - GenericArgs + AuthRPCArgs // Name of the volume. Vol string @@ -79,7 +79,7 @@ type PrepareFileArgs struct { // AppendFileArgs represents append file RPC arguments. type AppendFileArgs struct { // Authentication token generated by Login. - GenericArgs + AuthRPCArgs // Name of the volume. Vol string @@ -94,7 +94,7 @@ type AppendFileArgs struct { // StatFileArgs represents stat file RPC arguments. type StatFileArgs struct { // Authentication token generated by Login. 
- GenericArgs + AuthRPCArgs // Name of the volume. Vol string @@ -106,7 +106,7 @@ type StatFileArgs struct { // DeleteFileArgs represents delete file RPC arguments. type DeleteFileArgs struct { // Authentication token generated by Login. - GenericArgs + AuthRPCArgs // Name of the volume. Vol string @@ -118,7 +118,7 @@ type DeleteFileArgs struct { // ListDirArgs represents list contents RPC arguments. type ListDirArgs struct { // Authentication token generated by Login. - GenericArgs + AuthRPCArgs // Name of the volume. Vol string @@ -130,7 +130,7 @@ type ListDirArgs struct { // RenameFileArgs represents rename file RPC arguments. type RenameFileArgs struct { // Authentication token generated by Login. - GenericArgs + AuthRPCArgs // Name of source volume. SrcVol string diff --git a/cmd/storage-rpc-server.go b/cmd/storage-rpc-server.go index 0210a4c8b..6ad287868 100644 --- a/cmd/storage-rpc-server.go +++ b/cmd/storage-rpc-server.go @@ -29,7 +29,7 @@ import ( // Storage server implements rpc primitives to facilitate exporting a // disk over a network. type storageServer struct { - loginServer + AuthRPCServer storage StorageAPI path string timestamp time.Time @@ -38,10 +38,11 @@ type storageServer struct { /// Storage operations handlers. // DiskInfoHandler - disk info handler is rpc wrapper for DiskInfo operation. -func (s *storageServer) DiskInfoHandler(args *GenericArgs, reply *disk.Info) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s *storageServer) DiskInfoHandler(args *AuthRPCArgs, reply *disk.Info) error { + if err := args.IsAuthenticated(); err != nil { + return err } + info, err := s.storage.DiskInfo() *reply = info return err @@ -50,18 +51,20 @@ func (s *storageServer) DiskInfoHandler(args *GenericArgs, reply *disk.Info) err /// Volume operations handlers. // MakeVolHandler - make vol handler is rpc wrapper for MakeVol operation. 
-func (s *storageServer) MakeVolHandler(args *GenericVolArgs, reply *GenericReply) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s *storageServer) MakeVolHandler(args *GenericVolArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } + return s.storage.MakeVol(args.Vol) } // ListVolsHandler - list vols handler is rpc wrapper for ListVols operation. -func (s *storageServer) ListVolsHandler(args *GenericArgs, reply *ListVolsReply) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s *storageServer) ListVolsHandler(args *AuthRPCArgs, reply *ListVolsReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } + vols, err := s.storage.ListVols() if err != nil { return err @@ -72,9 +75,10 @@ func (s *storageServer) ListVolsHandler(args *GenericArgs, reply *ListVolsReply) // StatVolHandler - stat vol handler is a rpc wrapper for StatVol operation. func (s *storageServer) StatVolHandler(args *GenericVolArgs, reply *VolInfo) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken + if err := args.IsAuthenticated(); err != nil { + return err } + volInfo, err := s.storage.StatVol(args.Vol) if err != nil { return err @@ -85,10 +89,11 @@ func (s *storageServer) StatVolHandler(args *GenericVolArgs, reply *VolInfo) err // DeleteVolHandler - delete vol handler is a rpc wrapper for // DeleteVol operation. -func (s *storageServer) DeleteVolHandler(args *GenericVolArgs, reply *GenericReply) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s *storageServer) DeleteVolHandler(args *GenericVolArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } + return s.storage.DeleteVol(args.Vol) } @@ -96,9 +101,10 @@ func (s *storageServer) DeleteVolHandler(args *GenericVolArgs, reply *GenericRep // StatFileHandler - stat file handler is rpc wrapper to stat file. 
func (s *storageServer) StatFileHandler(args *StatFileArgs, reply *FileInfo) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken + if err := args.IsAuthenticated(); err != nil { + return err } + fileInfo, err := s.storage.StatFile(args.Vol, args.Path) if err != nil { return err @@ -109,9 +115,10 @@ func (s *storageServer) StatFileHandler(args *StatFileArgs, reply *FileInfo) err // ListDirHandler - list directory handler is rpc wrapper to list dir. func (s *storageServer) ListDirHandler(args *ListDirArgs, reply *[]string) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken + if err := args.IsAuthenticated(); err != nil { + return err } + entries, err := s.storage.ListDir(args.Vol, args.Path) if err != nil { return err @@ -122,9 +129,10 @@ func (s *storageServer) ListDirHandler(args *ListDirArgs, reply *[]string) error // ReadAllHandler - read all handler is rpc wrapper to read all storage API. func (s *storageServer) ReadAllHandler(args *ReadFileArgs, reply *[]byte) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken + if err := args.IsAuthenticated(); err != nil { + return err } + buf, err := s.storage.ReadAll(args.Vol, args.Path) if err != nil { return err @@ -135,8 +143,8 @@ func (s *storageServer) ReadAllHandler(args *ReadFileArgs, reply *[]byte) error // ReadFileHandler - read file handler is rpc wrapper to read file. func (s *storageServer) ReadFileHandler(args *ReadFileArgs, reply *[]byte) (err error) { - if !isAuthTokenValid(args.Token) { - return errInvalidToken + if err = args.IsAuthenticated(); err != nil { + return err } var n int64 @@ -153,34 +161,38 @@ func (s *storageServer) ReadFileHandler(args *ReadFileArgs, reply *[]byte) (err } // PrepareFileHandler - prepare file handler is rpc wrapper to prepare file. 
-func (s *storageServer) PrepareFileHandler(args *PrepareFileArgs, reply *GenericReply) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s *storageServer) PrepareFileHandler(args *PrepareFileArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } + return s.storage.PrepareFile(args.Vol, args.Path, args.Size) } // AppendFileHandler - append file handler is rpc wrapper to append file. -func (s *storageServer) AppendFileHandler(args *AppendFileArgs, reply *GenericReply) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s *storageServer) AppendFileHandler(args *AppendFileArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } + return s.storage.AppendFile(args.Vol, args.Path, args.Buffer) } // DeleteFileHandler - delete file handler is rpc wrapper to delete file. -func (s *storageServer) DeleteFileHandler(args *DeleteFileArgs, reply *GenericReply) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s *storageServer) DeleteFileHandler(args *DeleteFileArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } + return s.storage.DeleteFile(args.Vol, args.Path) } // RenameFileHandler - rename file handler is rpc wrapper to rename file. 
-func (s *storageServer) RenameFileHandler(args *RenameFileArgs, reply *GenericReply) error { - if !isAuthTokenValid(args.Token) { - return errInvalidToken +func (s *storageServer) RenameFileHandler(args *RenameFileArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err } + return s.storage.RenameFile(args.SrcVol, args.SrcPath, args.DstVol, args.DstPath) } diff --git a/cmd/storage-rpc-server_test.go b/cmd/storage-rpc-server_test.go index a64ebf07c..6ed51d6bc 100644 --- a/cmd/storage-rpc-server_test.go +++ b/cmd/storage-rpc-server_test.go @@ -87,108 +87,113 @@ func TestStorageRPCInvalidToken(t *testing.T) { defer removeAll(st.configDir) storageRPC := st.stServer - timestamp := time.Now().UTC() - ga := GenericArgs{ - Token: st.token, - Timestamp: timestamp, - } - // Construct an invalid token. - badga := ga - badga.Token = "invalidToken" // Following test cases are meant to exercise the invalid // token code path of the storage RPC methods. - var err error - gva := GenericVolArgs{ - GenericArgs: badga, + badAuthRPCArgs := AuthRPCArgs{AuthToken: "invalidToken"} + badGenericVolArgs := GenericVolArgs{ + AuthRPCArgs: badAuthRPCArgs, Vol: "myvol", } // 1. DiskInfoHandler diskInfoReply := &disk.Info{} - err = storageRPC.DiskInfoHandler(&badga, diskInfoReply) + badAuthRPCArgs.RequestTime = time.Now().UTC() + err = storageRPC.DiskInfoHandler(&badAuthRPCArgs, diskInfoReply) errorIfInvalidToken(t, err) // 2. MakeVolHandler - makeVolArgs := &gva - makeVolReply := &GenericReply{} + makeVolArgs := &badGenericVolArgs + makeVolArgs.AuthRPCArgs.RequestTime = time.Now().UTC() + makeVolReply := &AuthRPCReply{} err = storageRPC.MakeVolHandler(makeVolArgs, makeVolReply) errorIfInvalidToken(t, err) // 3. 
ListVolsHandler listVolReply := &ListVolsReply{} - err = storageRPC.ListVolsHandler(&badga, listVolReply) + badAuthRPCArgs.RequestTime = time.Now().UTC() + err = storageRPC.ListVolsHandler(&badAuthRPCArgs, listVolReply) errorIfInvalidToken(t, err) // 4. StatVolHandler statVolReply := &VolInfo{} - statVolArgs := &gva + statVolArgs := &badGenericVolArgs + statVolArgs.AuthRPCArgs.RequestTime = time.Now().UTC() err = storageRPC.StatVolHandler(statVolArgs, statVolReply) errorIfInvalidToken(t, err) // 5. DeleteVolHandler - deleteVolArgs := &gva - deleteVolReply := &GenericReply{} + deleteVolArgs := &badGenericVolArgs + deleteVolArgs.AuthRPCArgs.RequestTime = time.Now().UTC() + deleteVolReply := &AuthRPCReply{} err = storageRPC.DeleteVolHandler(deleteVolArgs, deleteVolReply) errorIfInvalidToken(t, err) // 6. StatFileHandler statFileArgs := &StatFileArgs{ - GenericArgs: badga, + AuthRPCArgs: badAuthRPCArgs, } + statFileArgs.AuthRPCArgs.RequestTime = time.Now().UTC() statReply := &FileInfo{} err = storageRPC.StatFileHandler(statFileArgs, statReply) errorIfInvalidToken(t, err) // 7. ListDirHandler listDirArgs := &ListDirArgs{ - GenericArgs: badga, + AuthRPCArgs: badAuthRPCArgs, } + listDirArgs.AuthRPCArgs.RequestTime = time.Now().UTC() listDirReply := &[]string{} err = storageRPC.ListDirHandler(listDirArgs, listDirReply) errorIfInvalidToken(t, err) // 8. ReadAllHandler readFileArgs := &ReadFileArgs{ - GenericArgs: badga, + AuthRPCArgs: badAuthRPCArgs, } + readFileArgs.AuthRPCArgs.RequestTime = time.Now().UTC() readFileReply := &[]byte{} err = storageRPC.ReadAllHandler(readFileArgs, readFileReply) errorIfInvalidToken(t, err) // 9. ReadFileHandler + readFileArgs.AuthRPCArgs.RequestTime = time.Now().UTC() err = storageRPC.ReadFileHandler(readFileArgs, readFileReply) errorIfInvalidToken(t, err) // 10. 
PrepareFileHandler prepFileArgs := &PrepareFileArgs{ - GenericArgs: badga, + AuthRPCArgs: badAuthRPCArgs, } - prepFileReply := &GenericReply{} + prepFileArgs.AuthRPCArgs.RequestTime = time.Now().UTC() + prepFileReply := &AuthRPCReply{} err = storageRPC.PrepareFileHandler(prepFileArgs, prepFileReply) errorIfInvalidToken(t, err) // 11. AppendFileHandler appendArgs := &AppendFileArgs{ - GenericArgs: badga, + AuthRPCArgs: badAuthRPCArgs, } - appendReply := &GenericReply{} + appendArgs.AuthRPCArgs.RequestTime = time.Now().UTC() + appendReply := &AuthRPCReply{} err = storageRPC.AppendFileHandler(appendArgs, appendReply) errorIfInvalidToken(t, err) // 12. DeleteFileHandler delFileArgs := &DeleteFileArgs{ - GenericArgs: badga, + AuthRPCArgs: badAuthRPCArgs, } - delFileRely := &GenericReply{} + delFileArgs.AuthRPCArgs.RequestTime = time.Now().UTC() + delFileRely := &AuthRPCReply{} err = storageRPC.DeleteFileHandler(delFileArgs, delFileRely) errorIfInvalidToken(t, err) // 13. RenameFileHandler renameArgs := &RenameFileArgs{ - GenericArgs: badga, + AuthRPCArgs: badAuthRPCArgs, } - renameReply := &GenericReply{} + renameArgs.AuthRPCArgs.RequestTime = time.Now().UTC() + renameReply := &AuthRPCReply{} err = storageRPC.RenameFileHandler(renameArgs, renameReply) errorIfInvalidToken(t, err) } diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index ba783bcbe..97fd99218 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -56,9 +56,11 @@ import ( // Tests should initNSLock only once. func init() { + // Set as non-distributed. + globalIsDistXL = false + // Initialize name space lock. - isDist := false - initNSLock(isDist) + initNSLock(globalIsDistXL) // Disable printing console messages during tests. color.Output = ioutil.Discard @@ -426,9 +428,6 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer { // Run TestServer. testRPCServer.Server = httptest.NewServer(mux) - // Set as non-distributed. 
- globalIsDistXL = false - // initialize remainder of serverCmdConfig testRPCServer.SrvCmdCfg = srvCfg diff --git a/cmd/utils_test.go b/cmd/utils_test.go index 80efbf61b..89d0aae67 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -232,6 +232,12 @@ func TestLocalAddress(t *testing.T) { if runtime.GOOS == "windows" { return } + + currentIsDistXL := globalIsDistXL + defer func() { + globalIsDistXL = currentIsDistXL + }() + // need to set this to avoid stale values from other tests. globalMinioPort = "9000" globalMinioHost = "" diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index 881abf7fc..5d93a9d81 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -94,7 +94,7 @@ type StorageInfoRep struct { } // StorageInfo - web call to gather storage usage statistics. -func (web *webAPIHandlers) StorageInfo(r *http.Request, args *GenericArgs, reply *StorageInfoRep) error { +func (web *webAPIHandlers) StorageInfo(r *http.Request, args *AuthRPCArgs, reply *StorageInfoRep) error { objectAPI := web.ObjectAPI() if objectAPI == nil { return toJSONError(errServerNotInitialized) diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index 42cad932a..d187f3d64 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -100,7 +100,7 @@ func getWebRPCToken(apiRouter http.Handler, accessKey, secretKey string) (token rec := httptest.NewRecorder() request := LoginArgs{Username: accessKey, Password: secretKey} reply := &LoginRep{} - req, err := newTestWebRPCRequest("Web.Login", "", request) + req, err := newTestWebRPCRequest("Web"+loginMethodName, "", request) if err != nil { return "", err } @@ -193,7 +193,7 @@ func testStorageInfoWebHandler(obj ObjectLayer, instanceType string, t TestErrHa rec := httptest.NewRecorder() - storageInfoRequest := GenericArgs{} + storageInfoRequest := AuthRPCArgs{} storageInfoReply := &StorageInfoRep{} req, err := newTestWebRPCRequest("Web.StorageInfo", authorization, storageInfoRequest) if err != nil { @@ -239,7 
+239,7 @@ func testServerInfoWebHandler(obj ObjectLayer, instanceType string, t TestErrHan rec := httptest.NewRecorder() - serverInfoRequest := GenericArgs{} + serverInfoRequest := AuthRPCArgs{} serverInfoReply := &ServerInfoRep{} req, err := newTestWebRPCRequest("Web.ServerInfo", authorization, serverInfoRequest) if err != nil { @@ -1204,7 +1204,7 @@ func TestWebCheckAuthorization(t *testing.T) { "PresignedGet", } for _, rpcCall := range webRPCs { - args := &GenericArgs{} + args := &AuthRPCArgs{} reply := &WebGenericRep{} req, nerr := newTestWebRPCRequest("Web."+rpcCall, "Bearer fooauthorization", args) if nerr != nil { @@ -1288,7 +1288,7 @@ func TestWebObjectLayerNotReady(t *testing.T) { webRPCs := []string{"StorageInfo", "MakeBucket", "ListBuckets", "ListObjects", "RemoveObject", "GetBucketPolicy", "SetBucketPolicy", "ListAllBucketPolicies"} for _, rpcCall := range webRPCs { - args := &GenericArgs{} + args := &AuthRPCArgs{} reply := &WebGenericRep{} req, nerr := newTestWebRPCRequest("Web."+rpcCall, authorization, args) if nerr != nil { @@ -1392,7 +1392,7 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) { "GetBucketPolicy", "SetBucketPolicy"} for _, rpcCall := range webRPCs { - args := &GenericArgs{} + args := &AuthRPCArgs{} reply := &WebGenericRep{} req, nerr := newTestWebRPCRequest("Web."+rpcCall, authorization, args) if nerr != nil { @@ -1409,7 +1409,7 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) { } // Test Web.StorageInfo - storageInfoRequest := GenericArgs{} + storageInfoRequest := AuthRPCArgs{} storageInfoReply := &StorageInfoRep{} req, err := newTestWebRPCRequest("Web.StorageInfo", authorization, storageInfoRequest) if err != nil { diff --git a/vendor/github.com/minio/dsync/README.md b/vendor/github.com/minio/dsync/README.md index 206e2185c..a95f5debe 100644 --- a/vendor/github.com/minio/dsync/README.md +++ b/vendor/github.com/minio/dsync/README.md @@ -193,7 +193,7 @@ The basic steps in the lock process are as follows: ### Unlock process The 
unlock process is really simple: -- boardcast unlock message to all nodes that granted lock +- broadcast unlock message to all nodes that granted lock - if a destination is not available, retry with gradually longer back-off window to still deliver - ignore the 'result' (cover for cases where destination node has gone down and came back up) diff --git a/vendor/github.com/minio/dsync/drwmutex.go b/vendor/github.com/minio/dsync/drwmutex.go index 450c3043b..b15bd4fb8 100644 --- a/vendor/github.com/minio/dsync/drwmutex.go +++ b/vendor/github.com/minio/dsync/drwmutex.go @@ -19,7 +19,7 @@ package dsync import ( cryptorand "crypto/rand" "fmt" - "log" + golog "log" "math" "math/rand" "net" @@ -36,6 +36,12 @@ func init() { dsyncLog = os.Getenv("DSYNC_LOG") == "1" } +func log(msg ...interface{}) { + if dsyncLog { + golog.Println(msg...) + } +} + // DRWMutexAcquireTimeout - tolerance limit to wait for lock acquisition before. const DRWMutexAcquireTimeout = 25 * time.Millisecond // 25ms. @@ -60,23 +66,6 @@ func isLocked(uid string) bool { return len(uid) > 0 } -type LockArgs struct { - Token string - Timestamp time.Time - Name string - Node string - RPCPath string - UID string -} - -func (l *LockArgs) SetToken(token string) { - l.Token = token -} - -func (l *LockArgs) SetTimestamp(tstamp time.Time) { - l.Timestamp = tstamp -} - func NewDRWMutex(name string) *DRWMutex { return &DRWMutex{ Name: name, @@ -152,7 +141,7 @@ func (dm *DRWMutex) lockBlocking(isReadLock bool) { // lock tries to acquire the distributed lock, returning true or false // -func lock(clnts []RPC, locks *[]string, lockName string, isReadLock bool) bool { +func lock(clnts []NetLocker, locks *[]string, lockName string, isReadLock bool) bool { // Create buffered channel of size equal to total number of nodes. 
ch := make(chan Granted, dnodeCount) @@ -160,25 +149,29 @@ func lock(clnts []RPC, locks *[]string, lockName string, isReadLock bool) bool { for index, c := range clnts { // broadcast lock request to all nodes - go func(index int, isReadLock bool, c RPC) { + go func(index int, isReadLock bool, c NetLocker) { // All client methods issuing RPCs are thread-safe and goroutine-safe, // i.e. it is safe to call them from multiple concurrently running go routines. - var locked bool bytesUid := [16]byte{} cryptorand.Read(bytesUid[:]) uid := fmt.Sprintf("%X", bytesUid[:]) - args := LockArgs{Name: lockName, Node: clnts[ownNode].Node(), RPCPath: clnts[ownNode].RPCPath(), UID: uid} + + args := LockArgs{ + UID: uid, + Resource: lockName, + ServerAddr: clnts[ownNode].ServerAddr(), + ServiceEndpoint: clnts[ownNode].ServiceEndpoint(), + } + + var locked bool + var err error if isReadLock { - if err := c.Call("Dsync.RLock", &args, &locked); err != nil { - if dsyncLog { - log.Println("Unable to call Dsync.RLock", err) - } + if locked, err = c.RLock(args); err != nil { + log("Unable to call RLock", err) } } else { - if err := c.Call("Dsync.Lock", &args, &locked); err != nil { - if dsyncLog { - log.Println("Unable to call Dsync.Lock", err) - } + if locked, err = c.Lock(args); err != nil { + log("Unable to call Lock", err) } } @@ -284,7 +277,7 @@ func quorumMet(locks *[]string, isReadLock bool) bool { } // releaseAll releases all locks that are marked as locked -func releaseAll(clnts []RPC, locks *[]string, lockName string, isReadLock bool) { +func releaseAll(clnts []NetLocker, locks *[]string, lockName string, isReadLock bool) { for lock := 0; lock < dnodeCount; lock++ { if isLocked((*locks)[lock]) { sendRelease(clnts[lock], lockName, (*locks)[lock], isReadLock) @@ -385,7 +378,7 @@ func (dm *DRWMutex) ForceUnlock() { } // sendRelease sends a release message to a node that previously granted a lock -func sendRelease(c RPC, name, uid string, isReadLock bool) { +func sendRelease(c 
NetLocker, name, uid string, isReadLock bool) { backOffArray := []time.Duration{ 30 * time.Second, // 30secs. @@ -396,55 +389,47 @@ func sendRelease(c RPC, name, uid string, isReadLock bool) { 1 * time.Hour, // 1hr. } - go func(c RPC, name string) { + go func(c NetLocker, name string) { for _, backOff := range backOffArray { // All client methods issuing RPCs are thread-safe and goroutine-safe, // i.e. it is safe to call them from multiple concurrently running goroutines. - var unlocked bool - args := LockArgs{Name: name, UID: uid} // Just send name & uid (and leave out node and rpcPath; unimportant for unlocks) + args := LockArgs{ + UID: uid, + Resource: name, + ServerAddr: clnts[ownNode].ServerAddr(), + ServiceEndpoint: clnts[ownNode].ServiceEndpoint(), + } + + var err error if len(uid) == 0 { - if err := c.Call("Dsync.ForceUnlock", &args, &unlocked); err == nil { - // ForceUnlock delivered, exit out - return - } else if err != nil { - if dsyncLog { - log.Println("Unable to call Dsync.ForceUnlock", err) - } - if nErr, ok := err.(net.Error); ok && nErr.Timeout() { - // ForceUnlock possibly failed with server timestamp mismatch, server may have restarted. - return - } + if _, err = c.ForceUnlock(args); err != nil { + log("Unable to call ForceUnlock", err) } } else if isReadLock { - if err := c.Call("Dsync.RUnlock", &args, &unlocked); err == nil { - // RUnlock delivered, exit out - return - } else if err != nil { - if dsyncLog { - log.Println("Unable to call Dsync.RUnlock", err) - } - if nErr, ok := err.(net.Error); ok && nErr.Timeout() { - // RUnlock possibly failed with server timestamp mismatch, server may have restarted. 
- return - } + if _, err = c.RUnlock(args); err != nil { + log("Unable to call RUnlock", err) } } else { - if err := c.Call("Dsync.Unlock", &args, &unlocked); err == nil { - // Unlock delivered, exit out - return - } else if err != nil { - if dsyncLog { - log.Println("Unable to call Dsync.Unlock", err) - } - if nErr, ok := err.(net.Error); ok && nErr.Timeout() { - // Unlock possibly failed with server timestamp mismatch, server may have restarted. - return - } + if _, err = c.Unlock(args); err != nil { + log("Unable to call Unlock", err) } } + if err != nil { + // Ignore if err is net.Error and it is occurred due to timeout. + // The cause could have been server timestamp mismatch or server may have restarted. + // FIXME: This is minio specific behaviour and we would need a way to make it generically. + if nErr, ok := err.(net.Error); ok && nErr.Timeout() { + err = nil + } + } + + if err == nil { + return + } + // Wait.. time.Sleep(backOff) } diff --git a/vendor/github.com/minio/dsync/dsync.go b/vendor/github.com/minio/dsync/dsync.go index 9375e0444..ba027d54f 100644 --- a/vendor/github.com/minio/dsync/dsync.go +++ b/vendor/github.com/minio/dsync/dsync.go @@ -18,16 +18,11 @@ package dsync import "errors" -const RpcPath = "/dsync" -const DebugPath = "/debug" - -const DefaultPath = "/rpc/dsync" - // Number of nodes participating in the distributed locking. var dnodeCount int // List of rpc client objects, one per lock server. -var clnts []RPC +var clnts []NetLocker // Index into rpc client array for server running on localhost var ownNode int @@ -38,20 +33,21 @@ var dquorum int // Simple quorum for read operations, set to dNodeCount/2 var dquorumReads int -// SetNodesWithPath - initializes package-level global state variables such as clnts. -// N B - This function should be called only once inside any program that uses -// dsync. 
-func SetNodesWithClients(rpcClnts []RPC, rpcOwnNode int) (err error) { +// Init - initializes package-level global state variables such as clnts. +// N B - This function should be called only once inside any program +// that uses dsync. +func Init(rpcClnts []NetLocker, rpcOwnNode int) (err error) { // Validate if number of nodes is within allowable range. if dnodeCount != 0 { return errors.New("Cannot reinitialize dsync package") - } else if len(rpcClnts) < 4 { - return errors.New("Dsync not designed for less than 4 nodes") + } + if len(rpcClnts) < 4 { + return errors.New("Dsync is not designed for less than 4 nodes") } else if len(rpcClnts) > 16 { - return errors.New("Dsync not designed for more than 16 nodes") - } else if len(rpcClnts)&1 == 1 { - return errors.New("Dsync not designed for an uneven number of nodes") + return errors.New("Dsync is not designed for more than 16 nodes") + } else if len(rpcClnts)%2 != 0 { + return errors.New("Dsync is not designed for an uneven number of nodes") } if rpcOwnNode > len(rpcClnts) { @@ -61,8 +57,8 @@ func SetNodesWithClients(rpcClnts []RPC, rpcOwnNode int) (err error) { dnodeCount = len(rpcClnts) dquorum = dnodeCount/2 + 1 dquorumReads = dnodeCount / 2 - // Initialize node name and rpc path for each RPCClient object. - clnts = make([]RPC, dnodeCount) + // Initialize node name and rpc path for each NetLocker object. + clnts = make([]NetLocker, dnodeCount) copy(clnts, rpcClnts) ownNode = rpcOwnNode diff --git a/vendor/github.com/minio/dsync/rpc-client-interface.go b/vendor/github.com/minio/dsync/rpc-client-interface.go index 035faddea..09d4c61ad 100644 --- a/vendor/github.com/minio/dsync/rpc-client-interface.go +++ b/vendor/github.com/minio/dsync/rpc-client-interface.go @@ -16,15 +16,51 @@ package dsync -import "time" +// LockArgs is minimal required values for any dsync compatible lock operation. +type LockArgs struct { + // Unique ID of lock/unlock request. + UID string -// RPC - is dsync compatible client interface. 
-type RPC interface { - Call(serviceMethod string, args interface { - SetToken(token string) - SetTimestamp(tstamp time.Time) - }, reply interface{}) error - Node() string - RPCPath() string - Close() error + // Resource contains a entity to be locked/unlocked. + Resource string + + // ServerAddr contains the address of the server who requested lock/unlock of the above resource. + ServerAddr string + + // ServiceEndpoint contains the network path of above server to do lock/unlock. + ServiceEndpoint string +} + +// NetLocker is dsync compatible locker interface. +type NetLocker interface { + // Do read lock for given LockArgs. It should return + // * a boolean to indicate success/failure of the operation + // * an error on failure of lock request operation. + RLock(args LockArgs) (bool, error) + + // Do write lock for given LockArgs. It should return + // * a boolean to indicate success/failure of the operation + // * an error on failure of lock request operation. + Lock(args LockArgs) (bool, error) + + // Do read unlock for given LockArgs. It should return + // * a boolean to indicate success/failure of the operation + // * an error on failure of unlock request operation. + RUnlock(args LockArgs) (bool, error) + + // Do write unlock for given LockArgs. It should return + // * a boolean to indicate success/failure of the operation + // * an error on failure of unlock request operation. + Unlock(args LockArgs) (bool, error) + + // Unlock (read/write) forcefully for given LockArgs. It should return + // * a boolean to indicate success/failure of the operation + // * an error on failure of unlock request operation. + ForceUnlock(args LockArgs) (bool, error) + + // Return this lock server address. + ServerAddr() string + + // Return this lock server service endpoint on which the server runs. 
+ ServiceEndpoint() string } diff --git a/vendor/vendor.json b/vendor/vendor.json index e776a4e0e..93d3bcfc8 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -148,10 +148,10 @@ "revisionTime": "2015-11-18T20:00:48-08:00" }, { - "checksumSHA1": "ddMyebkzU3xB7K8dAhM1S+Mflmo=", + "checksumSHA1": "NBGyq2+iTtJvJ+ElG4FzHLe1WSY=", "path": "github.com/minio/dsync", - "revision": "dd0da3743e6668b03559c2905cc661bc0fceeae3", - "revisionTime": "2016-11-28T22:07:34Z" + "revision": "9cafd4d729eb71b31ef7851a8c8f6ceb855d0915", + "revisionTime": "2016-12-23T07:07:24Z" }, { "path": "github.com/minio/go-homedir", From 7bbb532b4bd2c6d879703eebb0ddbd8460dcec1f Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 2 Jan 2017 10:43:56 -0800 Subject: [PATCH 045/100] Add a `isErr` function to check for errs. DisksInfo() should handle collection of some base errors as offlineDisks. --- cmd/errors.go | 16 +++++++++++++++- cmd/xl-v1.go | 4 ++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/cmd/errors.go b/cmd/errors.go index 9e5275b3b..b61685af8 100644 --- a/cmd/errors.go +++ b/cmd/errors.go @@ -121,12 +121,15 @@ func errorsCause(errs []error) []error { return cerrs } -var baseIgnoredErrs = []error{ +// Collection of basic errors. +var baseErrs = []error{ errDiskNotFound, errFaultyDisk, errFaultyRemoteDisk, } +var baseIgnoredErrs = baseErrs + // isErrIgnored returns whether given error is ignored or not. func isErrIgnored(err error, ignoredErrs ...error) bool { err = errorCause(err) @@ -137,3 +140,14 @@ func isErrIgnored(err error, ignoredErrs ...error) bool { } return false } + +// isErr returns whether given error is exact error. 
+func isErr(err error, errs ...error) bool { + err = errorCause(err) + for _, exactErr := range errs { + if err == exactErr { + return true + } + } + return false +} diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go index e6affaa23..b3abfb615 100644 --- a/cmd/xl-v1.go +++ b/cmd/xl-v1.go @@ -169,10 +169,10 @@ func getDisksInfo(disks []StorageAPI) (disksInfo []disk.Info, onlineDisks int, o info, err := storageDisk.DiskInfo() if err != nil { errorIf(err, "Unable to fetch disk info for %#v", storageDisk) - if err == errDiskNotFound { + if isErr(err, baseErrs...) { offlineDisks++ + continue } - continue } onlineDisks++ disksInfo[i] = info From cae62ce54341776e2ff1957a8990fa7c838be758 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 3 Jan 2017 01:33:00 -0800 Subject: [PATCH 046/100] browser: Handle proper login errors. (#3518) Also additionally log the remote address. Fixes #3514 --- cmd/web-handlers.go | 24 ++++++++++++++++++++++-- cmd/web-handlers_test.go | 1 + 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index 5d93a9d81..d358020b4 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -294,6 +294,9 @@ type LoginRep struct { func (web *webAPIHandlers) Login(r *http.Request, args *LoginArgs, reply *LoginRep) error { token, err := authenticateWeb(args.Username, args.Password) if err != nil { + // Make sure to log errors related to browser login, + // for security and auditing reasons. 
+ errorIf(err, "Unable to login request from %s", r.RemoteAddr) return toJSONError(err) } @@ -768,13 +771,30 @@ func toWebAPIError(err error) APIError { HTTPStatusCode: http.StatusForbidden, Description: err.Error(), } - } - if err == errServerNotInitialized { + } else if err == errServerNotInitialized { return APIError{ Code: "XMinioServerNotInitialized", HTTPStatusCode: http.StatusServiceUnavailable, Description: err.Error(), } + } else if err == errInvalidAccessKeyLength { + return APIError{ + Code: "AccessDenied", + HTTPStatusCode: http.StatusForbidden, + Description: err.Error(), + } + } else if err == errInvalidSecretKeyLength { + return APIError{ + Code: "AccessDenied", + HTTPStatusCode: http.StatusForbidden, + Description: err.Error(), + } + } else if err == errInvalidAccessKeyID { + return APIError{ + Code: "AccessDenied", + HTTPStatusCode: http.StatusForbidden, + Description: err.Error(), + } } // Convert error type to api error code. diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index d187f3d64..057b28999 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -149,6 +149,7 @@ func testLoginWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {"", "foo", false}, {"azerty", "", false}, {"azerty", "foo", false}, + {"azerty", "azerty123", false}, {credentials.AccessKey, credentials.SecretKey, true}, } From c8f57133a4384ea98206560b691f50e7520007b9 Mon Sep 17 00:00:00 2001 From: Krishnan Parthasarathi Date: Wed, 4 Jan 2017 13:09:22 +0530 Subject: [PATCH 047/100] Implement list, clear locks REST API w/ pkg/madmin support (#3491) * Filter lock info based on bucket, prefix and time since lock was held * Implement list and clear locks REST API * madmin: Add list and clear locks API * locks: Clear locks matching bucket, prefix, relTime. * Gather lock information across nodes for both list and clear locks admin REST API. 
* docs: Add lock API to management APIs --- cmd/admin-handlers.go | 145 ++++++++++++++- cmd/admin-handlers_test.go | 241 ++++++++++++++++++++++++- cmd/admin-router.go | 10 +- cmd/admin-rpc-client.go | 98 ++++++++-- cmd/admin-rpc-server.go | 38 +++- cmd/admin-rpc-server_test.go | 2 +- cmd/api-errors.go | 6 + cmd/lockinfo-handlers.go | 68 ++++++- cmd/lockinfo-handlers_test.go | 81 +++++++++ cmd/object-api-utils.go | 2 +- docs/admin-api/management-api.md | 82 +++++++++ docs/admin-api/service.md | 33 ---- pkg/madmin/examples/lock-clear.go | 47 +++++ pkg/madmin/examples/lock-list.go | 46 +++++ pkg/madmin/examples/service-restart.go | 4 +- pkg/madmin/examples/service-status.go | 4 +- pkg/madmin/examples/service-stop.go | 4 +- pkg/madmin/lock-commands.go | 151 ++++++++++++++++ pkg/madmin/lock-commands_test.go | 61 +++++++ pkg/madmin/service.go | 6 +- 20 files changed, 1039 insertions(+), 90 deletions(-) create mode 100644 cmd/lockinfo-handlers_test.go create mode 100644 docs/admin-api/management-api.md delete mode 100644 docs/admin-api/service.md create mode 100644 pkg/madmin/examples/lock-clear.go create mode 100644 pkg/madmin/examples/lock-list.go create mode 100644 pkg/madmin/lock-commands.go create mode 100644 pkg/madmin/lock-commands_test.go diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index a08b1a772..f80ec826f 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -19,6 +19,8 @@ package cmd import ( "encoding/json" "net/http" + "net/url" + "time" ) const ( @@ -28,9 +30,8 @@ const ( // ServiceStatusHandler - GET /?service // HTTP header x-minio-operation: status // ---------- -// This implementation of the GET operation fetches server status information. -// provides total disk space available to use, online disks, offline disks and -// quorum threshold. +// Fetches server status information like total disk space available +// to use, online disks, offline disks and quorum threshold. 
func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { @@ -44,15 +45,16 @@ func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r * errorIf(err, "Failed to marshal storage info into json.") return } + // Reply with storage information (across nodes in a + // distributed setup) as json. writeSuccessResponse(w, jsonBytes) } // ServiceStopHandler - POST /?service // HTTP header x-minio-operation: stop // ---------- -// This implementation of the POST operation stops minio server gracefully, -// in a distributed setup stops all the servers in the cluster. Body sent -// if any on client request is ignored. +// Stops minio server gracefully. In a distributed setup, stops all the +// servers in the cluster. func (adminAPI adminAPIHandlers) ServiceStopHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { @@ -67,9 +69,8 @@ func (adminAPI adminAPIHandlers) ServiceStopHandler(w http.ResponseWriter, r *ht // ServiceRestartHandler - POST /?service // HTTP header x-minio-operation: restart // ---------- -// This implementation of the POST operation restarts minio server gracefully, -// in a distributed setup restarts all the servers in the cluster. Body sent -// if any on client request is ignored. +// Restarts minio server gracefully. In a distributed setup, restarts +// all the servers in the cluster. func (adminAPI adminAPIHandlers) ServiceRestartHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { @@ -80,3 +81,129 @@ func (adminAPI adminAPIHandlers) ServiceRestartHandler(w http.ResponseWriter, r w.WriteHeader(http.StatusOK) sendServiceCmd(globalAdminPeers, serviceRestart) } + +// Type-safe lock query params. 
+type lockQueryKey string + +// Only valid query params for list/clear locks management APIs. +const ( + lockBucket lockQueryKey = "bucket" + lockPrefix lockQueryKey = "prefix" + lockOlderThan lockQueryKey = "older-than" +) + +// validateLockQueryParams - Validates query params for list/clear locks management APIs. +func validateLockQueryParams(vars url.Values) (string, string, time.Duration, APIErrorCode) { + bucket := vars.Get(string(lockBucket)) + prefix := vars.Get(string(lockPrefix)) + relTimeStr := vars.Get(string(lockOlderThan)) + + // N B empty bucket name is invalid + if !IsValidBucketName(bucket) { + return "", "", time.Duration(0), ErrInvalidBucketName + } + + // empty prefix is valid. + if !IsValidObjectPrefix(prefix) { + return "", "", time.Duration(0), ErrInvalidObjectName + } + + // If older-than parameter was empty then set it to 0s to list + // all locks older than now. + if relTimeStr == "" { + relTimeStr = "0s" + } + relTime, err := time.ParseDuration(relTimeStr) + if err != nil { + errorIf(err, "Failed to parse duration passed as query value.") + return "", "", time.Duration(0), ErrInvalidDuration + } + + return bucket, prefix, relTime, ErrNone +} + +// ListLocksHandler - GET /?lock&bucket=mybucket&prefix=myprefix&older-than=rel_time +// - bucket is a mandatory query parameter +// - prefix and older-than are optional query parameters +// HTTP header x-minio-operation: list +// --------- +// Lists locks held on a given bucket, prefix and relative time. 
+func (adminAPI adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http.Request) { + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + return + } + + vars := r.URL.Query() + bucket, prefix, relTime, adminAPIErr := validateLockQueryParams(vars) + if adminAPIErr != ErrNone { + writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + return + } + + // Fetch lock information of locks matching bucket/prefix that + // are available since relTime. + volLocks, err := listPeerLocksInfo(globalAdminPeers, bucket, prefix, relTime) + if err != nil { + writeErrorResponse(w, r, ErrInternalError, r.URL.Path) + errorIf(err, "Failed to fetch lock information from remote nodes.") + return + } + + // Marshal list of locks as json. + jsonBytes, err := json.Marshal(volLocks) + if err != nil { + writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) + errorIf(err, "Failed to marshal lock information into json.") + return + } + + // Reply with list of locks held on bucket, matching prefix + // older than relTime supplied, as json. + writeSuccessResponse(w, jsonBytes) +} + +// ClearLocksHandler - POST /?lock&bucket=mybucket&prefix=myprefix&older-than=relTime +// - bucket is a mandatory query parameter +// - prefix and older-than are optional query parameters +// HTTP header x-minio-operation: clear +// --------- +// Clear locks held on a given bucket, prefix and relative time. 
+func (adminAPI adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *http.Request) { + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + return + } + + vars := r.URL.Query() + bucket, prefix, relTime, adminAPIErr := validateLockQueryParams(vars) + if adminAPIErr != ErrNone { + writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + return + } + + // Fetch lock information of locks matching bucket/prefix that + // are available since relTime. + volLocks, err := listPeerLocksInfo(globalAdminPeers, bucket, prefix, relTime) + if err != nil { + writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) + errorIf(err, "Failed to fetch lock information from remote nodes.") + return + } + + // Marshal list of locks as json. + jsonBytes, err := json.Marshal(volLocks) + if err != nil { + writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) + errorIf(err, "Failed to marshal lock information into json.") + return + } + // Remove lock matching bucket/prefix older than relTime. + for _, volLock := range volLocks { + globalNSMutex.ForceUnlock(volLock.Bucket, volLock.Object) + } + // Reply with list of locks cleared, as json. + writeSuccessResponse(w, jsonBytes) +} diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 95f5b853e..91c7252da 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -18,13 +18,17 @@ package cmd import ( "encoding/json" + "fmt" "net/http" "net/http/httptest" + "net/url" "testing" router "github.com/gorilla/mux" ) +// cmdType - Represents different service subcomands like status, stop +// and restart. 
type cmdType int const ( @@ -33,6 +37,7 @@ const ( restartCmd ) +// String - String representation for cmdType func (c cmdType) String() string { switch c { case statusCmd: @@ -45,6 +50,8 @@ func (c cmdType) String() string { return "" } +// apiMethod - Returns the HTTP method corresponding to the admin REST +// API for a given cmdType value. func (c cmdType) apiMethod() string { switch c { case statusCmd: @@ -57,6 +64,8 @@ func (c cmdType) apiMethod() string { return "GET" } +// toServiceSignal - Helper function that translates a given cmdType +// value to its corresponding serviceSignal value. func (c cmdType) toServiceSignal() serviceSignal { switch c { case statusCmd: @@ -69,6 +78,8 @@ func (c cmdType) toServiceSignal() serviceSignal { return serviceStatus } +// testServiceSignalReceiver - Helper function that simulates a +// go-routine waiting on service signal. func testServiceSignalReceiver(cmd cmdType, t *testing.T) { expectedCmd := cmd.toServiceSignal() serviceCmd := <-globalServiceSignalCh @@ -77,12 +88,19 @@ func testServiceSignalReceiver(cmd cmdType, t *testing.T) { } } -func getAdminCmdRequest(cmd cmdType, cred credential) (*http.Request, error) { +// getServiceCmdRequest - Constructs a management REST API request for service +// subcommands for a given cmdType value. +func getServiceCmdRequest(cmd cmdType, cred credential) (*http.Request, error) { req, err := newTestRequest(cmd.apiMethod(), "/?service", 0, nil) if err != nil { return nil, err } + + // minioAdminOpHeader is to identify the request as a + // management REST API request. req.Header.Set(minioAdminOpHeader, cmd.String()) + + // management REST API uses signature V4 for authentication. err = signRequestV4(req, cred.AccessKey, cred.SecretKey) if err != nil { return nil, err @@ -90,18 +108,26 @@ func getAdminCmdRequest(cmd cmdType, cred credential) (*http.Request, error) { return req, nil } +// testServicesCmdHandler - parametrizes service subcommand tests on +// cmdType value. 
func testServicesCmdHandler(cmd cmdType, t *testing.T) { + // Initialize configuration for access/secret credentials. rootPath, err := newTestConfig("us-east-1") if err != nil { t.Fatalf("Unable to initialize server config. %s", err) } defer removeAll(rootPath) - // Initialize admin peers to make admin RPC calls. + // Initialize admin peers to make admin RPC calls. Note: In a + // single node setup, this degenerates to a simple function + // call under the hood. eps, err := parseStorageEndpoints([]string{"http://localhost"}) if err != nil { t.Fatalf("Failed to parse storage end point - %v", err) } + + // Set globalMinioAddr to be able to distinguish local endpoints from remote. + globalMinioAddr = eps[0].Host initGlobalAdminPeers(eps) if cmd == statusCmd { @@ -128,7 +154,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) { registerAdminRouter(adminRouter) rec := httptest.NewRecorder() - req, err := getAdminCmdRequest(cmd, credentials) + req, err := getServiceCmdRequest(cmd, credentials) if err != nil { t.Fatalf("Failed to build service status request %v", err) } @@ -151,14 +177,223 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) { } } +// Test for service status management REST API. func TestServiceStatusHandler(t *testing.T) { testServicesCmdHandler(statusCmd, t) } +// Test for service stop management REST API. func TestServiceStopHandler(t *testing.T) { testServicesCmdHandler(stopCmd, t) } +// Test for service restart management REST API. func TestServiceRestartHandler(t *testing.T) { testServicesCmdHandler(restartCmd, t) } + +// Test for locks list management REST API. +func TestListLocksHandler(t *testing.T) { + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Unable to initialize server config. %s", err) + } + defer removeAll(rootPath) + + // Initialize admin peers to make admin RPC calls. 
+ eps, err := parseStorageEndpoints([]string{"http://localhost"}) + if err != nil { + t.Fatalf("Failed to parse storage end point - %v", err) + } + + // Set globalMinioAddr to be able to distinguish local endpoints from remote. + globalMinioAddr = eps[0].Host + initGlobalAdminPeers(eps) + + testCases := []struct { + bucket string + prefix string + relTime string + expectedStatus int + }{ + // Test 1 - valid testcase + { + bucket: "mybucket", + prefix: "myobject", + relTime: "1s", + expectedStatus: 200, + }, + // Test 2 - invalid duration + { + bucket: "mybucket", + prefix: "myprefix", + relTime: "invalidDuration", + expectedStatus: 400, + }, + // Test 3 - invalid bucket name + { + bucket: `invalid\\Bucket`, + prefix: "myprefix", + relTime: "1h", + expectedStatus: 400, + }, + // Test 4 - invalid prefix + { + bucket: "mybucket", + prefix: `invalid\\Prefix`, + relTime: "1h", + expectedStatus: 400, + }, + } + + adminRouter := router.NewRouter() + registerAdminRouter(adminRouter) + + for i, test := range testCases { + queryStr := fmt.Sprintf("&bucket=%s&prefix=%s&older-than=%s", test.bucket, test.prefix, test.relTime) + req, err := newTestRequest("GET", "/?lock"+queryStr, 0, nil) + if err != nil { + t.Fatalf("Test %d - Failed to construct list locks request - %v", i+1, err) + } + req.Header.Set(minioAdminOpHeader, "list") + + cred := serverConfig.GetCredential() + err = signRequestV4(req, cred.AccessKey, cred.SecretKey) + if err != nil { + t.Fatalf("Test %d - Failed to sign list locks request - %v", i+1, err) + } + rec := httptest.NewRecorder() + adminRouter.ServeHTTP(rec, req) + if test.expectedStatus != rec.Code { + t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.expectedStatus, rec.Code) + } + } +} + +// Test for locks clear management REST API. +func TestClearLocksHandler(t *testing.T) { + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Unable to initialize server config. 
%s", err) + } + defer removeAll(rootPath) + + // Initialize admin peers to make admin RPC calls. + eps, err := parseStorageEndpoints([]string{"http://localhost"}) + if err != nil { + t.Fatalf("Failed to parse storage end point - %v", err) + } + initGlobalAdminPeers(eps) + + testCases := []struct { + bucket string + prefix string + relTime string + expectedStatus int + }{ + // Test 1 - valid testcase + { + bucket: "mybucket", + prefix: "myobject", + relTime: "1s", + expectedStatus: 200, + }, + // Test 2 - invalid duration + { + bucket: "mybucket", + prefix: "myprefix", + relTime: "invalidDuration", + expectedStatus: 400, + }, + // Test 3 - invalid bucket name + { + bucket: `invalid\\Bucket`, + prefix: "myprefix", + relTime: "1h", + expectedStatus: 400, + }, + // Test 4 - invalid prefix + { + bucket: "mybucket", + prefix: `invalid\\Prefix`, + relTime: "1h", + expectedStatus: 400, + }, + } + + adminRouter := router.NewRouter() + registerAdminRouter(adminRouter) + + for i, test := range testCases { + queryStr := fmt.Sprintf("&bucket=%s&prefix=%s&older-than=%s", test.bucket, test.prefix, test.relTime) + req, err := newTestRequest("POST", "/?lock"+queryStr, 0, nil) + if err != nil { + t.Fatalf("Test %d - Failed to construct clear locks request - %v", i+1, err) + } + req.Header.Set(minioAdminOpHeader, "clear") + + cred := serverConfig.GetCredential() + err = signRequestV4(req, cred.AccessKey, cred.SecretKey) + if err != nil { + t.Fatalf("Test %d - Failed to sign clear locks request - %v", i+1, err) + } + rec := httptest.NewRecorder() + adminRouter.ServeHTTP(rec, req) + if test.expectedStatus != rec.Code { + t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.expectedStatus, rec.Code) + } + } +} + +// Test for lock query param validation helper function. +func TestValidateLockQueryParams(t *testing.T) { + // Sample query values for test cases. 
+ allValidVal := url.Values{} + allValidVal.Set(string(lockBucket), "bucket") + allValidVal.Set(string(lockPrefix), "prefix") + allValidVal.Set(string(lockOlderThan), "1s") + + invalidBucketVal := url.Values{} + invalidBucketVal.Set(string(lockBucket), `invalid\\Bucket`) + invalidBucketVal.Set(string(lockPrefix), "prefix") + invalidBucketVal.Set(string(lockOlderThan), "invalidDuration") + + invalidPrefixVal := url.Values{} + invalidPrefixVal.Set(string(lockBucket), "bucket") + invalidPrefixVal.Set(string(lockPrefix), `invalid\\PRefix`) + invalidPrefixVal.Set(string(lockOlderThan), "invalidDuration") + + invalidOlderThanVal := url.Values{} + invalidOlderThanVal.Set(string(lockBucket), "bucket") + invalidOlderThanVal.Set(string(lockPrefix), "prefix") + invalidOlderThanVal.Set(string(lockOlderThan), "invalidDuration") + + testCases := []struct { + qVals url.Values + apiErr APIErrorCode + }{ + { + qVals: invalidBucketVal, + apiErr: ErrInvalidBucketName, + }, + { + qVals: invalidPrefixVal, + apiErr: ErrInvalidObjectName, + }, + { + qVals: invalidOlderThanVal, + apiErr: ErrInvalidDuration, + }, + { + qVals: allValidVal, + apiErr: ErrNone, + }, + } + + for i, test := range testCases { + _, _, _, apiErr := validateLockQueryParams(test.qVals) + if apiErr != test.apiErr { + t.Errorf("Test %d - Expected error %v but received %v", i+1, test.apiErr, apiErr) + } + } +} diff --git a/cmd/admin-router.go b/cmd/admin-router.go index 60b5c3aeb..e74d2edc0 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -29,7 +29,7 @@ func registerAdminRouter(mux *router.Router) { // Admin router adminRouter := mux.NewRoute().PathPrefix("/").Subrouter() - /// Admin operations + /// Service operations // Service status adminRouter.Methods("GET").Queries("service", "").Headers(minioAdminOpHeader, "status").HandlerFunc(adminAPI.ServiceStatusHandler) @@ -37,4 +37,12 @@ func registerAdminRouter(mux *router.Router) { adminRouter.Methods("POST").Queries("service", "").Headers(minioAdminOpHeader, 
"stop").HandlerFunc(adminAPI.ServiceStopHandler) // Service restart adminRouter.Methods("POST").Queries("service", "").Headers(minioAdminOpHeader, "restart").HandlerFunc(adminAPI.ServiceRestartHandler) + + /// Lock operations + + // List Locks + adminRouter.Methods("GET").Queries("lock", "").Headers(minioAdminOpHeader, "list").HandlerFunc(adminAPI.ListLocksHandler) + + // Clear locks + adminRouter.Methods("POST").Queries("lock", "").Headers(minioAdminOpHeader, "clear").HandlerFunc(adminAPI.ClearLocksHandler) } diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go index 14f65626d..3598b1151 100644 --- a/cmd/admin-rpc-client.go +++ b/cmd/admin-rpc-client.go @@ -20,6 +20,7 @@ import ( "net/url" "path" "sync" + "time" ) // localAdminClient - represents admin operation to be executed locally. @@ -32,11 +33,12 @@ type remoteAdminClient struct { *AuthRPCClient } -// stopRestarter - abstracts stop and restart operations for both -// local and remote execution. -type stopRestarter interface { +// adminCmdRunner - abstracts local and remote execution of admin +// commands like service stop and service restart. +type adminCmdRunner interface { Stop() error Restart() error + ListLocks(bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error) } // Stop - Sends a message over channel to the go-routine responsible @@ -53,24 +55,43 @@ func (lc localAdminClient) Restart() error { return nil } +// ListLocks - Fetches lock information from local lock instrumentation. +func (lc localAdminClient) ListLocks(bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error) { + return listLocksInfo(bucket, prefix, relTime), nil +} + // Stop - Sends stop command to remote server via RPC. func (rc remoteAdminClient) Stop() error { args := AuthRPCArgs{} reply := AuthRPCReply{} - return rc.Call("Service.Shutdown", &args, &reply) + return rc.Call("Admin.Shutdown", &args, &reply) } // Restart - Sends restart command to remote server via RPC. 
func (rc remoteAdminClient) Restart() error { args := AuthRPCArgs{} reply := AuthRPCReply{} - return rc.Call("Service.Restart", &args, &reply) + return rc.Call("Admin.Restart", &args, &reply) +} + +// ListLocks - Sends list locks command to remote server via RPC. +func (rc remoteAdminClient) ListLocks(bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error) { + listArgs := ListLocksQuery{ + bucket: bucket, + prefix: prefix, + relTime: relTime, + } + var reply ListLocksReply + if err := rc.Call("Admin.ListLocks", &listArgs, &reply); err != nil { + return nil, err + } + return reply.volLocks, nil } // adminPeer - represents an entity that implements Stop and Restart methods. type adminPeer struct { - addr string - svcClnt stopRestarter + addr string + cmdRunner adminCmdRunner } // type alias for a collection of adminPeer. @@ -105,13 +126,13 @@ func makeAdminPeers(eps []*url.URL) adminPeers { secretKey: serverCred.SecretKey, serverAddr: ep.Host, secureConn: isSSL(), - serviceEndpoint: path.Join(reservedBucket, servicePath), - serviceName: "Service", + serviceEndpoint: path.Join(reservedBucket, adminPath), + serviceName: "Admin", } servicePeers = append(servicePeers, adminPeer{ - addr: ep.Host, - svcClnt: &remoteAdminClient{newAuthRPCClient(cfg)}, + addr: ep.Host, + cmdRunner: &remoteAdminClient{newAuthRPCClient(cfg)}, }) seenAddr[ep.Host] = true } @@ -129,9 +150,9 @@ func initGlobalAdminPeers(eps []*url.URL) { func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) { switch cmd { case serviceStop: - err = cp.svcClnt.Stop() + err = cp.cmdRunner.Stop() case serviceRestart: - err = cp.svcClnt.Restart() + err = cp.cmdRunner.Restart() } return err } @@ -147,9 +168,58 @@ func sendServiceCmd(cps adminPeers, cmd serviceSignal) { wg.Add(1) go func(idx int) { defer wg.Done() - errs[idx] = invokeServiceCmd(remotePeers[idx], cmd) + // we use idx+1 because remotePeers slice is 1 position shifted w.r.t cps + errs[idx+1] = invokeServiceCmd(remotePeers[idx], 
cmd) }(i) } wg.Wait() errs[0] = invokeServiceCmd(cps[0], cmd) } + +func listPeerLocksInfo(peers adminPeers, bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error) { + // Used to aggregate volume lock information from all nodes. + allLocks := make([][]VolumeLockInfo, len(peers)) + errs := make([]error, len(peers)) + var wg sync.WaitGroup + localPeer := peers[0] + remotePeers := peers[1:] + for i, remotePeer := range remotePeers { + wg.Add(1) + go func(idx int, remotePeer adminPeer) { + defer wg.Done() + // `remotePeers` is right-shifted by one position relative to `peers` + allLocks[idx], errs[idx] = remotePeer.cmdRunner.ListLocks(bucket, prefix, relTime) + }(i+1, remotePeer) + } + wg.Wait() + allLocks[0], errs[0] = localPeer.cmdRunner.ListLocks(bucket, prefix, relTime) + + // Summarizing errors received for ListLocks RPC across all + // nodes. N B the possible unavailability of quorum in errors + // applies only to distributed setup. + errCount, err := reduceErrs(errs, []error{}) + if err != nil { + if errCount >= (len(peers)/2 + 1) { + return nil, err + } + return nil, InsufficientReadQuorum{} + } + + // Group lock information across nodes by (bucket, object) + // pair. For readability only. + paramLockMap := make(map[nsParam][]VolumeLockInfo) + for _, nodeLocks := range allLocks { + for _, lockInfo := range nodeLocks { + param := nsParam{ + volume: lockInfo.Bucket, + path: lockInfo.Object, + } + paramLockMap[param] = append(paramLockMap[param], lockInfo) + } + } + groupedLockInfos := []VolumeLockInfo{} + for _, volLocks := range paramLockMap { + groupedLockInfos = append(groupedLockInfos, volLocks...) 
+ } + return groupedLockInfos, nil +} diff --git a/cmd/admin-rpc-server.go b/cmd/admin-rpc-server.go index 62751d114..0980cef11 100644 --- a/cmd/admin-rpc-server.go +++ b/cmd/admin-rpc-server.go @@ -18,20 +18,35 @@ package cmd import ( "net/rpc" + "time" router "github.com/gorilla/mux" ) -const servicePath = "/admin/service" +const adminPath = "/admin" -// serviceCmd - exports RPC methods for service status, stop and +// adminCmd - exports RPC methods for service status, stop and // restart commands. -type serviceCmd struct { +type adminCmd struct { AuthRPCServer } +// ListLocksQuery - wraps ListLocks API's query values to send over RPC. +type ListLocksQuery struct { + AuthRPCArgs + bucket string + prefix string + relTime time.Duration +} + +// ListLocksReply - wraps ListLocks response over RPC. +type ListLocksReply struct { + AuthRPCReply + volLocks []VolumeLockInfo +} + // Shutdown - Shutdown this instance of minio server. -func (s *serviceCmd) Shutdown(args *AuthRPCArgs, reply *AuthRPCReply) error { +func (s *adminCmd) Shutdown(args *AuthRPCArgs, reply *AuthRPCReply) error { if err := args.IsAuthenticated(); err != nil { return err } @@ -41,7 +56,7 @@ func (s *serviceCmd) Shutdown(args *AuthRPCArgs, reply *AuthRPCReply) error { } // Restart - Restart this instance of minio server. -func (s *serviceCmd) Restart(args *AuthRPCArgs, reply *AuthRPCReply) error { +func (s *adminCmd) Restart(args *AuthRPCArgs, reply *AuthRPCReply) error { if err := args.IsAuthenticated(); err != nil { return err } @@ -50,16 +65,23 @@ func (s *serviceCmd) Restart(args *AuthRPCArgs, reply *AuthRPCReply) error { return nil } +// ListLocks - lists locks held by requests handled by this server instance. 
+func (s *adminCmd) ListLocks(query *ListLocksQuery, reply *ListLocksReply) error { + volLocks := listLocksInfo(query.bucket, query.prefix, query.relTime) + *reply = ListLocksReply{volLocks: volLocks} + return nil +} + // registerAdminRPCRouter - registers RPC methods for service status, // stop and restart commands. func registerAdminRPCRouter(mux *router.Router) error { - adminRPCHandler := &serviceCmd{} + adminRPCHandler := &adminCmd{} adminRPCServer := rpc.NewServer() - err := adminRPCServer.RegisterName("Service", adminRPCHandler) + err := adminRPCServer.RegisterName("Admin", adminRPCHandler) if err != nil { return traceError(err) } adminRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() - adminRouter.Path(servicePath).Handler(adminRPCServer) + adminRouter.Path(adminPath).Handler(adminRPCServer) return nil } diff --git a/cmd/admin-rpc-server_test.go b/cmd/admin-rpc-server_test.go index 1ebc24568..6a4a36603 100644 --- a/cmd/admin-rpc-server_test.go +++ b/cmd/admin-rpc-server_test.go @@ -28,7 +28,7 @@ func testAdminCmd(cmd cmdType, t *testing.T) { } defer removeAll(rootPath) - adminServer := serviceCmd{} + adminServer := adminCmd{} creds := serverConfig.GetCredential() args := LoginRPCArgs{ Username: creds.AccessKey, diff --git a/cmd/api-errors.go b/cmd/api-errors.go index a3958d250..a23a7e7d6 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -110,6 +110,7 @@ const ( ErrInvalidQuerySignatureAlgo ErrInvalidQueryParams ErrBucketAlreadyOwnedByYou + ErrInvalidDuration // Add new error codes here. // Bucket notification related errors. 
@@ -477,6 +478,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{ Description: "Your previous request to create the named bucket succeeded and you already own it.", HTTPStatusCode: http.StatusConflict, }, + ErrInvalidDuration: { + Code: "InvalidDuration", + Description: "Relative duration provided in the request is invalid.", + HTTPStatusCode: http.StatusBadRequest, + }, /// Bucket notification related errors. ErrEventNotification: { diff --git a/cmd/lockinfo-handlers.go b/cmd/lockinfo-handlers.go index 50f7429b6..70dd4e76f 100644 --- a/cmd/lockinfo-handlers.go +++ b/cmd/lockinfo-handlers.go @@ -16,7 +16,10 @@ package cmd -import "time" +import ( + "strings" + "time" +) // SystemLockState - Structure to fill the lock state of entire object storage. // That is the total locks held, total calls blocked on locks and state of all the locks for the entire system. @@ -26,7 +29,7 @@ type SystemLockState struct { // be released. TotalBlockedLocks int64 `json:"totalBlockedLocks"` // Count of operations which has successfully acquired the lock but - // hasn't unlocked yet( operation in progress). + // hasn't unlocked yet (operation in progress). TotalAcquiredLocks int64 `json:"totalAcquiredLocks"` LocksInfoPerObject []VolumeLockInfo `json:"locksInfoPerObject"` } @@ -64,11 +67,13 @@ func getSystemLockState() (SystemLockState, error) { globalNSMutex.lockMapMutex.Lock() defer globalNSMutex.lockMapMutex.Unlock() - lockState := SystemLockState{} - - lockState.TotalBlockedLocks = globalNSMutex.counters.blocked - lockState.TotalLocks = globalNSMutex.counters.total - lockState.TotalAcquiredLocks = globalNSMutex.counters.granted + // Fetch current time once instead of fetching system time for every lock. 
+ timeNow := time.Now().UTC() + lockState := SystemLockState{ + TotalAcquiredLocks: globalNSMutex.counters.granted, + TotalLocks: globalNSMutex.counters.total, + TotalBlockedLocks: globalNSMutex.counters.blocked, + } for param, debugLock := range globalNSMutex.debugLockMap { volLockInfo := VolumeLockInfo{} @@ -84,10 +89,57 @@ func getSystemLockState() (SystemLockState, error) { LockType: lockInfo.lType, Status: lockInfo.status, Since: lockInfo.since, - Duration: time.Now().UTC().Sub(lockInfo.since), + Duration: timeNow.Sub(lockInfo.since), }) } lockState.LocksInfoPerObject = append(lockState.LocksInfoPerObject, volLockInfo) } return lockState, nil } + +// listLocksInfo - Fetches locks held on bucket, matching prefix older than relTime. +func listLocksInfo(bucket, prefix string, relTime time.Duration) []VolumeLockInfo { + globalNSMutex.lockMapMutex.Lock() + defer globalNSMutex.lockMapMutex.Unlock() + + // Fetch current time once instead of fetching system time for every lock. + timeNow := time.Now().UTC() + volumeLocks := []VolumeLockInfo{} + + for param, debugLock := range globalNSMutex.debugLockMap { + if param.volume != bucket { + continue + } + // N B empty prefix matches all param.path. + if !strings.HasPrefix(param.path, prefix) { + continue + } + + volLockInfo := VolumeLockInfo{ + Bucket: param.volume, + Object: param.path, + LocksOnObject: debugLock.counters.total, + TotalBlockedLocks: debugLock.counters.blocked, + LocksAcquiredOnObject: debugLock.counters.granted, + } + // Filter locks that are held on bucket, prefix. + for opsID, lockInfo := range debugLock.lockInfo { + elapsed := timeNow.Sub(lockInfo.since) + if elapsed < relTime { + continue + } + // Add locks that are older than relTime. 
+ volLockInfo.LockDetailsOnObject = append(volLockInfo.LockDetailsOnObject, + OpsLockState{ + OperationID: opsID, + LockSource: lockInfo.lockSource, + LockType: lockInfo.lType, + Status: lockInfo.status, + Since: lockInfo.since, + Duration: elapsed, + }) + volumeLocks = append(volumeLocks, volLockInfo) + } + } + return volumeLocks +} diff --git a/cmd/lockinfo-handlers_test.go b/cmd/lockinfo-handlers_test.go new file mode 100644 index 000000000..78c969151 --- /dev/null +++ b/cmd/lockinfo-handlers_test.go @@ -0,0 +1,81 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "fmt" + "testing" + "time" +) + +// TestListLocksInfo - Test for listLocksInfo. +func TestListLocksInfo(t *testing.T) { + // Initialize globalNSMutex to validate listing of lock + // instrumentation information. + isDistXL := false + initNSLock(isDistXL) + + // Acquire a few locks to populate lock instrumentation. + // Take 10 read locks on bucket1/prefix1/obj1 + for i := 0; i < 10; i++ { + readLk := globalNSMutex.NewNSLock("bucket1", "prefix1/obj1") + readLk.RLock() + } + + // Take write locks on bucket1/prefix/obj{11..19} + for i := 0; i < 10; i++ { + wrLk := globalNSMutex.NewNSLock("bucket1", fmt.Sprintf("prefix1/obj%d", 10+i)) + wrLk.Lock() + } + + testCases := []struct { + bucket string + prefix string + relTime time.Duration + numLocks int + }{ + // Test 1 - Matches all the locks acquired above. 
+ { + bucket: "bucket1", + prefix: "prefix1", + relTime: time.Duration(0 * time.Second), + numLocks: 20, + }, + // Test 2 - Bucket doesn't match. + { + bucket: "bucket", + prefix: "prefix1", + relTime: time.Duration(0 * time.Second), + numLocks: 0, + }, + // Test 3 - Prefix doesn't match. + { + bucket: "bucket1", + prefix: "prefix11", + relTime: time.Duration(0 * time.Second), + numLocks: 0, + }, + } + + for i, test := range testCases { + actual := listLocksInfo(test.bucket, test.prefix, test.relTime) + if len(actual) != test.numLocks { + t.Errorf("Test %d - Expected %d locks but observed %d locks", + i+1, test.numLocks, len(actual)) + } + } +} diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index f953a73b9..d07a37c6c 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -49,7 +49,7 @@ var isIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) // See: http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html func IsValidBucketName(bucket string) bool { // Special case when bucket is equal to 'metaBucket'. - if bucket == minioMetaBucket { + if bucket == minioMetaBucket || bucket == minioMetaMultipartBucket { return true } if len(bucket) < 3 || len(bucket) > 63 { diff --git a/docs/admin-api/management-api.md b/docs/admin-api/management-api.md new file mode 100644 index 000000000..2655c1698 --- /dev/null +++ b/docs/admin-api/management-api.md @@ -0,0 +1,82 @@ +# Management REST API + +## Authentication +- AWS signatureV4 +- We use "minio" as region. Here region is set only for signature calculation. 
+ +## List of management APIs +- Service + - Stop + - Restart + - Status + +- Locks + - List + - Clear + +- Healing + +### Service Management APIs +* Stop + - POST /?service + - x-minio-operation: stop + - Response: On success 200 + +* Restart + - POST /?service + - x-minio-operation: restart + - Response: On success 200 + +* Status + - GET /?service + - x-minio-operation: status + - Response: On success 200, return json formatted StorageInfo object. + +### Lock Management APIs +* ListLocks + - GET /?lock&bucket=mybucket&prefix=myprefix&older-than=rel_time + - x-minio-operation: list + - Response: On success 200, json encoded response containing all locks held, older than rel_time. e.g, older than 3 hours. + - Possible error responses + - ErrInvalidBucketName + + InvalidBucketName + The specified bucket is not valid. + + + / + 3L137 + 3L137 + + + - ErrInvalidObjectName + + XMinioInvalidObjectName + Object name contains unsupported characters. Unsupported characters are `^*|\" + + + / + 3L137 + 3L137 + + + - ErrInvalidDuration + + InvalidDuration + Relative duration provided in the request is invalid. + + + / + 3L137 + 3L137 + + + +* ClearLocks + - POST /?lock&bucket=mybucket&prefix=myprefix&older-than=rel_time + - x-minio-operation: clear + - Response: On success 200, json encoded response containing all locks cleared, older than rel_time. e.g, older than 3 hours. + - Possible error responses, similar to errors listed in ListLocks. + - ErrInvalidBucketName + - ErrInvalidObjectName + - ErrInvalidDuration diff --git a/docs/admin-api/service.md b/docs/admin-api/service.md deleted file mode 100644 index bc1e580ab..000000000 --- a/docs/admin-api/service.md +++ /dev/null @@ -1,33 +0,0 @@ -# Service REST API - -## Authentication -- AWS signatureV4 -- We use "minio" as region. Here region is set only for signature calculation. 
-
-## List of management APIs
-- Service
- - Stop
- - Restart
- - Status
-
-- Locks
- - List
- - Clear
-
-- Healing
-
-### Service Management APIs
-* Stop
- - POST /?service
- - x-minio-operation: stop
- - Response: On success 200
-
-* Restart
- - POST /?service
- - x-minio-operation: restart
- - Response: On success 200
-
-* Status
- - GET /?service
- - x-minio-operation: status
- - Response: On success 200, return json formatted StorageInfo object.
diff --git a/pkg/madmin/examples/lock-clear.go b/pkg/madmin/examples/lock-clear.go
new file mode 100644
index 000000000..2f7a30de4
--- /dev/null
+++ b/pkg/madmin/examples/lock-clear.go
@@ -0,0 +1,47 @@
+// +build ignore
+
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package main
+
+import (
+ "log"
+ "time"
+
+ "github.com/minio/minio/pkg/madmin"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
+ // dummy values, please replace them with original values.
+
+ // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
+ // New returns an Minio Admin client object.
+ madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Clear locks held on mybucket/myprefix older than olderThan seconds. 
+ olderThan := time.Duration(30 * time.Second)
+ locksCleared, err := madmClnt.ClearLocks("mybucket", "myprefix", olderThan)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println(locksCleared)
+}
diff --git a/pkg/madmin/examples/lock-list.go b/pkg/madmin/examples/lock-list.go
new file mode 100644
index 000000000..bc1e9ff08
--- /dev/null
+++ b/pkg/madmin/examples/lock-list.go
@@ -0,0 +1,46 @@
+// +build ignore
+
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package main
+
+import (
+ "log"
+ "time"
+
+ "github.com/minio/minio/pkg/madmin"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
+ // dummy values, please replace them with original values.
+
+ // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
+ // New returns an Minio Admin client object.
+ madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // List locks held on mybucket/myprefix older than 30s. 
+ locksHeld, err := madmClnt.ListLocks("mybucket", "myprefix", time.Duration(30*time.Second))
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println(locksHeld)
+}
diff --git a/pkg/madmin/examples/service-restart.go b/pkg/madmin/examples/service-restart.go
index 26d991488..ba76e1321 100644
--- a/pkg/madmin/examples/service-restart.go
+++ b/pkg/madmin/examples/service-restart.go
@@ -29,9 +29,7 @@ func main() {
 // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are
 // dummy values, please replace them with original values.

- // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
- // This boolean value is the last argument for New().
-
+ // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
 // New returns an Minio Admin client object.
 madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
 if err != nil {
diff --git a/pkg/madmin/examples/service-status.go b/pkg/madmin/examples/service-status.go
index fbee6fb22..dd8f27faa 100644
--- a/pkg/madmin/examples/service-status.go
+++ b/pkg/madmin/examples/service-status.go
@@ -29,9 +29,7 @@ func main() {
 // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
 // dummy values, please replace them with original values.

- // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
- // This boolean value is the last argument for New().
-
+ // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
 // New returns an Minio Admin client object. 
 madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
 if err != nil {
diff --git a/pkg/madmin/examples/service-stop.go b/pkg/madmin/examples/service-stop.go
index 056140774..05b446eb2 100644
--- a/pkg/madmin/examples/service-stop.go
+++ b/pkg/madmin/examples/service-stop.go
@@ -29,9 +29,7 @@ func main() {
 // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
 // dummy values, please replace them with original values.

- // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
- // This boolean value is the last argument for New().
-
+ // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
 // New returns an Minio Admin client object.
 madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
 if err != nil {
diff --git a/pkg/madmin/lock-commands.go b/pkg/madmin/lock-commands.go
new file mode 100644
index 000000000..1bae3a8df
--- /dev/null
+++ b/pkg/madmin/lock-commands.go
@@ -0,0 +1,151 @@
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. 
+ * + */ + +package madmin + +import ( + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" +) + +type statusType string + +const ( + runningStatus statusType = "Running" + blockedStatus statusType = "Blocked" +) + +type lockType string + +const ( + debugRLockStr lockType = "RLock" + debugWLockStr lockType = "WLock" +) + +// OpsLockState - represents lock specific details. +type OpsLockState struct { + OperationID string `json:"opsID"` // String containing operation ID. + LockSource string `json:"lockSource"` // Operation type (GetObject, PutObject...) + LockType lockType `json:"lockType"` // Lock type (RLock, WLock) + Status statusType `json:"status"` // Status can be Running/Ready/Blocked. + Since time.Time `json:"statusSince"` // Time when the lock was initially held. + Duration time.Duration `json:"statusDuration"` // Duration since the lock was held. +} + +// VolumeLockInfo - represents summary and individual lock details of all +// locks held on a given bucket, object. +type VolumeLockInfo struct { + Bucket string `json:"bucket"` + Object string `json:"object"` + // All locks blocked + running for given pair. + LocksOnObject int64 `json:"locksOnObject"` + // Count of operations which has successfully acquired the lock + // but hasn't unlocked yet( operation in progress). + LocksAcquiredOnObject int64 `json:"locksAcquiredOnObject"` + // Count of operations which are blocked waiting for the lock + // to be released. + TotalBlockedLocks int64 `json:"locksBlockedOnObject"` + // State information containing state of the locks for all operations + // on given pair. + LockDetailsOnObject []OpsLockState `json:"lockDetailsOnObject"` +} + +// getLockInfos - unmarshal []VolumeLockInfo from a reader. 
+func getLockInfos(body io.Reader) ([]VolumeLockInfo, error) { + respBytes, err := ioutil.ReadAll(body) + if err != nil { + return nil, err + } + + var lockInfos []VolumeLockInfo + + err = json.Unmarshal(respBytes, &lockInfos) + if err != nil { + return nil, err + } + + return lockInfos, nil +} + +// ListLocks - Calls List Locks Management API to fetch locks matching +// bucket, prefix and held before the duration supplied. +func (adm *AdminClient) ListLocks(bucket, prefix string, olderThan time.Duration) ([]VolumeLockInfo, error) { + queryVal := make(url.Values) + queryVal.Set("lock", "") + queryVal.Set("bucket", bucket) + queryVal.Set("prefix", prefix) + queryVal.Set("older-than", olderThan.String()) + + hdrs := make(http.Header) + hdrs.Set(minioAdminOpHeader, "list") + + reqData := requestData{ + queryValues: queryVal, + customHeaders: hdrs, + } + + // Execute GET on /?lock to list locks. + resp, err := adm.executeMethod("GET", reqData) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, errors.New("Got HTTP Status: " + resp.Status) + } + + return getLockInfos(resp.Body) +} + +// ClearLocks - Calls Clear Locks Management API to clear locks held +// on bucket, matching prefix older than duration supplied. +func (adm *AdminClient) ClearLocks(bucket, prefix string, olderThan time.Duration) ([]VolumeLockInfo, error) { + queryVal := make(url.Values) + queryVal.Set("lock", "") + queryVal.Set("bucket", bucket) + queryVal.Set("prefix", prefix) + queryVal.Set("older-than", olderThan.String()) + + hdrs := make(http.Header) + hdrs.Set(minioAdminOpHeader, "clear") + + reqData := requestData{ + queryValues: queryVal, + customHeaders: hdrs, + } + + // Execute POST on /?lock to clear locks. 
+ resp, err := adm.executeMethod("POST", reqData) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, errors.New("Got HTTP Status: " + resp.Status) + } + + return getLockInfos(resp.Body) +} diff --git a/pkg/madmin/lock-commands_test.go b/pkg/madmin/lock-commands_test.go new file mode 100644 index 000000000..447f64cea --- /dev/null +++ b/pkg/madmin/lock-commands_test.go @@ -0,0 +1,61 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package madmin + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" +) + +// Test for getLockInfos helper function. +func TestGetLockInfos(t *testing.T) { + testCases := []struct { + // Used to construct a io.Reader holding xml serialized lock information + inputLocks []VolumeLockInfo + }{ + // To build a reader with _no_ lock information. + { + inputLocks: []VolumeLockInfo{}, + }, + // To build a reader with _one_ lock information. 
+ { + inputLocks: []VolumeLockInfo{{Bucket: "bucket", Object: "object"}}, + }, + } + for i, test := range testCases { + jsonBytes, err := json.Marshal(test.inputLocks) + if err != nil { + t.Fatalf("Test %d - Failed to marshal input lockInfos - %v", i+1, err) + } + actualLocks, err := getLockInfos(bytes.NewReader(jsonBytes)) + if err != nil { + t.Fatalf("Test %d - Failed to get lock information - %v", i+1, err) + } + if !reflect.DeepEqual(actualLocks, test.inputLocks) { + t.Errorf("Test %d - Expected %v but received %v", i+1, test.inputLocks, actualLocks) + } + } + + // Invalid json representation of []VolumeLockInfo + _, err := getLockInfos(bytes.NewReader([]byte("invalidBytes"))) + if err == nil { + t.Errorf("Test expected to fail, but passed") + } +} diff --git a/pkg/madmin/service.go b/pkg/madmin/service.go index 27027bc04..6b25a018b 100644 --- a/pkg/madmin/service.go +++ b/pkg/madmin/service.go @@ -77,7 +77,7 @@ func (adm *AdminClient) ServiceStatus() (ServiceStatusMetadata, error) { } if resp.StatusCode != http.StatusOK { - return ServiceStatusMetadata{}, errors.New("Got " + resp.Status) + return ServiceStatusMetadata{}, errors.New("Got HTTP Status: " + resp.Status) } respBytes, err := ioutil.ReadAll(resp.Body) @@ -113,7 +113,7 @@ func (adm *AdminClient) ServiceStop() error { } if resp.StatusCode != http.StatusOK { - return errors.New("Got " + resp.Status) + return errors.New("Got HTTP Status: " + resp.Status) } return nil @@ -137,7 +137,7 @@ func (adm *AdminClient) ServiceRestart() error { } if resp.StatusCode != http.StatusOK { - return errors.New("Got " + resp.Status) + return errors.New("Got HTTP Status: " + resp.Status) } return nil } From 926c75d0b5104962e1ab0a82c867d99db2515bc2 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Fri, 6 Jan 2017 00:37:00 -0800 Subject: [PATCH 048/100] api: Set appropriate content-type for success/error responses. 
(#3537) Golang HTTP client automatically detects content-type but for S3 clients this content-type might be incorrect or might misbehave. For example: ``` Content-Type: text/xml; charset=utf-8 ``` Should be ``` Content-Type: application/xml ``` Allow this to be set properly. --- cmd/admin-handlers.go | 36 ++++--- cmd/api-response.go | 73 ++++++++----- cmd/auth-handler.go | 2 +- cmd/bucket-handlers-listobjects.go | 26 +++-- cmd/bucket-handlers.go | 102 +++++++++--------- cmd/bucket-notification-handlers.go | 47 +++++---- cmd/bucket-policy-handlers.go | 34 +++--- cmd/generic-handlers.go | 12 +-- cmd/object-handlers-common.go | 12 +-- cmd/object-handlers.go | 156 ++++++++++++++-------------- 10 files changed, 260 insertions(+), 240 deletions(-) diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index f80ec826f..f3e34b8dc 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -35,19 +35,19 @@ const ( func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { - writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + writeErrorResponse(w, adminAPIErr, r.URL) return } storageInfo := newObjectLayerFn().StorageInfo() jsonBytes, err := json.Marshal(storageInfo) if err != nil { - writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) + writeErrorResponse(w, ErrInternalError, r.URL) errorIf(err, "Failed to marshal storage info into json.") return } // Reply with storage information (across nodes in a // distributed setup) as json. 
- writeSuccessResponse(w, jsonBytes) + writeSuccessResponseJSON(w, jsonBytes) } // ServiceStopHandler - POST /?service @@ -58,11 +58,13 @@ func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r * func (adminAPI adminAPIHandlers) ServiceStopHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { - writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + writeErrorResponse(w, adminAPIErr, r.URL) return } + // Reply to the client before stopping minio server. w.WriteHeader(http.StatusOK) + sendServiceCmd(globalAdminPeers, serviceStop) } @@ -74,11 +76,13 @@ func (adminAPI adminAPIHandlers) ServiceStopHandler(w http.ResponseWriter, r *ht func (adminAPI adminAPIHandlers) ServiceRestartHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { - writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + writeErrorResponse(w, adminAPIErr, r.URL) return } + // Reply to the client before restarting minio server. w.WriteHeader(http.StatusOK) + sendServiceCmd(globalAdminPeers, serviceRestart) } @@ -131,14 +135,14 @@ func validateLockQueryParams(vars url.Values) (string, string, time.Duration, AP func (adminAPI adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { - writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + writeErrorResponse(w, adminAPIErr, r.URL) return } vars := r.URL.Query() bucket, prefix, relTime, adminAPIErr := validateLockQueryParams(vars) if adminAPIErr != ErrNone { - writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + writeErrorResponse(w, adminAPIErr, r.URL) return } @@ -146,7 +150,7 @@ func (adminAPI adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http // are available since relTime. 
volLocks, err := listPeerLocksInfo(globalAdminPeers, bucket, prefix, relTime) if err != nil { - writeErrorResponse(w, r, ErrInternalError, r.URL.Path) + writeErrorResponse(w, ErrInternalError, r.URL) errorIf(err, "Failed to fetch lock information from remote nodes.") return } @@ -154,14 +158,14 @@ func (adminAPI adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http // Marshal list of locks as json. jsonBytes, err := json.Marshal(volLocks) if err != nil { - writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) + writeErrorResponse(w, ErrInternalError, r.URL) errorIf(err, "Failed to marshal lock information into json.") return } // Reply with list of locks held on bucket, matching prefix // older than relTime supplied, as json. - writeSuccessResponse(w, jsonBytes) + writeSuccessResponseJSON(w, jsonBytes) } // ClearLocksHandler - POST /?lock&bucket=mybucket&prefix=myprefix&older-than=relTime @@ -173,14 +177,14 @@ func (adminAPI adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http func (adminAPI adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *http.Request) { adminAPIErr := checkRequestAuthType(r, "", "", "") if adminAPIErr != ErrNone { - writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + writeErrorResponse(w, adminAPIErr, r.URL) return } vars := r.URL.Query() bucket, prefix, relTime, adminAPIErr := validateLockQueryParams(vars) if adminAPIErr != ErrNone { - writeErrorResponse(w, r, adminAPIErr, r.URL.Path) + writeErrorResponse(w, adminAPIErr, r.URL) return } @@ -188,7 +192,7 @@ func (adminAPI adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *htt // are available since relTime. 
volLocks, err := listPeerLocksInfo(globalAdminPeers, bucket, prefix, relTime) if err != nil { - writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) + writeErrorResponse(w, ErrInternalError, r.URL) errorIf(err, "Failed to fetch lock information from remote nodes.") return } @@ -196,14 +200,16 @@ func (adminAPI adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *htt // Marshal list of locks as json. jsonBytes, err := json.Marshal(volLocks) if err != nil { - writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) + writeErrorResponse(w, ErrInternalError, r.URL) errorIf(err, "Failed to marshal lock information into json.") return } + // Remove lock matching bucket/prefix older than relTime. for _, volLock := range volLocks { globalNSMutex.ForceUnlock(volLock.Bucket, volLock.Object) } + // Reply with list of locks cleared, as json. - writeSuccessResponse(w, jsonBytes) + writeSuccessResponseJSON(w, jsonBytes) } diff --git a/cmd/api-response.go b/cmd/api-response.go index 420e7a608..42d2730d4 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -19,6 +19,7 @@ package cmd import ( "encoding/xml" "net/http" + "net/url" "path" "time" ) @@ -482,51 +483,67 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier, return deleteResp } -func writeResponse(w http.ResponseWriter, statusCode int, response []byte) { +func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { setCommonHeaders(w) - w.WriteHeader(statusCode) - if response == nil { - return + if mType != mimeNone { + w.Header().Set("Content-Type", string(mType)) + } + w.WriteHeader(statusCode) + if response != nil { + w.Write(response) + w.(http.Flusher).Flush() } - w.Write(response) - w.(http.Flusher).Flush() } -// writeSuccessResponse writes success headers and response if any. 
-func writeSuccessResponse(w http.ResponseWriter, response []byte) { - writeResponse(w, http.StatusOK, response) +// mimeType represents various MIME type used API responses. +type mimeType string + +const ( + // Means no response type. + mimeNone mimeType = "" + // Means response type is JSON. + mimeJSON mimeType = "application/json" + // Means response type is XML. + mimeXML mimeType = "application/xml" +) + +// writeSuccessResponseJSON writes success headers and response if any, +// with content-type set to `application/json`. +func writeSuccessResponseJSON(w http.ResponseWriter, response []byte) { + writeResponse(w, http.StatusOK, response, mimeJSON) +} + +// writeSuccessResponseXML writes success headers and response if any, +// with content-type set to `application/xml`. +func writeSuccessResponseXML(w http.ResponseWriter, response []byte) { + writeResponse(w, http.StatusOK, response, mimeXML) } // writeSuccessNoContent writes success headers with http status 204 func writeSuccessNoContent(w http.ResponseWriter) { - writeResponse(w, http.StatusNoContent, nil) + writeResponse(w, http.StatusNoContent, nil, mimeNone) } // writeRedirectSeeOther writes Location header with http status 303 func writeRedirectSeeOther(w http.ResponseWriter, location string) { w.Header().Set("Location", location) - writeResponse(w, http.StatusSeeOther, nil) + writeResponse(w, http.StatusSeeOther, nil, mimeNone) +} + +func writeSuccessResponseHeadersOnly(w http.ResponseWriter) { + writeResponse(w, http.StatusOK, nil, mimeNone) } // writeErrorRespone writes error headers -func writeErrorResponse(w http.ResponseWriter, req *http.Request, errorCode APIErrorCode, resource string) { - apiError := getAPIError(errorCode) - // set common headers - setCommonHeaders(w) - // write Header - w.WriteHeader(apiError.HTTPStatusCode) - writeErrorResponseNoHeader(w, req, errorCode, resource) -} - -func writeErrorResponseNoHeader(w http.ResponseWriter, req *http.Request, errorCode APIErrorCode, resource 
string) { +func writeErrorResponse(w http.ResponseWriter, errorCode APIErrorCode, reqURL *url.URL) { apiError := getAPIError(errorCode) // Generate error response. - errorResponse := getAPIErrorResponse(apiError, resource) + errorResponse := getAPIErrorResponse(apiError, reqURL.Path) encodedErrorResponse := encodeResponse(errorResponse) - // HEAD should have no body, do not attempt to write to it - if req.Method != "HEAD" { - // write error body - w.Write(encodedErrorResponse) - w.(http.Flusher).Flush() - } + writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML) +} + +func writeErrorResponseHeadersOnly(w http.ResponseWriter, errorCode APIErrorCode) { + apiError := getAPIError(errorCode) + writeResponse(w, apiError.HTTPStatusCode, nil, mimeNone) } diff --git a/cmd/auth-handler.go b/cmd/auth-handler.go index c048698bb..afb8330a8 100644 --- a/cmd/auth-handler.go +++ b/cmd/auth-handler.go @@ -231,5 +231,5 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { a.handler.ServeHTTP(w, r) return } - writeErrorResponse(w, r, ErrSignatureVersionNotSupported, r.URL.Path) + writeErrorResponse(w, ErrSignatureVersionNotSupported, r.URL) } diff --git a/cmd/bucket-handlers-listobjects.go b/cmd/bucket-handlers-listobjects.go index 2bdbe9698..81db8b65c 100644 --- a/cmd/bucket-handlers-listobjects.go +++ b/cmd/bucket-handlers-listobjects.go @@ -66,12 +66,12 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -88,7 +88,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http // Validate the query 
params before beginning to serve the request. // fetch-owner is not validated since it is a boolean if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } // Inititate a list objects operation based on the input params. @@ -97,15 +97,14 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http listObjectsInfo, err := objectAPI.ListObjects(bucket, prefix, marker, delimiter, maxKeys) if err != nil { errorIf(err, "Unable to list objects.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } response := generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter, fetchOwner, maxKeys, listObjectsInfo) - // Write headers - setCommonHeaders(w) + // Write success response. - writeSuccessResponse(w, encodeResponse(response)) + writeSuccessResponseXML(w, encodeResponse(response)) } // ListObjectsV1Handler - GET Bucket (List Objects) Version 1. @@ -120,12 +119,12 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -134,7 +133,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http // Validate all the query params before beginning to serve the request. 
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -144,12 +143,11 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http listObjectsInfo, err := objectAPI.ListObjects(bucket, prefix, marker, delimiter, maxKeys) if err != nil { errorIf(err, "Unable to list objects.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo) - // Write headers - setCommonHeaders(w) + // Write success response. - writeSuccessResponse(w, encodeResponse(response)) + writeSuccessResponseXML(w, encodeResponse(response)) } diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 7a9e12b4b..5aa99885c 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -80,18 +80,18 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r * objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:GetBucketLocation", "us-east-1"); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } if _, err := objectAPI.GetBucketInfo(bucket); err != nil { errorIf(err, "Unable to fetch bucket info.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } @@ -104,8 +104,9 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r * Location: region, }) } - setCommonHeaders(w) // Write headers. - writeSuccessResponse(w, encodedSuccessResponse) + + // Write success response. 
+ writeSuccessResponseXML(w, encodedSuccessResponse) } // ListMultipartUploadsHandler - GET Bucket (List Multipart uploads) @@ -122,24 +123,24 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucketMultipartUploads", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query()) if maxUploads < 0 { - writeErrorResponse(w, r, ErrInvalidMaxUploads, r.URL.Path) + writeErrorResponse(w, ErrInvalidMaxUploads, r.URL) return } if keyMarker != "" { // Marker not common with prefix is not implemented. if !strings.HasPrefix(keyMarker, prefix) { - writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path) + writeErrorResponse(w, ErrNotImplemented, r.URL) return } } @@ -147,16 +148,15 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, listMultipartsInfo, err := objectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) if err != nil { errorIf(err, "Unable to list multipart uploads.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } // generate response response := generateListMultipartUploadsResponse(bucket, listMultipartsInfo) encodedSuccessResponse := encodeResponse(response) - // write headers. - setCommonHeaders(w) + // write success response. - writeSuccessResponse(w, encodedSuccessResponse) + writeSuccessResponseXML(w, encodedSuccessResponse) } // ListBucketsHandler - GET Service. 
@@ -166,7 +166,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } @@ -177,24 +177,23 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R s3Error = checkRequestAuthType(r, "", "", serverConfig.GetRegion()) } if s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } // Invoke the list buckets. bucketsInfo, err := objectAPI.ListBuckets() if err != nil { errorIf(err, "Unable to list buckets.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } // Generate response. response := generateListBucketsResponse(bucketsInfo) encodedSuccessResponse := encodeResponse(response) - // Write headers. - setCommonHeaders(w) + // Write response. - writeSuccessResponse(w, encodedSuccessResponse) + writeSuccessResponseXML(w, encodedSuccessResponse) } // DeleteMultipleObjectsHandler - deletes multiple objects. 
@@ -204,26 +203,26 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } // Content-Length is required and should be non-zero // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html if r.ContentLength <= 0 { - writeErrorResponse(w, r, ErrMissingContentLength, r.URL.Path) + writeErrorResponse(w, ErrMissingContentLength, r.URL) return } // Content-Md5 is requied should be set // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html if _, ok := r.Header["Content-Md5"]; !ok { - writeErrorResponse(w, r, ErrMissingContentMD5, r.URL.Path) + writeErrorResponse(w, ErrMissingContentMD5, r.URL) return } @@ -233,7 +232,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, // Read incoming body XML bytes. 
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil { errorIf(err, "Unable to read HTTP body.") - writeErrorResponse(w, r, ErrInternalError, r.URL.Path) + writeErrorResponse(w, ErrInternalError, r.URL) return } @@ -241,7 +240,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, deleteObjects := &DeleteObjectsRequest{} if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil { errorIf(err, "Unable to unmarshal delete objects request XML.") - writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path) + writeErrorResponse(w, ErrMalformedXML, r.URL) return } @@ -289,10 +288,9 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, // Generate response response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors) encodedSuccessResponse := encodeResponse(response) - // Write headers - setCommonHeaders(w) + // Write success response. - writeSuccessResponse(w, encodedSuccessResponse) + writeSuccessResponseXML(w, encodedSuccessResponse) // Notify deleted event for objects. for _, dobj := range deletedObjects { @@ -315,13 +313,13 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } // PutBucket does not have any bucket action. if s3Error := checkRequestAuthType(r, "", "", "us-east-1"); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -331,7 +329,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req // Validate if incoming location constraint is valid, reject // requests which do not follow valid region requirements. 
if s3Error := isValidLocationConstraint(r); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -343,12 +341,14 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req err := objectAPI.MakeBucket(bucket) if err != nil { errorIf(err, "Unable to create a bucket.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } + // Make sure to add Location information here only for bucket w.Header().Set("Location", getLocation(r)) - writeSuccessResponse(w, nil) + + writeSuccessResponseHeadersOnly(w) } // PostPolicyBucketHandler - POST policy @@ -358,7 +358,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) { objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } @@ -367,14 +367,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h reader, err := r.MultipartReader() if err != nil { errorIf(err, "Unable to initialize multipart reader.") - writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path) + writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL) return } fileBody, fileName, formValues, err := extractPostPolicyFormValues(reader) if err != nil { errorIf(err, "Unable to parse form values.") - writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path) + writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL) return } bucket := mux.Vars(r)["bucket"] @@ -390,25 +390,25 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h // Verify policy signature. 
apiErr := doesPolicySignatureMatch(formValues) if apiErr != ErrNone { - writeErrorResponse(w, r, apiErr, r.URL.Path) + writeErrorResponse(w, apiErr, r.URL) return } policyBytes, err := base64.StdEncoding.DecodeString(formValues["Policy"]) if err != nil { - writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path) + writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL) return } postPolicyForm, err := parsePostPolicyForm(string(policyBytes)) if err != nil { - writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path) + writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL) return } // Make sure formValues adhere to policy restrictions. if apiErr = checkPostPolicy(formValues, postPolicyForm); apiErr != ErrNone { - writeErrorResponse(w, r, apiErr, r.URL.Path) + writeErrorResponse(w, apiErr, r.URL) return } @@ -442,7 +442,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h objInfo, err := objectAPI.PutObject(bucket, object, -1, fileBody, metadata, sha256sum) if err != nil { errorIf(err, "Unable to create object.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"") @@ -471,10 +471,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h ETag: "\"" + objInfo.MD5Sum + "\"", Location: getObjectLocation(bucket, object), }) - writeResponse(w, http.StatusCreated, resp) - + writeResponse(w, http.StatusCreated, resp, "application/xml") case "200": - writeSuccessResponse(w, nil) + writeSuccessResponseHeadersOnly(w) default: writeSuccessNoContent(w) } @@ -504,12 +503,12 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponseHeadersOnly(w, ErrServerNotInitialized) return } if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", 
serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponseHeadersOnly(w, s3Error) return } @@ -519,23 +518,24 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re if _, err := objectAPI.GetBucketInfo(bucket); err != nil { errorIf(err, "Unable to fetch bucket info.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponseHeadersOnly(w, toAPIErrorCode(err)) return } - writeSuccessResponse(w, nil) + + writeSuccessResponseHeadersOnly(w) } // DeleteBucketHandler - Delete bucket func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } // DeleteBucket does not have any bucket action. if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -549,7 +549,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. // Attempt to delete bucket. 
if err := objectAPI.DeleteBucket(bucket); err != nil { errorIf(err, "Unable to delete a bucket.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } diff --git a/cmd/bucket-notification-handlers.go b/cmd/bucket-notification-handlers.go index b7d7414e5..fb9c026ef 100644 --- a/cmd/bucket-notification-handlers.go +++ b/cmd/bucket-notification-handlers.go @@ -42,12 +42,12 @@ const ( func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) { objAPI := api.ObjectAPI() if objAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -57,7 +57,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, _, err := objAPI.GetBucketInfo(bucket) if err != nil { errorIf(err, "Unable to find bucket info.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } @@ -65,7 +65,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, nConfig, err := loadNotificationConfig(bucket, objAPI) if err != nil && err != errNoSuchNotifications { errorIf(err, "Unable to read notification configuration.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } // For no notifications we write a dummy XML. @@ -77,11 +77,12 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, if err != nil { // For any marshalling failure. 
errorIf(err, "Unable to marshal notification configuration into XML.", err) - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } + // Success. - writeSuccessResponse(w, notificationBytes) + writeSuccessResponseXML(w, notificationBytes) } // PutBucketNotificationHandler - Minio notification feature enables @@ -95,12 +96,12 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) { objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -110,7 +111,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, _, err := objectAPI.GetBucketInfo(bucket) if err != nil { errorIf(err, "Unable to find bucket info.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } @@ -118,7 +119,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, // always needs a Content-Length if incoming request is not chunked. 
if !contains(r.TransferEncoding, "chunked") { if r.ContentLength == -1 { - writeErrorResponse(w, r, ErrMissingContentLength, r.URL.Path) + writeErrorResponse(w, ErrMissingContentLength, r.URL) return } } @@ -132,7 +133,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, } if err != nil { errorIf(err, "Unable to read incoming body.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } @@ -141,25 +142,25 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, notificationConfigBytes := buffer.Bytes() if err = xml.Unmarshal(notificationConfigBytes, ¬ificationCfg); err != nil { errorIf(err, "Unable to parse notification configuration XML.") - writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path) + writeErrorResponse(w, ErrMalformedXML, r.URL) return } // Successfully marshalled notification configuration. // Validate unmarshalled bucket notification configuration. if s3Error := validateNotificationConfig(notificationCfg); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } // Put bucket notification config. err = PutBucketNotificationConfig(bucket, ¬ificationCfg, objectAPI) if err != nil { - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } // Success. - writeSuccessResponse(w, nil) + writeSuccessResponseHeadersOnly(w) } // PutBucketNotificationConfig - Put a new notification config for a @@ -249,12 +250,12 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit // Validate if bucket exists. 
objAPI := api.ObjectAPI() if objAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -265,19 +266,19 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit prefixes, suffixes, events := getListenBucketNotificationResources(r.URL.Query()) if err := validateFilterValues(prefixes); err != ErrNone { - writeErrorResponse(w, r, err, r.URL.Path) + writeErrorResponse(w, err, r.URL) return } if err := validateFilterValues(suffixes); err != ErrNone { - writeErrorResponse(w, r, err, r.URL.Path) + writeErrorResponse(w, err, r.URL) return } // Validate all the resource events. for _, event := range events { if errCode := checkEvent(event); errCode != ErrNone { - writeErrorResponse(w, r, errCode, r.URL.Path) + writeErrorResponse(w, errCode, r.URL) return } } @@ -285,7 +286,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit _, err := objAPI.GetBucketInfo(bucket) if err != nil { errorIf(err, "Unable to get bucket info.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } @@ -338,7 +339,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit // Add channel for listener events if err = globalEventNotifier.AddListenerChan(accountARN, nEventCh); err != nil { errorIf(err, "Error adding a listener!") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } // Remove listener channel after the writer has closed or the @@ -355,7 +356,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit err = AddBucketListenerConfig(bucket, &lc, objAPI) if err != nil { - 
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } defer RemoveBucketListenerConfig(bucket, &lc, objAPI) diff --git a/cmd/bucket-policy-handlers.go b/cmd/bucket-policy-handlers.go index 50589059f..32dd0aac4 100644 --- a/cmd/bucket-policy-handlers.go +++ b/cmd/bucket-policy-handlers.go @@ -121,12 +121,12 @@ func bucketPolicyConditionMatch(conditions map[string]set.StringSet, statement p func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { objAPI := api.ObjectAPI() if objAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -137,7 +137,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht _, err := objAPI.GetBucketInfo(bucket) if err != nil { errorIf(err, "Unable to find bucket info.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } @@ -146,12 +146,12 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht // incoming request is not chunked. if !contains(r.TransferEncoding, "chunked") { if r.ContentLength == -1 || r.ContentLength == 0 { - writeErrorResponse(w, r, ErrMissingContentLength, r.URL.Path) + writeErrorResponse(w, ErrMissingContentLength, r.URL) return } // If Content-Length is greater than maximum allowed policy size. 
if r.ContentLength > maxAccessPolicySize { - writeErrorResponse(w, r, ErrEntityTooLarge, r.URL.Path) + writeErrorResponse(w, ErrEntityTooLarge, r.URL) return } } @@ -162,13 +162,13 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht policyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize)) if err != nil { errorIf(err, "Unable to read from client.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } // Parse validate and save bucket policy. if s3Error := parseAndPersistBucketPolicy(bucket, policyBytes, objAPI); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -183,12 +183,12 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { objAPI := api.ObjectAPI() if objAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -199,7 +199,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r _, err := objAPI.GetBucketInfo(bucket) if err != nil { errorIf(err, "Unable to find bucket info.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } @@ -208,9 +208,9 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r if err := persistAndNotifyBucketPolicyChange(bucket, policyChange{true, nil}, objAPI); err != nil { switch err.(type) { case BucketPolicyNotFound: - writeErrorResponse(w, r, ErrNoSuchBucketPolicy, r.URL.Path) + writeErrorResponse(w, ErrNoSuchBucketPolicy, 
r.URL) default: - writeErrorResponse(w, r, ErrInternalError, r.URL.Path) + writeErrorResponse(w, ErrInternalError, r.URL) } return } @@ -226,12 +226,12 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { objAPI := api.ObjectAPI() if objAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -242,7 +242,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht _, err := objAPI.GetBucketInfo(bucket) if err != nil { errorIf(err, "Unable to find bucket info.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } @@ -252,9 +252,9 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht errorIf(err, "Unable to read bucket policy.") switch err.(type) { case BucketPolicyNotFound: - writeErrorResponse(w, r, ErrNoSuchBucketPolicy, r.URL.Path) + writeErrorResponse(w, ErrNoSuchBucketPolicy, r.URL) default: - writeErrorResponse(w, r, ErrInternalError, r.URL.Path) + writeErrorResponse(w, ErrInternalError, r.URL) } return } diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index 6e17fbea9..2ce0a878f 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -169,7 +169,7 @@ func setPrivateBucketHandler(h http.Handler) http.Handler { func (h minioPrivateBucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // For all non browser requests, reject access to 'reservedBucket'. 
if !guessIsBrowserReq(r) && path.Clean(r.URL.Path) == reservedBucket { - writeErrorResponse(w, r, ErrAllAccessDisabled, r.URL.Path) + writeErrorResponse(w, ErrAllAccessDisabled, r.URL) return } h.handler.ServeHTTP(w, r) @@ -231,14 +231,14 @@ func (h timeValidityHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // All our internal APIs are sensitive towards Date // header, for all requests where Date header is not // present we will reject such clients. - writeErrorResponse(w, r, apiErr, r.URL.Path) + writeErrorResponse(w, apiErr, r.URL) return } // Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past // or in the future, reject request otherwise. curTime := time.Now().UTC() if curTime.Sub(amzDate) > globalMaxSkewTime || amzDate.Sub(curTime) > globalMaxSkewTime { - writeErrorResponse(w, r, ErrRequestTimeTooSkewed, r.URL.Path) + writeErrorResponse(w, ErrRequestTimeTooSkewed, r.URL) return } } @@ -327,20 +327,20 @@ func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // If bucketName is present and not objectName check for bucket level resource queries. if bucketName != "" && objectName == "" { if ignoreNotImplementedBucketResources(r) { - writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path) + writeErrorResponse(w, ErrNotImplemented, r.URL) return } } // If bucketName and objectName are present check for its resource queries. if bucketName != "" && objectName != "" { if ignoreNotImplementedObjectResources(r) { - writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path) + writeErrorResponse(w, ErrNotImplemented, r.URL) return } } // A put method on path "/" doesn't make sense, ignore it. 
if r.Method == "PUT" && r.URL.Path == "/" { - writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path) + writeErrorResponse(w, ErrNotImplemented, r.URL) return } diff --git a/cmd/object-handlers-common.go b/cmd/object-handlers-common.go index b79dd5783..46fbe1ec8 100644 --- a/cmd/object-handlers-common.go +++ b/cmd/object-handlers-common.go @@ -59,7 +59,7 @@ func checkCopyObjectPreconditions(w http.ResponseWriter, r *http.Request, objInf if !ifModifiedSince(objInfo.ModTime, ifModifiedSinceHeader) { // If the object is not modified since the specified time. writeHeaders() - writeErrorResponse(w, r, ErrPreconditionFailed, r.URL.Path) + writeErrorResponse(w, ErrPreconditionFailed, r.URL) return true } } @@ -71,7 +71,7 @@ func checkCopyObjectPreconditions(w http.ResponseWriter, r *http.Request, objInf if ifModifiedSince(objInfo.ModTime, ifUnmodifiedSinceHeader) { // If the object is modified since the specified time. writeHeaders() - writeErrorResponse(w, r, ErrPreconditionFailed, r.URL.Path) + writeErrorResponse(w, ErrPreconditionFailed, r.URL) return true } } @@ -83,7 +83,7 @@ func checkCopyObjectPreconditions(w http.ResponseWriter, r *http.Request, objInf if objInfo.MD5Sum != "" && !isETagEqual(objInfo.MD5Sum, ifMatchETagHeader) { // If the object ETag does not match with the specified ETag. writeHeaders() - writeErrorResponse(w, r, ErrPreconditionFailed, r.URL.Path) + writeErrorResponse(w, ErrPreconditionFailed, r.URL) return true } } @@ -95,7 +95,7 @@ func checkCopyObjectPreconditions(w http.ResponseWriter, r *http.Request, objInf if objInfo.MD5Sum != "" && isETagEqual(objInfo.MD5Sum, ifNoneMatchETagHeader) { // If the object ETag matches with the specified ETag. 
writeHeaders() - writeErrorResponse(w, r, ErrPreconditionFailed, r.URL.Path) + writeErrorResponse(w, ErrPreconditionFailed, r.URL) return true } } @@ -152,7 +152,7 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, objInfo ObjectIn if ifModifiedSince(objInfo.ModTime, ifUnmodifiedSinceHeader) { // If the object is modified since the specified time. writeHeaders() - writeErrorResponse(w, r, ErrPreconditionFailed, r.URL.Path) + writeErrorResponse(w, ErrPreconditionFailed, r.URL) return true } } @@ -164,7 +164,7 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, objInfo ObjectIn if !isETagEqual(objInfo.MD5Sum, ifMatchETagHeader) { // If the object ETag does not match with the specified ETag. writeHeaders() - writeErrorResponse(w, r, ErrPreconditionFailed, r.URL.Path) + writeErrorResponse(w, ErrPreconditionFailed, r.URL) return true } } diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 87382c8a1..831ccf818 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -86,12 +86,12 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req // Fetch object stat info. 
objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:GetObject", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -107,7 +107,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req if apiErr == ErrNoSuchKey { apiErr = errAllowableObjectNotFound(bucket, r) } - writeErrorResponse(w, r, apiErr, r.URL.Path) + writeErrorResponse(w, apiErr, r.URL) return } @@ -119,14 +119,13 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req // Handle only errInvalidRange // Ignore other parse error and treat it as regular Get request like Amazon S3. if err == errInvalidRange { - writeErrorResponse(w, r, ErrInvalidRange, r.URL.Path) + writeErrorResponse(w, ErrInvalidRange, r.URL) return } // log the error. errorIf(err, "Invalid request range") } - } // Validate pre-conditions if any. @@ -166,8 +165,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req // partial data has already been written before an error // occurred then no point in setting StatusCode and // sending error XML. 
- apiErr := toAPIErrorCode(err) - writeErrorResponse(w, r, apiErr, r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) } return } @@ -190,12 +188,12 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponseHeadersOnly(w, ErrServerNotInitialized) return } if s3Error := checkRequestAuthType(r, bucket, "s3:GetObject", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponseHeadersOnly(w, s3Error) return } @@ -211,7 +209,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re if apiErr == ErrNoSuchKey { apiErr = errAllowableObjectNotFound(bucket, r) } - writeErrorResponse(w, r, apiErr, r.URL.Path) + writeErrorResponseHeadersOnly(w, apiErr) return } @@ -235,11 +233,13 @@ func getCpObjMetadataFromHeader(header http.Header, defaultMeta map[string]strin if isMetadataReplace(header) { return extractMetadataFromHeader(header) } + // if x-amz-metadata-directive says COPY then we // return the default metadata. if isMetadataCopy(header) { return defaultMeta } + // Copy is default behavior if not x-amz-metadata-directive is set. 
return defaultMeta } @@ -256,12 +256,12 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, dstBucket, "s3:PutObject", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -277,13 +277,13 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re srcBucket, srcObject := path2BucketAndObject(cpSrcPath) // If source object is empty or bucket is empty, reply back invalid copy source. if srcObject == "" || srcBucket == "" { - writeErrorResponse(w, r, ErrInvalidCopySource, r.URL.Path) + writeErrorResponse(w, ErrInvalidCopySource, r.URL) return } // Check if metadata directive is valid. if !isMetadataDirectiveValid(r.Header) { - writeErrorResponse(w, r, ErrInvalidMetadataDirective, r.URL.Path) + writeErrorResponse(w, ErrInvalidMetadataDirective, r.URL) return } @@ -311,7 +311,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re objInfo, err := objectAPI.GetObjectInfo(srcBucket, srcObject) if err != nil { errorIf(err, "Unable to fetch object info.") - writeErrorResponse(w, r, toAPIErrorCode(err), cpSrcPath) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } @@ -322,7 +322,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re /// maximum Upload size for object in a single CopyObject operation. 
if isMaxObjectSize(objInfo.Size) { - writeErrorResponse(w, r, ErrEntityTooLarge, cpSrcPath) + writeErrorResponse(w, ErrEntityTooLarge, r.URL) return } @@ -339,7 +339,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re if !isMetadataReplace(r.Header) && cpSrcDstSame { // If x-amz-metadata-directive is not set to REPLACE then we need // to error out if source and destination are same. - writeErrorResponse(w, r, ErrInvalidCopyDest, r.URL.Path) + writeErrorResponse(w, ErrInvalidCopyDest, r.URL) return } @@ -347,17 +347,16 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re // object is same then only metadata is updated. objInfo, err = objectAPI.CopyObject(srcBucket, srcObject, dstBucket, dstObject, newMetadata) if err != nil { - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } md5Sum := objInfo.MD5Sum response := generateCopyObjectResponse(md5Sum, objInfo.ModTime) encodedSuccessResponse := encodeResponse(response) - // write headers - setCommonHeaders(w) - // write success response. - writeSuccessResponse(w, encodedSuccessResponse) + + // Write success response. + writeSuccessResponseXML(w, encodedSuccessResponse) // Notify object created event. eventNotify(eventData{ @@ -376,13 +375,13 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } // X-Amz-Copy-Source shouldn't be set for this call. 
if _, ok := r.Header["X-Amz-Copy-Source"]; ok { - writeErrorResponse(w, r, ErrInvalidCopySource, r.URL.Path) + writeErrorResponse(w, ErrInvalidCopySource, r.URL) return } @@ -394,7 +393,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5")) if err != nil { errorIf(err, "Unable to validate content-md5 format.") - writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path) + writeErrorResponse(w, ErrInvalidDigest, r.URL) return } @@ -406,18 +405,18 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { errorIf(err, "Unable to parse `x-amz-decoded-content-length` into its integer value", sizeStr) - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } } if size == -1 && !contains(r.TransferEncoding, "chunked") { - writeErrorResponse(w, r, ErrMissingContentLength, r.URL.Path) + writeErrorResponse(w, ErrMissingContentLength, r.URL) return } /// maximum Upload size for objects in a single operation if isMaxObjectSize(size) { - writeErrorResponse(w, r, ErrEntityTooLarge, r.URL.Path) + writeErrorResponse(w, ErrEntityTooLarge, r.URL) return } @@ -437,12 +436,12 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req switch rAuthType { default: // For all unknown auth types return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + writeErrorResponse(w, ErrAccessDenied, r.URL) return case authTypeAnonymous: // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } // Create anonymous object. 
@@ -452,7 +451,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req reader, s3Error := newSignV4ChunkedReader(r) if s3Error != ErrNone { errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata, sha256sum) @@ -460,14 +459,14 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req s3Error := isReqAuthenticatedV2(r) if s3Error != ErrNone { errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum) case authTypePresigned, authTypeSigned: if s3Error := reqSignatureV4Verify(r); s3Error != ErrNone { errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } if !skipContentSha256Cksum(r) { @@ -478,11 +477,11 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req } if err != nil { errorIf(err, "Unable to create an object.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"") - writeSuccessResponse(w, nil) + writeSuccessResponseHeadersOnly(w) // Notify object created event. 
eventNotify(eventData{ @@ -506,12 +505,12 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -521,16 +520,15 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r uploadID, err := objectAPI.NewMultipartUpload(bucket, object, metadata) if err != nil { errorIf(err, "Unable to initiate new multipart upload id.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } response := generateInitiateMultipartUploadResponse(bucket, object, uploadID) encodedSuccessResponse := encodeResponse(response) - // write headers - setCommonHeaders(w) - // write success response. - writeSuccessResponse(w, encodedSuccessResponse) + + // Write success response. 
+ writeSuccessResponseXML(w, encodedSuccessResponse) } // PutObjectPartHandler - Upload part @@ -541,14 +539,14 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } // get Content-Md5 sent by client and verify if valid md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5")) if err != nil { - writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path) + writeErrorResponse(w, ErrInvalidDigest, r.URL) return } @@ -562,18 +560,18 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { errorIf(err, "Unable to parse `x-amz-decoded-content-length` into its integer value", sizeStr) - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } } if size == -1 { - writeErrorResponse(w, r, ErrMissingContentLength, r.URL.Path) + writeErrorResponse(w, ErrMissingContentLength, r.URL) return } /// maximum Upload size for multipart objects in a single operation if isMaxObjectSize(size) { - writeErrorResponse(w, r, ErrEntityTooLarge, r.URL.Path) + writeErrorResponse(w, ErrEntityTooLarge, r.URL) return } @@ -582,13 +580,13 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http partID, err := strconv.Atoi(partIDString) if err != nil { - writeErrorResponse(w, r, ErrInvalidPart, r.URL.Path) + writeErrorResponse(w, ErrInvalidPart, r.URL) return } // check partID with maximum part ID for multipart objects if isMaxPartID(partID) { - writeErrorResponse(w, r, ErrInvalidMaxParts, r.URL.Path) + writeErrorResponse(w, ErrInvalidMaxParts, r.URL) return } @@ -598,12 +596,12 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http switch rAuthType { default: // For all unknown auth types 
return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + writeErrorResponse(w, ErrAccessDenied, r.URL) return case authTypeAnonymous: // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } // No need to verify signature, anonymous request access is already allowed. @@ -613,7 +611,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http reader, s3Error := newSignV4ChunkedReader(r) if s3Error != ErrNone { errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } partMD5, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, incomingMD5, sha256sum) @@ -621,14 +619,14 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http s3Error := isReqAuthenticatedV2(r) if s3Error != ErrNone { errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } partMD5, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum) case authTypePresigned, authTypeSigned: if s3Error := reqSignatureV4Verify(r); s3Error != ErrNone { errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -640,13 +638,14 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http if err != nil { errorIf(err, "Unable to create object part.") // Verify if the underlying error is signature mismatch. 
- writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } if partMD5 != "" { w.Header().Set("ETag", "\""+partMD5+"\"") } - writeSuccessResponse(w, nil) + + writeSuccessResponseHeadersOnly(w) } // AbortMultipartUploadHandler - Abort multipart upload @@ -657,19 +656,19 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:AbortMultipartUpload", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } uploadID, _, _, _ := getObjectResources(r.URL.Query()) if err := objectAPI.AbortMultipartUpload(bucket, object, uploadID); err != nil { errorIf(err, "Unable to abort multipart upload.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } writeSuccessNoContent(w) @@ -683,36 +682,35 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:ListMultipartUploadParts", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query()) if partNumberMarker < 0 { - writeErrorResponse(w, r, ErrInvalidPartNumberMarker, r.URL.Path) + writeErrorResponse(w, ErrInvalidPartNumberMarker, r.URL) return } if maxParts < 0 { - writeErrorResponse(w, r, ErrInvalidMaxParts, r.URL.Path) + writeErrorResponse(w, 
ErrInvalidMaxParts, r.URL) return } listPartsInfo, err := objectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) if err != nil { errorIf(err, "Unable to list uploaded parts.") - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } response := generateListPartsResponse(listPartsInfo) encodedSuccessResponse := encodeResponse(response) - // Write headers. - setCommonHeaders(w) + // Write success response. - writeSuccessResponse(w, encodedSuccessResponse) + writeSuccessResponseXML(w, encodedSuccessResponse) } // CompleteMultipartUploadHandler - Complete multipart upload. @@ -723,12 +721,12 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } @@ -739,23 +737,24 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite completeMultipartBytes, err := ioutil.ReadAll(r.Body) if err != nil { errorIf(err, "Unable to complete multipart upload.") - writeErrorResponse(w, r, ErrInternalError, r.URL.Path) + writeErrorResponse(w, ErrInternalError, r.URL) return } complMultipartUpload := &completeMultipartUpload{} if err = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); err != nil { errorIf(err, "Unable to parse complete multipart upload XML.") - writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path) + writeErrorResponse(w, ErrMalformedXML, r.URL) return } if len(complMultipartUpload.Parts) == 0 { - writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path) + writeErrorResponse(w, ErrMalformedXML, r.URL) return } if 
!sort.IsSorted(completedParts(complMultipartUpload.Parts)) { - writeErrorResponse(w, r, ErrInvalidPartOrder, r.URL.Path) + writeErrorResponse(w, ErrInvalidPartOrder, r.URL) return } + // Complete parts. var completeParts []completePart for _, part := range complMultipartUpload.Parts { @@ -779,7 +778,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite writePartSmallErrorResponse(w, r, oErr) default: // Handle all other generic issues. - writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) } return } @@ -791,7 +790,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite encodedSuccessResponse := encodeResponse(response) if err != nil { errorIf(err, "Unable to parse CompleteMultipartUpload response") - writeErrorResponseNoHeader(w, r, ErrInternalError, r.URL.Path) + writeErrorResponse(w, ErrInternalError, r.URL) return } @@ -799,8 +798,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite w.Header().Set("ETag", "\""+md5Sum+"\"") // Write success response. - w.Write(encodedSuccessResponse) - w.(http.Flusher).Flush() + writeSuccessResponseXML(w, encodedSuccessResponse) // Fetch object info for notifications. objInfo, err := objectAPI.GetObjectInfo(bucket, object) @@ -830,12 +828,12 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http. 
objectAPI := api.ObjectAPI() if objectAPI == nil { - writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) + writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) + writeErrorResponse(w, s3Error, r.URL) return } From 464f9d34d681411d7fd3edbaf9a6ea019e3985fe Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Fri, 6 Jan 2017 18:40:49 -0800 Subject: [PATCH 049/100] Remove all references to GO15VENDOREXPERIMENT --- Makefile | 46 +++++++++++++++++++++++----------------------- appveyor.yml | 1 - 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/Makefile b/Makefile index 772032d07..57bdf888e 100644 --- a/Makefile +++ b/Makefile @@ -71,64 +71,64 @@ verifiers: vet fmt lint cyclo spelling vet: @echo "Running $@:" - @GO15VENDOREXPERIMENT=1 go tool vet -all ./cmd - @GO15VENDOREXPERIMENT=1 go tool vet -all ./pkg - @GO15VENDOREXPERIMENT=1 go tool vet -shadow=true ./cmd - @GO15VENDOREXPERIMENT=1 go tool vet -shadow=true ./pkg + @go tool vet -all ./cmd + @go tool vet -all ./pkg + @go tool vet -shadow=true ./cmd + @go tool vet -shadow=true ./pkg fmt: @echo "Running $@:" - @GO15VENDOREXPERIMENT=1 gofmt -s -l cmd - @GO15VENDOREXPERIMENT=1 gofmt -s -l pkg + @gofmt -s -l cmd + @gofmt -s -l pkg lint: @echo "Running $@:" - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd... - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg... + @${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd... + @${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg... ineffassign: @echo "Running $@:" - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/ineffassign . + @${GOPATH}/bin/ineffassign . 
cyclo: @echo "Running $@:" - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/gocyclo -over 100 cmd - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/gocyclo -over 100 pkg + @${GOPATH}/bin/gocyclo -over 100 cmd + @${GOPATH}/bin/gocyclo -over 100 pkg build: getdeps verifiers $(UI_ASSETS) deadcode: - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/deadcode + @${GOPATH}/bin/deadcode spelling: - @-GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell -error `find cmd/` - @-GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell -error `find pkg/` - @-GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell -error `find docs/` + @${GOPATH}/bin/misspell -error `find cmd/` + @${GOPATH}/bin/misspell -error `find pkg/` + @${GOPATH}/bin/misspell -error `find docs/` test: build @echo "Running all minio testing:" - @GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) github.com/minio/minio/cmd... - @GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) github.com/minio/minio/pkg... + @go test $(GOFLAGS) github.com/minio/minio/cmd... + @go test $(GOFLAGS) github.com/minio/minio/pkg... 
coverage: build @echo "Running all coverage for minio:" - @GO15VENDOREXPERIMENT=1 ./buildscripts/go-coverage.sh + @./buildscripts/go-coverage.sh gomake-all: build @echo "Installing minio:" - @GO15VENDOREXPERIMENT=1 go build --ldflags $(BUILD_LDFLAGS) -o $(GOPATH)/bin/minio + @go build --ldflags $(BUILD_LDFLAGS) -o $(GOPATH)/bin/minio pkg-add: - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/govendor add $(PKG) + ${GOPATH}/bin/govendor add $(PKG) pkg-update: - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/govendor update $(PKG) + ${GOPATH}/bin/govendor update $(PKG) pkg-remove: - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/govendor remove $(PKG) + ${GOPATH}/bin/govendor remove $(PKG) pkg-list: - @GO15VENDOREXPERIMENT=1 $(GOPATH)/bin/govendor list + @$(GOPATH)/bin/govendor list install: gomake-all diff --git a/appveyor.yml b/appveyor.yml index 088fe69db..66afd4cb2 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -12,7 +12,6 @@ clone_folder: c:\gopath\src\github.com\minio\minio # Environment variables environment: GOPATH: c:\gopath - GO15VENDOREXPERIMENT: 1 # scripts that run after cloning repository install: From 2f4a7483eac98e29a708d170d114ed0160b94421 Mon Sep 17 00:00:00 2001 From: Karthic Rao Date: Sun, 8 Jan 2017 00:57:01 +0530 Subject: [PATCH 050/100] Test Function to reset globals. (#3538) - Adding reset functions for important global variables. - Using them in tests. 
--- cmd/admin-handlers_test.go | 22 ++++++++++++++++++++++ cmd/admin-rpc-server_test.go | 4 ++++ cmd/auth-rpc-client_test.go | 4 ++++ cmd/bucket-policy-handlers_test.go | 2 +- cmd/test-utils_test.go | 26 ++++++++++++++++++++++++++ 5 files changed, 57 insertions(+), 1 deletion(-) diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 91c7252da..01f3f899f 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -111,6 +111,11 @@ func getServiceCmdRequest(cmd cmdType, cred credential) (*http.Request, error) { // testServicesCmdHandler - parametrizes service subcommand tests on // cmdType value. func testServicesCmdHandler(cmd cmdType, t *testing.T) { + // reset globals. + // this is to make sure that the tests are not affected by modified value. + resetTestGlobals() + // initialize NSLock. + initNSLock(false) // Initialize configuration for access/secret credentials. rootPath, err := newTestConfig("us-east-1") if err != nil { @@ -194,6 +199,12 @@ func TestServiceRestartHandler(t *testing.T) { // Test for locks list management REST API. func TestListLocksHandler(t *testing.T) { + // reset globals. + // this is to make sure that the tests are not affected by modified globals. + resetTestGlobals() + // initialize NSLock. + initNSLock(false) + rootPath, err := newTestConfig("us-east-1") if err != nil { t.Fatalf("Unable to initialize server config. %s", err) @@ -272,6 +283,12 @@ func TestListLocksHandler(t *testing.T) { // Test for locks clear management REST API. func TestClearLocksHandler(t *testing.T) { + // reset globals. + // this is to make sure that the tests are not affected by modified globals. + resetTestGlobals() + // initialize NSLock. + initNSLock(false) + rootPath, err := newTestConfig("us-east-1") if err != nil { t.Fatalf("Unable to initialize server config. %s", err) @@ -347,6 +364,11 @@ func TestClearLocksHandler(t *testing.T) { // Test for lock query param validation helper function. 
func TestValidateLockQueryParams(t *testing.T) { + // reset globals. + // this is to make sure that the tests are not affected by modified globals. + resetTestGlobals() + // initialize NSLock. + initNSLock(false) // Sample query values for test cases. allValidVal := url.Values{} allValidVal.Set(string(lockBucket), "bucket") diff --git a/cmd/admin-rpc-server_test.go b/cmd/admin-rpc-server_test.go index 6a4a36603..26365d4ab 100644 --- a/cmd/admin-rpc-server_test.go +++ b/cmd/admin-rpc-server_test.go @@ -22,6 +22,10 @@ import ( ) func testAdminCmd(cmd cmdType, t *testing.T) { + // reset globals. + // this is to make sure that the tests are not affected by modified globals. + resetTestGlobals() + rootPath, err := newTestConfig("us-east-1") if err != nil { t.Fatalf("Failed to create test config - %v", err) diff --git a/cmd/auth-rpc-client_test.go b/cmd/auth-rpc-client_test.go index 9b9076d7f..2210ac9d9 100644 --- a/cmd/auth-rpc-client_test.go +++ b/cmd/auth-rpc-client_test.go @@ -20,6 +20,10 @@ import "testing" // Tests authorized RPC client. func TestAuthRPCClient(t *testing.T) { + // reset globals. + // this is to make sure that the tests are not affected by modified globals. + resetTestGlobals() + authCfg := authConfig{ accessKey: "123", secretKey: "123", diff --git a/cmd/bucket-policy-handlers_test.go b/cmd/bucket-policy-handlers_test.go index c4d893e5d..16536de51 100644 --- a/cmd/bucket-policy-handlers_test.go +++ b/cmd/bucket-policy-handlers_test.go @@ -945,7 +945,7 @@ func TestBucketPolicyConditionMatch(t *testing.T) { } for i, tc := range testCases { - t.Run(fmt.Sprintf("Test case %d: Failed.", i+1), func(t *testing.T) { + t.Run(fmt.Sprintf("Case %d", i+1), func(t *testing.T) { // call the function under test and assert the result with the expected result. 
doesMatch := bucketPolicyConditionMatch(tc.condition, tc.statementCondition) if tc.expectedMatch != doesMatch { diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 97fd99218..cce866721 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -456,6 +456,23 @@ func resetGlobalConfig() { serverConfigMu.Unlock() } +// reset global NSLock. +func resetGlobalNSLock() { + if globalNSMutex != nil { + globalNSMutex = nil + } +} + +// reset global event notifier. +func resetGlobalEventNotifier() { + globalEventNotifier = nil +} + +// reset Global event notifier. +func resetGlobalEventnotify() { + globalEventNotifier = nil +} + // Resets all the globals used modified in tests. // Resetting ensures that the changes made to globals by one test doesn't affect others. func resetTestGlobals() { @@ -465,6 +482,10 @@ func resetTestGlobals() { resetGlobalConfigPath() // Reset Global server config. resetGlobalConfig() + // Reset global NSLock. + resetGlobalNSLock() + // Reset global event notifier. + resetGlobalEventnotify() } // Configure the server for the test run. @@ -1876,6 +1897,11 @@ func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanc // ExecObjectLayerAPITest - executes object layer API tests. // Creates single node and XL ObjectLayer instance, registers the specified API end points and runs test for both the layers. func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) { + // reset globals. + // this is to make sure that the tests are not affected by modified value. + resetTestGlobals() + // initialize NSLock. + initNSLock(false) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. 
rootPath, err := newTestConfig("us-east-1") From f37f56ac15e92e353dac135d8cbaacaba8202646 Mon Sep 17 00:00:00 2001 From: koolhead17 Date: Sun, 8 Jan 2017 08:05:12 +0530 Subject: [PATCH 051/100] docs: Added guide for distributed minio on Windows (#3507) --- docs/docker/README.md | 68 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 56 insertions(+), 12 deletions(-) diff --git a/docs/docker/README.md b/docs/docker/README.md index aa7e0bb83..44676730c 100644 --- a/docs/docker/README.md +++ b/docs/docker/README.md @@ -1,51 +1,95 @@ # Minio Docker Quickstart Guide [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -## 1. Test Minio on Docker. +## 1. Prerequisites + +### GNU/Linux +Install Docker for GNU/Linux from [here](https://www.docker.com/products/docker#/linux) + +### Microsoft Windows +Install Docker for Windows from [here](https://www.docker.com/products/docker#/windows) + +### macOS +Install Docker for macOS from [here](https://www.docker.com/products/docker#/mac) + +## 2. Test Minio on Docker. Minio generates new access and secret keys each time you run this command. Container state is lost after you end this session. This mode is only intended for testing purpose. ```sh - docker run -p 9000:9000 minio/minio server /export - ``` -## 2. Run Minio Standalone on Docker. +## 3. Run Minio Standalone on Docker. Minio container requires a persistent volume to store configuration and application data. Following command maps local persistent directories from the host OS to virtual config `~/.minio` and export `/export` directories. -```sh +### GNU/Linux and macOS +```sh docker run -p 9000:9000 --name minio1 \ -v /mnt/export/minio1:/export \ -v /mnt/config/minio1:/root/.minio \ minio/minio server /export - ``` -## 3. 
Run Minio Standalone on Docker with Custom Access and Secret Keys +### Microsoft Windows + +```sh +docker run -p 9000:9000 --name minio1 \ + -v D:\export\minio1:/export \ + -v D:\export\minio1-config:/root/.minio \ + minio/minio server /export +``` + +## 4. Run Minio Standalone on Docker with Custom Access and Secret Keys To override Minio's auto-generated keys, you may pass secret and access keys explicitly as environment variables. Minio server also allows regular strings as access and secret keys. -```sh +### GNU/Linux and macOS +```sh docker run -p 9000:9000 --name minio1 \ -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \ -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \ -v /mnt/export/minio1:/export \ -v /mnt/config/minio1:/root/.minio \ minio/minio server /export - ``` -## 4. Test Distributed Minio on Docker +### Microsoft Windows + +```sh +docker run -p 9000:9000 --name minio1 \ + -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \ + -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \ + -v D:\export\minio1:/export \ + -v D:\export\minio1-config:/root/.minio \ + minio/minio server /export +``` + +## 5. Test Distributed Minio on Docker This example shows how to run 4 node Minio cluster inside different docker containers using [docker-compose](https://docs.docker.com/compose/). Please download [docker-compose.yml](https://raw.githubusercontent.com/minio/minio/master/docs/docker/docker-compose.yml) to your current working directory, docker-compose pulls the Minio Docker image. 
-### Run `docker-compose` + +#### Run `docker-compose` on GNU/Linux and macOS ```sh docker-compose pull docker-compose up ``` -Each instance is accessible on the host at ports 9001 through 9004, proceed to access the Web browser at http://localhost:9001/ +#### Run `docker-compose.exe` on Microsoft Windows + +```sh +docker-compose.exe pull +docker-compose.exe up +``` + +Each instance is accessible on the host at ports 9001 through 9004, proceed to access the Web browser at http://127.0.0.1:9001/ + +## 6. Explore Further + +* [Minio Erasure Code QuickStart Guide](https://docs.minio.io/docs/minio-erasure-code-quickstart-guide) +* [Distributed Minio Quickstart Guide ](https://docs.minio.io/docs/distributed-minio-quickstart-guide) +* [Docker Compose](https://docs.docker.com/compose/) + From a091fe3ed62f11fb7df0e92a8aacf4e516688acf Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Sun, 8 Jan 2017 11:17:02 -0800 Subject: [PATCH 052/100] docs: Fix caching docs to comply with current features. --- docs/caching/README.md | 36 +++++++++++++++--------------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/docs/caching/README.md b/docs/caching/README.md index 4c52a5054..0a9be05bf 100644 --- a/docs/caching/README.md +++ b/docs/caching/README.md @@ -1,33 +1,27 @@ -## Object caching +## Object Caching -Object caching by turned on by default with following settings +Object caching is on by default with following settings - - Default cache size 8GB. Cache size also automatically picks - a lower value if your local memory size is lower than 8GB. + - Cache size is 50% of your RAM size. Caching is disabled + if your RAM size is smaller than 8GB. - - Default expiration of entries happensat 72 hours, - this option cannot be changed. + - Expiration of each entries happen on every 72 hours. - - Default expiry interval is 1/4th of the expiration hours, so - expiration sweep happens across the cache every 1/4th the time - duration of the set entry expiration duration. 
+ - Garbage collection sweep of the expired entries happen every + 1/4th the set expiration hours value (every 18 hours). + +NOTE: None of the settings can be configured manually. ### Behavior Caching happens on both GET and PUT operations. - GET caches new objects for entries not found in cache. + Otherwise serves from the cache. -- PUT/POST caches all successfully uploaded objects. +- PUT/POST caches all successfully uploaded objects. Replaces + existing cached entry for the same object if needed. -In all other cases if objects are served from cache. - -NOTE: - -Cache is always populated upon object is successfully -read from the disk. - -Expiration happens automatically based on the configured -interval as explained above, frequently accessed objects -stay alive for significantly longer time due to the fact -that expiration time is reset for every cache hit. +NOTE: Expiration happens automatically based on the configured +interval as explained above, frequently accessed objects stay +alive in cache for a significantly longer time on every cache hit. From e1142e99f2ba429631d1d3c7270318884c08f208 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Sun, 8 Jan 2017 20:37:53 -0800 Subject: [PATCH 053/100] rpc/lock: Make sure to capitalize for proper marshalling. (#3544) Distributed setup stopped working for certain types of operations `6d10f4c19af6861e4de1b22ac20a3e5136f69d67` This is a regression. 
Fixes #3543 --- cmd/lock-rpc-server.go | 58 ++++++++++++++++++------------------- cmd/lock-rpc-server_test.go | 2 +- cmd/rpc-common.go | 4 +-- 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/cmd/lock-rpc-server.go b/cmd/lock-rpc-server.go index 63873d4ac..af2ffdf55 100644 --- a/cmd/lock-rpc-server.go +++ b/cmd/lock-rpc-server.go @@ -137,14 +137,14 @@ func (l *lockServer) Lock(args *LockArgs, reply *bool) error { if err := args.IsAuthenticated(); err != nil { return err } - _, *reply = l.lockMap[args.dsyncLockArgs.Resource] + _, *reply = l.lockMap[args.LockArgs.Resource] if !*reply { // No locks held on the given name, so claim write lock - l.lockMap[args.dsyncLockArgs.Resource] = []lockRequesterInfo{ + l.lockMap[args.LockArgs.Resource] = []lockRequesterInfo{ { writer: true, - node: args.dsyncLockArgs.ServerAddr, - rpcPath: args.dsyncLockArgs.ServiceEndpoint, - uid: args.dsyncLockArgs.UID, + node: args.LockArgs.ServerAddr, + rpcPath: args.LockArgs.ServiceEndpoint, + uid: args.LockArgs.UID, timestamp: time.Now().UTC(), timeLastCheck: time.Now().UTC(), }, @@ -162,14 +162,14 @@ func (l *lockServer) Unlock(args *LockArgs, reply *bool) error { return err } var lri []lockRequesterInfo - if lri, *reply = l.lockMap[args.dsyncLockArgs.Resource]; !*reply { // No lock is held on the given name - return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.dsyncLockArgs.Resource) + if lri, *reply = l.lockMap[args.LockArgs.Resource]; !*reply { // No lock is held on the given name + return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.LockArgs.Resource) } if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock - return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.dsyncLockArgs.Resource, len(lri)) + return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.LockArgs.Resource, len(lri)) } - if !l.removeEntry(args.dsyncLockArgs.Resource, 
args.dsyncLockArgs.UID, &lri) { - return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.dsyncLockArgs.UID) + if !l.removeEntry(args.LockArgs.Resource, args.LockArgs.UID, &lri) { + return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.LockArgs.UID) } return nil } @@ -183,18 +183,18 @@ func (l *lockServer) RLock(args *LockArgs, reply *bool) error { } lrInfo := lockRequesterInfo{ writer: false, - node: args.dsyncLockArgs.ServerAddr, - rpcPath: args.dsyncLockArgs.ServiceEndpoint, - uid: args.dsyncLockArgs.UID, + node: args.LockArgs.ServerAddr, + rpcPath: args.LockArgs.ServiceEndpoint, + uid: args.LockArgs.UID, timestamp: time.Now().UTC(), timeLastCheck: time.Now().UTC(), } - if lri, ok := l.lockMap[args.dsyncLockArgs.Resource]; ok { + if lri, ok := l.lockMap[args.LockArgs.Resource]; ok { if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock - l.lockMap[args.dsyncLockArgs.Resource] = append(l.lockMap[args.dsyncLockArgs.Resource], lrInfo) + l.lockMap[args.LockArgs.Resource] = append(l.lockMap[args.LockArgs.Resource], lrInfo) } } else { // No locks held on the given name, so claim (first) read lock - l.lockMap[args.dsyncLockArgs.Resource] = []lockRequesterInfo{lrInfo} + l.lockMap[args.LockArgs.Resource] = []lockRequesterInfo{lrInfo} *reply = true } return nil @@ -208,14 +208,14 @@ func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error { return err } var lri []lockRequesterInfo - if lri, *reply = l.lockMap[args.dsyncLockArgs.Resource]; !*reply { // No lock is held on the given name - return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.dsyncLockArgs.Resource) + if lri, *reply = l.lockMap[args.LockArgs.Resource]; !*reply { // No lock is held on the given name + return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.LockArgs.Resource) } if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock - return fmt.Errorf("RUnlock attempted 
on a write locked entity: %s", args.dsyncLockArgs.Resource) + return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.LockArgs.Resource) } - if !l.removeEntry(args.dsyncLockArgs.Resource, args.dsyncLockArgs.UID, &lri) { - return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.dsyncLockArgs.UID) + if !l.removeEntry(args.LockArgs.Resource, args.LockArgs.UID, &lri) { + return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.LockArgs.UID) } return nil } @@ -227,11 +227,11 @@ func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) error { if err := args.IsAuthenticated(); err != nil { return err } - if len(args.dsyncLockArgs.UID) != 0 { - return fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.dsyncLockArgs.UID) + if len(args.LockArgs.UID) != 0 { + return fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.LockArgs.UID) } - if _, ok := l.lockMap[args.dsyncLockArgs.Resource]; ok { // Only clear lock when set - delete(l.lockMap, args.dsyncLockArgs.Resource) // Remove the lock (irrespective of write or read lock) + if _, ok := l.lockMap[args.LockArgs.Resource]; ok { // Only clear lock when set + delete(l.lockMap, args.LockArgs.Resource) // Remove the lock (irrespective of write or read lock) } *reply = true return nil @@ -245,17 +245,17 @@ func (l *lockServer) Expired(args *LockArgs, reply *bool) error { return err } // Lock found, proceed to verify if belongs to given uid. - if lri, ok := l.lockMap[args.dsyncLockArgs.Resource]; ok { + if lri, ok := l.lockMap[args.LockArgs.Resource]; ok { // Check whether uid is still active for _, entry := range lri { - if entry.uid == args.dsyncLockArgs.UID { + if entry.uid == args.LockArgs.UID { *reply = false // When uid found, lock is still active so return not expired. return nil // When uid found *reply is set to true. 
} } } - // When we get here lock is no longer active due to either args.dsyncLockArgs.Resource - // being absent from map or uid not found for given args.dsyncLockArgs.Resource + // When we get here lock is no longer active due to either args.LockArgs.Resource + // being absent from map or uid not found for given args.LockArgs.Resource *reply = true return nil } diff --git a/cmd/lock-rpc-server_test.go b/cmd/lock-rpc-server_test.go index eb8bebd31..9e406dbda 100644 --- a/cmd/lock-rpc-server_test.go +++ b/cmd/lock-rpc-server_test.go @@ -350,7 +350,7 @@ func TestLockRpcServerForceUnlock(t *testing.T) { } // Then test force unlock of a lock that does not exist (not returning an error) - laForce.dsyncLockArgs.UID = "" + laForce.LockArgs.UID = "" laForce.SetRequestTime(time.Now().UTC()) err = locker.ForceUnlock(&laForce, &result) if err != nil { diff --git a/cmd/rpc-common.go b/cmd/rpc-common.go index 8180aad45..eeaa5e0c3 100644 --- a/cmd/rpc-common.go +++ b/cmd/rpc-common.go @@ -103,9 +103,9 @@ type LoginRPCReply struct { // LockArgs represents arguments for any authenticated lock RPC call. type LockArgs struct { AuthRPCArgs - dsyncLockArgs dsync.LockArgs + LockArgs dsync.LockArgs } func newLockArgs(args dsync.LockArgs) LockArgs { - return LockArgs{dsyncLockArgs: args} + return LockArgs{LockArgs: args} } From 95d9e47353ef1301cbd53c4a94aec833f415a0b6 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Mon, 9 Jan 2017 23:22:20 +0100 Subject: [PATCH 054/100] Presign V2: Unescape non-std queries in urls (#3549) A client sends escaped characters in values of some query parameters in a presign url. This commit properly unescapes queires to fix signature calculation. 
--- cmd/signature-v2.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/cmd/signature-v2.go b/cmd/signature-v2.go index 3952a912f..2698891d3 100644 --- a/cmd/signature-v2.go +++ b/cmd/signature-v2.go @@ -111,13 +111,18 @@ func doesPresignV2SignatureMatch(r *http.Request) APIErrorCode { case "Expires": expires, err = url.QueryUnescape(keyval[1]) default: - filteredQueries = append(filteredQueries, query) + unescapedQuery, qerr := url.QueryUnescape(query) + if qerr == nil { + filteredQueries = append(filteredQueries, unescapedQuery) + } else { + err = qerr + } + } + // Check if the query unescaped properly. + if err != nil { + errorIf(err, "Unable to unescape query values", queries) + return ErrInvalidQueryParams } - } - // Check if the query unescaped properly. - if err != nil { - errorIf(err, "Unable to unescape query values", queries) - return ErrInvalidQueryParams } // Invalid access key. From eb6d53d2f5d66e16a621141df111bd78bb843ecb Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Tue, 10 Jan 2017 00:48:13 +0100 Subject: [PATCH 055/100] heal: Fix new entries computation in listDirHeal (#3551) A crash was happening due to an incorrect interpreation of the return value of sort.SearchString() --- cmd/xl-v1-list-objects-heal.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/xl-v1-list-objects-heal.go b/cmd/xl-v1-list-objects-heal.go index b38411bb9..1871470e5 100644 --- a/cmd/xl-v1-list-objects-heal.go +++ b/cmd/xl-v1-list-objects-heal.go @@ -58,7 +58,9 @@ func listDirHealFactory(isLeaf isLeafFunc, disks ...StorageAPI) listDirFunc { // find elements in entries which are not in mergedentries for _, entry := range entries { idx := sort.SearchStrings(mergedEntries, entry) - if mergedEntries[idx] == entry { + // idx different from len(mergedEntries) means entry is not found + // in mergedEntries + if idx < len(mergedEntries) { continue } newEntries = append(newEntries, entry) From 
0563a9235aad4c7b07fc25d77dcfbc9fc3edf26c Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 10 Jan 2017 11:01:23 -0800 Subject: [PATCH 056/100] handlers: Handle crash if r.URL.Path is empty. (#3554) URL paths can be empty and not have preceding separator, we do not yet know the conditions this can happen inside Go http server. This patch is to ensure that we do not crash ourselves under conditions where r.URL.Path may be empty. Fixes #3553 --- cmd/generic-handlers.go | 13 +----- cmd/utils.go | 32 +++++++++++++++ cmd/utils_test.go | 88 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 121 insertions(+), 12 deletions(-) diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index 2ce0a878f..e38b65339 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -311,18 +311,7 @@ var notimplementedObjectResourceNames = map[string]bool{ // Resource handler ServeHTTP() wrapper func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Skip the first element which is usually '/' and split the rest. - splits := strings.SplitN(r.URL.Path[1:], "/", 2) - - // Save bucketName and objectName extracted from url Path. - var bucketName, objectName string - if len(splits) == 1 { - bucketName = splits[0] - } - if len(splits) == 2 { - bucketName = splits[0] - objectName = splits[1] - } + bucketName, objectName := urlPath2BucketObjectName(r.URL) // If bucketName is present and not objectName check for bucket level resource queries. 
if bucketName != "" && objectName == "" { diff --git a/cmd/utils.go b/cmd/utils.go index 359715fd6..8723e62ad 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -70,6 +70,38 @@ func checkDuplicateStrings(list []string) error { return nil } +// splitStr splits a string into n parts, empty strings are added +// if we are not able to reach n elements +func splitStr(path, sep string, n int) []string { + splits := strings.SplitN(path, sep, n) + // Add empty strings if we found elements less than nr + for i := n - len(splits); i > 0; i-- { + splits = append(splits, "") + } + return splits +} + +// Convert url path into bucket and object name. +func urlPath2BucketObjectName(u *url.URL) (bucketName, objectName string) { + if u == nil { + // Empty url, return bucket and object names. + return + } + + // Trim any preceding slash separator. + urlPath := strings.TrimPrefix(u.Path, slashSeparator) + + // Split urlpath using slash separator into a given number of + // expected tokens. + tokens := splitStr(urlPath, slashSeparator, 2) + + // Extract bucket and objects. + bucketName, objectName = tokens[0], tokens[1] + + // Success. + return bucketName, objectName +} + // checkDuplicates - function to validate if there are duplicates in a slice of endPoints. func checkDuplicateEndpoints(endpoints []*url.URL) error { var strs []string diff --git a/cmd/utils_test.go b/cmd/utils_test.go index 89d0aae67..a112b1d55 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -220,6 +220,94 @@ func TestMaxPartID(t *testing.T) { } } +// Tests extracting bucket and objectname from various types of URL paths. +func TestURL2BucketObjectName(t *testing.T) { + testCases := []struct { + u *url.URL + bucket, object string + }{ + // Test case 1 normal case. + { + u: &url.URL{ + Path: "/bucket/object", + }, + bucket: "bucket", + object: "object", + }, + // Test case 2 where url only has separator. 
+ { + u: &url.URL{ + Path: "/", + }, + bucket: "", + object: "", + }, + // Test case 3 only bucket is present. + { + u: &url.URL{ + Path: "/bucket", + }, + bucket: "bucket", + object: "", + }, + // Test case 4 many separators and object is a directory. + { + u: &url.URL{ + Path: "/bucket/object/1/", + }, + bucket: "bucket", + object: "object/1/", + }, + // Test case 5 object has many trailing separators. + { + u: &url.URL{ + Path: "/bucket/object/1///", + }, + bucket: "bucket", + object: "object/1///", + }, + // Test case 6 object has only trailing separators. + { + u: &url.URL{ + Path: "/bucket/object///////", + }, + bucket: "bucket", + object: "object///////", + }, + // Test case 7 object has preceding separators. + { + u: &url.URL{ + Path: "/bucket////object////", + }, + bucket: "bucket", + object: "///object////", + }, + // Test case 8 url is not allocated. + { + u: nil, + bucket: "", + object: "", + }, + // Test case 9 url path is empty. + { + u: &url.URL{}, + bucket: "", + object: "", + }, + } + + // Validate all test cases. + for i, testCase := range testCases { + bucketName, objectName := urlPath2BucketObjectName(testCase.u) + if bucketName != testCase.bucket { + t.Errorf("Test %d: failed expected bucket name \"%s\", got \"%s\"", i+1, testCase.bucket, bucketName) + } + if objectName != testCase.object { + t.Errorf("Test %d: failed expected bucket name \"%s\", got \"%s\"", i+1, testCase.object, objectName) + } + } +} + // Add tests for starting and stopping different profilers. func TestStartProfiler(t *testing.T) { if startProfiler("") != nil { From b0cfceb2110e44f20a9095c82d9dedf78ab25397 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 10 Jan 2017 16:43:48 -0800 Subject: [PATCH 057/100] event: Enhance event message struct to provide origin server. (#3557) `principalId` i.e user identity is kept as AccessKey in accordance with S3 spec. 
Additionally responseElements{} are added starting with `x-amz-request-id` is a hexadecimal of the event time itself in nanosecs. `x-minio-origin-server` - points to the server generating the event. Fixes #3556 --- cmd/api-datatypes.go | 5 ++ cmd/api-headers.go | 23 +++----- cmd/api-headers_test.go | 3 +- cmd/bucket-handlers.go | 2 +- cmd/bucket-notification-datatypes.go | 31 ++++++++-- cmd/bucket-policy-handlers_test.go | 24 ++++---- cmd/bucket-policy-parser.go | 11 +--- cmd/bucket-policy-parser_test.go | 28 +++++----- cmd/bucket-policy.go | 6 ++ cmd/certs.go | 19 +++++++ cmd/event-notifier.go | 55 +++++++++++++----- cmd/globals.go | 4 ++ cmd/lock-instrument.go | 11 +++- cmd/lock-instrument_test.go | 19 +++++++ cmd/server-main.go | 84 ++++------------------------ cmd/server-main_test.go | 4 +- cmd/server-startup-msg.go | 26 ++++++--- cmd/server-startup-msg_test.go | 36 ++++++++++++ cmd/server-startup-utils.go | 67 ++++++++++++++++++++++ cmd/web-handlers_test.go | 12 ++-- 20 files changed, 309 insertions(+), 161 deletions(-) create mode 100644 cmd/server-startup-utils.go diff --git a/cmd/api-datatypes.go b/cmd/api-datatypes.go index 12b7f4bf7..f74c64440 100644 --- a/cmd/api-datatypes.go +++ b/cmd/api-datatypes.go @@ -20,6 +20,11 @@ import ( "encoding/xml" ) +const ( + // Response request id. + responseRequestIDKey = "x-amz-request-id" +) + // ObjectIdentifier carries key name for the object to delete. type ObjectIdentifier struct { ObjectName string `xml:"Key"` diff --git a/cmd/api-headers.go b/cmd/api-headers.go index a16470ca0..62d32a0ec 100644 --- a/cmd/api-headers.go +++ b/cmd/api-headers.go @@ -18,33 +18,24 @@ package cmd import ( "bytes" - "crypto/rand" "encoding/xml" + "fmt" "net/http" "runtime" "strconv" + "time" ) -const requestIDLen = 16 - -// mustGetRequestID generates and returns request ID string. 
-func mustGetRequestID() string { - reqBytes := make([]byte, requestIDLen) - if _, err := rand.Read(reqBytes); err != nil { - panic(err) - } - - for i := 0; i < requestIDLen; i++ { - reqBytes[i] = alphaNumericTable[reqBytes[i]%alphaNumericTableLen] - } - - return string(reqBytes) +// Returns a hexadecimal representation of time at the +// time response is sent to the client. +func mustGetRequestID(t time.Time) string { + return fmt.Sprintf("%X", t.UnixNano()) } // Write http common headers func setCommonHeaders(w http.ResponseWriter) { // Set unique request ID for each reply. - w.Header().Set("X-Amz-Request-Id", mustGetRequestID()) + w.Header().Set(responseRequestIDKey, mustGetRequestID(time.Now().UTC())) w.Header().Set("Server", ("Minio/" + ReleaseTag + " (" + runtime.GOOS + "; " + runtime.GOARCH + ")")) w.Header().Set("Accept-Ranges", "bytes") } diff --git a/cmd/api-headers_test.go b/cmd/api-headers_test.go index 540d136ee..0d19f13dd 100644 --- a/cmd/api-headers_test.go +++ b/cmd/api-headers_test.go @@ -18,11 +18,12 @@ package cmd import ( "testing" + "time" ) func TestNewRequestID(t *testing.T) { // Ensure that it returns an alphanumeric result of length 16. - var id = mustGetRequestID() + var id = mustGetRequestID(time.Now().UTC()) if len(id) != 16 { t.Fail() diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 5aa99885c..8f845a046 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -56,7 +56,7 @@ func enforceBucketPolicy(bucket string, action string, reqURL *url.URL) (s3Error } // Construct resource in 'arn:aws:s3:::examplebucket/object' format. - resource := AWSResourcePrefix + strings.TrimSuffix(strings.TrimPrefix(reqURL.Path, "/"), "/") + resource := bucketARNPrefix + strings.TrimSuffix(strings.TrimPrefix(reqURL.Path, "/"), "/") // Get conditions for policy verification. 
conditionKeyMap := make(map[string]set.StringSet) diff --git a/cmd/bucket-notification-datatypes.go b/cmd/bucket-notification-datatypes.go index 252947602..02695b36f 100644 --- a/cmd/bucket-notification-datatypes.go +++ b/cmd/bucket-notification-datatypes.go @@ -114,15 +114,11 @@ func (eventName EventName) String() string { } } -// Indentity represents the user id, this is a compliance field. +// Indentity represents the accessKey who caused the event. type identity struct { PrincipalID string `json:"principalId"` } -func defaultIdentity() identity { - return identity{"minio"} -} - // Notification event bucket metadata. type bucketMeta struct { Name string `json:"name"` @@ -139,6 +135,21 @@ type objectMeta struct { Sequencer string `json:"sequencer"` } +const ( + // Event schema version number defaulting to the value in S3 spec. + // ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html + eventSchemaVersion = "1.0" + + // Default ID found in bucket notification configuration. + // ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html + eventConfigID = "Config" +) + +const ( + // Response element origin endpoint key. + responseOriginEndpointKey = "x-minio-origin-endpoint" +) + // Notification event server specific metadata. type eventMeta struct { SchemaVersion string `json:"s3SchemaVersion"` @@ -147,6 +158,16 @@ type eventMeta struct { Object objectMeta `json:"object"` } +const ( + // Event source static value defaulting to the value in S3 spec. + // ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html + eventSource = "aws:s3" + + // Event version number defaulting to the value in S3 spec. + // ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html + eventVersion = "2.0" +) + // NotificationEvent represents an Amazon an S3 bucket notification event. 
type NotificationEvent struct { EventVersion string `json:"eventVersion"` diff --git a/cmd/bucket-policy-handlers_test.go b/cmd/bucket-policy-handlers_test.go index 16536de51..cfc690ef7 100644 --- a/cmd/bucket-policy-handlers_test.go +++ b/cmd/bucket-policy-handlers_test.go @@ -40,7 +40,7 @@ func TestBucketPolicyResourceMatch(t *testing.T) { // generates resource prefix. generateResource := func(bucketName, objectName string) string { - return AWSResourcePrefix + bucketName + "/" + objectName + return bucketARNPrefix + bucketName + "/" + objectName } testCases := []struct { @@ -50,30 +50,30 @@ func TestBucketPolicyResourceMatch(t *testing.T) { }{ // Test case 1-4. // Policy with resource ending with bucket/* allows access to all objects inside the given bucket. - {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, "minio-bucket"+"/*")), true}, - {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, "minio-bucket"+"/*")), true}, - {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, "minio-bucket"+"/*")), true}, - {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, "minio-bucket"+"/*")), true}, + {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/*")), true}, + {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/*")), true}, + {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/*")), true}, + {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/*")), true}, // Test case - 5. // Policy with resource ending with bucket/oo* should not allow access to bucket/output.txt. 
- {generateResource("minio-bucket", "output.txt"), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, "minio-bucket"+"/oo*")), false}, + {generateResource("minio-bucket", "output.txt"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/oo*")), false}, // Test case - 6. // Policy with resource ending with bucket/oo* should allow access to bucket/ootput.txt. - {generateResource("minio-bucket", "ootput.txt"), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, "minio-bucket"+"/oo*")), true}, + {generateResource("minio-bucket", "ootput.txt"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/oo*")), true}, // Test case - 7. // Policy with resource ending with bucket/oo* allows access to all sub-dirs starting with "oo" inside given bucket. - {generateResource("minio-bucket", "oop-bucket/my-file"), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, "minio-bucket"+"/oo*")), true}, + {generateResource("minio-bucket", "oop-bucket/my-file"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/oo*")), true}, // Test case - 8. - {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false}, + {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/Asia/Japan/*")), false}, // Test case - 9. - {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false}, + {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/Asia/Japan/*")), false}, // Test case - 10. // Proves that the name space is flat. 
- {generateResource("minio-bucket", "Africa/Bihar/India/design_info.doc/Bihar"), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, + {generateResource("minio-bucket", "Africa/Bihar/India/design_info.doc/Bihar"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/*/India/*/Bihar")), true}, // Test case - 11. // Proves that the name space is flat. - {generateResource("minio-bucket", "Asia/China/India/States/Bihar/output.txt"), generateStatement(fmt.Sprintf("%s%s", AWSResourcePrefix, + {generateResource("minio-bucket", "Asia/China/India/States/Bihar/output.txt"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/*/India/*/Bihar/*")), true}, } for i, testCase := range testCases { diff --git a/cmd/bucket-policy-parser.go b/cmd/bucket-policy-parser.go index da47c3d8e..1264a610c 100644 --- a/cmd/bucket-policy-parser.go +++ b/cmd/bucket-policy-parser.go @@ -29,11 +29,6 @@ import ( "github.com/minio/minio-go/pkg/set" ) -const ( - // AWSResourcePrefix - bucket policy resource prefix. - AWSResourcePrefix = "arn:aws:s3:::" -) - // supportedActionMap - lists all the actions supported by minio. 
var supportedActionMap = set.CreateStringSet("*", "s3:*", "s3:GetObject", "s3:ListBucket", "s3:PutObject", "s3:GetBucketLocation", "s3:DeleteObject", @@ -111,11 +106,11 @@ func isValidResources(resources set.StringSet) (err error) { return err } for resource := range resources { - if !strings.HasPrefix(resource, AWSResourcePrefix) { + if !strings.HasPrefix(resource, bucketARNPrefix) { err = errors.New("Unsupported resource style found: ‘" + resource + "’, please validate your policy document") return err } - resourceSuffix := strings.SplitAfter(resource, AWSResourcePrefix)[1] + resourceSuffix := strings.SplitAfter(resource, bucketARNPrefix)[1] if len(resourceSuffix) == 0 || strings.HasPrefix(resourceSuffix, "/") { err = errors.New("Invalid resource style found: ‘" + resource + "’, please validate your policy document") return err @@ -236,7 +231,7 @@ func checkBucketPolicyResources(bucket string, bucketPolicy *bucketPolicy) APIEr for _, statement := range bucketPolicy.Statements { for action := range statement.Actions { for resource := range statement.Resources { - resourcePrefix := strings.SplitAfter(resource, AWSResourcePrefix)[1] + resourcePrefix := strings.SplitAfter(resource, bucketARNPrefix)[1] if _, ok := invalidPrefixActions[action]; ok { // Resource prefix is not equal to bucket for // prefix invalid actions, reject them. diff --git a/cmd/bucket-policy-parser_test.go b/cmd/bucket-policy-parser_test.go index e7fd82dab..522dc2c63 100644 --- a/cmd/bucket-policy-parser_test.go +++ b/cmd/bucket-policy-parser_test.go @@ -79,7 +79,7 @@ func getReadWriteObjectStatement(bucketName, objectPrefix string) policyStatemen objectResourceStatement.Principal = map[string]interface{}{ "AWS": "*", } - objectResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", AWSResourcePrefix, bucketName+"/"+objectPrefix+"*")}...) 
+ objectResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName+"/"+objectPrefix+"*")}...) objectResourceStatement.Actions = set.CreateStringSet(readWriteObjectActions...) return objectResourceStatement } @@ -91,7 +91,7 @@ func getReadWriteBucketStatement(bucketName, objectPrefix string) policyStatemen bucketResourceStatement.Principal = map[string]interface{}{ "AWS": "*", } - bucketResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", AWSResourcePrefix, bucketName)}...) + bucketResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName)}...) bucketResourceStatement.Actions = set.CreateStringSet(readWriteBucketActions...) return bucketResourceStatement } @@ -111,7 +111,7 @@ func getReadOnlyBucketStatement(bucketName, objectPrefix string) policyStatement bucketResourceStatement.Principal = map[string]interface{}{ "AWS": "*", } - bucketResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", AWSResourcePrefix, bucketName)}...) + bucketResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName)}...) bucketResourceStatement.Actions = set.CreateStringSet(readOnlyBucketActions...) return bucketResourceStatement } @@ -123,7 +123,7 @@ func getReadOnlyObjectStatement(bucketName, objectPrefix string) policyStatement objectResourceStatement.Principal = map[string]interface{}{ "AWS": "*", } - objectResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", AWSResourcePrefix, bucketName+"/"+objectPrefix+"*")}...) + objectResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName+"/"+objectPrefix+"*")}...) objectResourceStatement.Actions = set.CreateStringSet(readOnlyObjectActions...) 
return objectResourceStatement } @@ -144,7 +144,7 @@ func getWriteOnlyBucketStatement(bucketName, objectPrefix string) policyStatemen bucketResourceStatement.Principal = map[string]interface{}{ "AWS": "*", } - bucketResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", AWSResourcePrefix, bucketName)}...) + bucketResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName)}...) bucketResourceStatement.Actions = set.CreateStringSet(writeOnlyBucketActions...) return bucketResourceStatement } @@ -156,7 +156,7 @@ func getWriteOnlyObjectStatement(bucketName, objectPrefix string) policyStatemen objectResourceStatement.Principal = map[string]interface{}{ "AWS": "*", } - objectResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", AWSResourcePrefix, bucketName+"/"+objectPrefix+"*")}...) + objectResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName+"/"+objectPrefix+"*")}...) objectResourceStatement.Actions = set.CreateStringSet(writeOnlyObjectActions...) return objectResourceStatement } @@ -269,19 +269,19 @@ func TestIsValidResources(t *testing.T) { // Empty Resources. {[]string{}, errors.New("Resource list cannot be empty"), false}, // Test case - 2. - // A valid resource should have prefix "arn:aws:s3:::". + // A valid resource should have prefix bucketARNPrefix. {[]string{"my-resource"}, errors.New("Unsupported resource style found: ‘my-resource’, please validate your policy document"), false}, // Test case - 3. - // A Valid resource should have bucket name followed by "arn:aws:s3:::". - {[]string{"arn:aws:s3:::"}, errors.New("Invalid resource style found: ‘arn:aws:s3:::’, please validate your policy document"), false}, + // A Valid resource should have bucket name followed by bucketARNPrefix. 
+ {[]string{bucketARNPrefix}, errors.New("Invalid resource style found: ‘arn:aws:s3:::’, please validate your policy document"), false}, // Test Case - 4. - // Valid resource shouldn't have slash('/') followed by "arn:aws:s3:::". - {[]string{"arn:aws:s3:::/"}, errors.New("Invalid resource style found: ‘arn:aws:s3:::/’, please validate your policy document"), false}, + // Valid resource shouldn't have slash('/') followed by bucketARNPrefix. + {[]string{bucketARNPrefix + "/"}, errors.New("Invalid resource style found: ‘arn:aws:s3:::/’, please validate your policy document"), false}, // Test cases with valid Resources. - {[]string{"arn:aws:s3:::my-bucket"}, nil, true}, - {[]string{"arn:aws:s3:::my-bucket/Asia/*"}, nil, true}, - {[]string{"arn:aws:s3:::my-bucket/Asia/India/*"}, nil, true}, + {[]string{bucketARNPrefix + "my-bucket"}, nil, true}, + {[]string{bucketARNPrefix + "my-bucket/Asia/*"}, nil, true}, + {[]string{bucketARNPrefix + "my-bucket/Asia/India/*"}, nil, true}, } for i, testCase := range testCases { err := isValidResources(set.CreateStringSet(testCase.resources...)) diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go index 0f12aee62..9619e1c05 100644 --- a/cmd/bucket-policy.go +++ b/cmd/bucket-policy.go @@ -24,6 +24,12 @@ import ( "sync" ) +const ( + // Static prefix to be used while constructing bucket ARN. + // refer to S3 docs for more info. + bucketARNPrefix = "arn:" + eventSource + ":::" +) + // Variable represents bucket policies in memory. 
var globalBucketPolicies *bucketPolicies diff --git a/cmd/certs.go b/cmd/certs.go index 31896afd1..ce24bf37f 100644 --- a/cmd/certs.go +++ b/cmd/certs.go @@ -145,3 +145,22 @@ func parseCertificateChain(bytes []byte) ([]*x509.Certificate, error) { } return certs, nil } + +// loadRootCAs fetches CA files provided in minio config and adds them to globalRootCAs +// Currently under Windows, there is no way to load system + user CAs at the same time +func loadRootCAs() { + caFiles := mustGetCAFiles() + if len(caFiles) == 0 { + return + } + // Get system cert pool, and empty cert pool under Windows because it is not supported + globalRootCAs = mustGetSystemCertPool() + // Load custom root CAs for client requests + for _, caFile := range caFiles { + caCert, err := ioutil.ReadFile(caFile) + if err != nil { + fatalIf(err, "Unable to load a CA file") + } + globalRootCAs.AppendCertsFromPEM(caCert) + } +} diff --git a/cmd/event-notifier.go b/cmd/event-notifier.go index 1a1e63c98..85c600242 100644 --- a/cmd/event-notifier.go +++ b/cmd/event-notifier.go @@ -88,49 +88,76 @@ type eventData struct { // New notification event constructs a new notification event message from // input request metadata which completed successfully. func newNotificationEvent(event eventData) NotificationEvent { - /// Construct a new object created event. + // Fetch the region. region := serverConfig.GetRegion() - tnow := time.Now().UTC() - sequencer := fmt.Sprintf("%X", tnow.UnixNano()) + + // Fetch the credentials. + creds := serverConfig.GetCredential() + + // Time when Minio finished processing the request. + eventTime := time.Now().UTC() + + // API endpoint is captured here to be returned back + // to the client for it to differentiate from which + // server the request came from. + var apiEndpoint string + if len(globalAPIEndpoints) >= 1 { + apiEndpoint = globalAPIEndpoints[0] + } + + // Fetch a hexadecimal representation of event time in nano seconds. 
+ uniqueID := mustGetRequestID(eventTime) + + /// Construct a new object created event. + // Following blocks fills in all the necessary details of s3 // event message structure. // http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html nEvent := NotificationEvent{ - EventVersion: "2.0", - EventSource: "aws:s3", + EventVersion: eventVersion, + EventSource: eventSource, AwsRegion: region, - EventTime: tnow.Format(timeFormatAMZ), + EventTime: eventTime.Format(timeFormatAMZ), EventName: event.Type.String(), - UserIdentity: defaultIdentity(), + UserIdentity: identity{creds.AccessKey}, RequestParameters: event.ReqParams, - ResponseElements: map[string]string{}, + ResponseElements: map[string]string{ + responseRequestIDKey: uniqueID, + // Following is a custom response element to indicate + // event origin server endpoint. + responseOriginEndpointKey: apiEndpoint, + }, S3: eventMeta{ - SchemaVersion: "1.0", - ConfigurationID: "Config", + SchemaVersion: eventSchemaVersion, + ConfigurationID: eventConfigID, Bucket: bucketMeta{ Name: event.Bucket, - OwnerIdentity: defaultIdentity(), - ARN: "arn:aws:s3:::" + event.Bucket, + OwnerIdentity: identity{creds.AccessKey}, + ARN: bucketARNPrefix + event.Bucket, }, }, } + // Escape the object name. For example "red flower.jpg" becomes "red+flower.jpg". escapedObj := url.QueryEscape(event.ObjInfo.Name) + // For delete object event type, we do not need to set ETag and Size. if event.Type == ObjectRemovedDelete { nEvent.S3.Object = objectMeta{ Key: escapedObj, - Sequencer: sequencer, + Sequencer: uniqueID, } return nEvent } + // For all other events we should set ETag and Size. nEvent.S3.Object = objectMeta{ Key: escapedObj, ETag: event.ObjInfo.MD5Sum, Size: event.ObjInfo.Size, - Sequencer: sequencer, + Sequencer: uniqueID, } + // Success. 
return nEvent } diff --git a/cmd/globals.go b/cmd/globals.go index 3eadd1902..9f31944eb 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -73,6 +73,7 @@ var ( // Cache expiry. globalCacheExpiry = objcache.DefaultExpiry + // Minio local server address (in `host:port` format) globalMinioAddr = "" // Minio default port, can be changed through command line. @@ -80,6 +81,9 @@ var ( // Holds the host that was passed using --address globalMinioHost = "" + // Holds the list of API endpoints for a given server. + globalAPIEndpoints = []string{} + // Peer communication struct globalS3Peers = s3Peers{} diff --git a/cmd/lock-instrument.go b/cmd/lock-instrument.go index 0b5fea730..b11dd81e5 100644 --- a/cmd/lock-instrument.go +++ b/cmd/lock-instrument.go @@ -17,6 +17,7 @@ package cmd import ( + "crypto/rand" "fmt" "time" ) @@ -225,5 +226,13 @@ func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, opsID string) error // Return randomly generated string ID func getOpsID() string { - return mustGetRequestID() + const opsIDLen = 16 + opsIDBytes := make([]byte, opsIDLen) + if _, err := rand.Read(opsIDBytes); err != nil { + panic(err) + } + for i := 0; i < opsIDLen; i++ { + opsIDBytes[i] = alphaNumericTable[opsIDBytes[i]%alphaNumericTableLen] + } + return string(opsIDBytes) } diff --git a/cmd/lock-instrument_test.go b/cmd/lock-instrument_test.go index 503dc7876..d77030559 100644 --- a/cmd/lock-instrument_test.go +++ b/cmd/lock-instrument_test.go @@ -228,6 +228,25 @@ func verifyLockState(l lockStateCase, t *testing.T, testNum int) { verifyLockStats(l, t, testNum) } +func TestGetOpsID(t *testing.T) { + // Ensure that it returns an alphanumeric result of length 16. + var id = getOpsID() + + if len(id) != 16 { + t.Fail() + } + + var e rune + for _, char := range id { + e = rune(char) + + // Ensure that it is alphanumeric, in this case, between 0-9 and A-Z. 
+ if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) { + t.Fail() + } + } +} + // TestNewDebugLockInfoPerVolumePath - Validates the values initialized by newDebugLockInfoPerVolumePath(). func TestNewDebugLockInfoPerVolumePath(t *testing.T) { lockInfo := &debugLockInfoPerVolumePath{ diff --git a/cmd/server-main.go b/cmd/server-main.go index 23ba3cac1..36436ead2 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -18,9 +18,7 @@ package cmd import ( "fmt" - "io/ioutil" "net" - "net/http" "net/url" "os" "path" @@ -129,69 +127,6 @@ func parseStorageEndpoints(eps []string) (endpoints []*url.URL, err error) { return endpoints, nil } -// getListenIPs - gets all the ips to listen on. -func getListenIPs(serverAddr string) (hosts []string, port string, err error) { - var host string - host, port, err = net.SplitHostPort(serverAddr) - if err != nil { - return nil, port, fmt.Errorf("Unable to parse host address %s", err) - } - if host == "" { - var ipv4s []net.IP - ipv4s, err = getInterfaceIPv4s() - if err != nil { - return nil, port, fmt.Errorf("Unable reverse sorted ips from hosts %s", err) - } - for _, ip := range ipv4s { - hosts = append(hosts, ip.String()) - } - return hosts, port, nil - } // if host != "" { - // Proceed to append itself, since user requested a specific endpoint. - hosts = append(hosts, host) - return hosts, port, nil -} - -// Finalizes the endpoints based on the host list and port. -func finalizeEndpoints(tls bool, apiServer *http.Server) (endPoints []string) { - // Verify current scheme. - scheme := "http" - if tls { - scheme = "https" - } - - // Get list of listen ips and port. - hosts, port, err := getListenIPs(apiServer.Addr) - fatalIf(err, "Unable to get list of ips to listen on") - - // Construct proper endpoints. - for _, host := range hosts { - endPoints = append(endPoints, fmt.Sprintf("%s://%s:%s", scheme, host, port)) - } - - // Success. 
- return endPoints -} - -// loadRootCAs fetches CA files provided in minio config and adds them to globalRootCAs -// Currently under Windows, there is no way to load system + user CAs at the same time -func loadRootCAs() { - caFiles := mustGetCAFiles() - if len(caFiles) == 0 { - return - } - // Get system cert pool, and empty cert pool under Windows because it is not supported - globalRootCAs = mustGetSystemCertPool() - // Load custom root CAs for client requests - for _, caFile := range mustGetCAFiles() { - caCert, err := ioutil.ReadFile(caFile) - if err != nil { - fatalIf(err, "Unable to load a CA file") - } - globalRootCAs.AppendCertsFromPEM(caCert) - } -} - // initServerConfig initialize server config. func initServerConfig(c *cli.Context) { // Create certs path. @@ -430,6 +365,9 @@ func serverMain(c *cli.Context) { fatalIf(errInvalidArgument, "None of the disks passed as command line args are local to this server.") } + // Is TLS configured?. + tls := isSSL() + // Sort endpoints for consistent ordering across multiple // nodes in a distributed setup. This is to avoid format.json // corruption if the disks aren't supplied in the same order @@ -473,15 +411,15 @@ func serverMain(c *cli.Context) { // Initialize a new HTTP server. apiServer := NewServerMux(serverAddr, handler) - // If https. - tls := isSSL() - - // Fetch endpoints which we are going to serve from. - endPoints := finalizeEndpoints(tls, apiServer.Server) - - // Initialize local server address + // Set the global minio addr for this server. globalMinioAddr = getLocalAddress(srvConfig) + // Determine API endpoints where we are going to serve the S3 API from. + apiEndPoints := finalizeAPIEndpoints(tls, apiServer.Server) + + // Set the global API endpoints value. 
+ globalAPIEndpoints = apiEndPoints + // Initialize S3 Peers inter-node communication initGlobalS3Peers(endpoints) @@ -512,7 +450,7 @@ func serverMain(c *cli.Context) { globalObjLayerMutex.Unlock() // Prints the formatted startup message once object layer is initialized. - printStartupMessage(endPoints) + printStartupMessage(apiEndPoints) // Waits on the server. <-globalServiceDoneCh diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index d666d31c9..3c64d6880 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -63,7 +63,7 @@ func TestGetListenIPs(t *testing.T) { } } -func TestFinalizeEndpoints(t *testing.T) { +func TestFinalizeAPIEndpoints(t *testing.T) { testCases := []struct { tls bool addr string @@ -75,7 +75,7 @@ func TestFinalizeEndpoints(t *testing.T) { } for i, test := range testCases { - endPoints := finalizeEndpoints(test.tls, &http.Server{Addr: test.addr}) + endPoints := finalizeAPIEndpoints(test.tls, &http.Server{Addr: test.addr}) if len(endPoints) <= 0 { t.Errorf("Test case %d returned with no API end points for %s", i+1, test.addr) diff --git a/cmd/server-startup-msg.go b/cmd/server-startup-msg.go index 720ede6c7..af2fbae25 100644 --- a/cmd/server-startup-msg.go +++ b/cmd/server-startup-msg.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -43,20 +43,30 @@ func getFormatStr(strLen int, padding int) string { } // Prints the formatted startup message. -func printStartupMessage(endPoints []string) { +func printStartupMessage(apiEndPoints []string) { // If quiet flag is set do not print startup message. if globalQuiet { return } - printServerCommonMsg(endPoints) - printCLIAccessMsg(endPoints[0]) + + // Prints credential, region and browser access. 
+ printServerCommonMsg(apiEndPoints) + + // Prints `mc` cli configuration message chooses + // first endpoint as default. + printCLIAccessMsg(apiEndPoints[0]) + + // Prints documentation message. printObjectAPIMsg() + // Object layer is initialized then print StorageInfo. objAPI := newObjectLayerFn() if objAPI != nil { printStorageInfo(objAPI.StorageInfo()) } + // SSL is configured reads certification chain, prints + // authority and expiry. if isSSL() { certs, err := readCertificateChain() fatalIf(err, "Unable to read certificate chain.") @@ -65,23 +75,23 @@ func printStartupMessage(endPoints []string) { } // Prints common server startup message. Prints credential, region and browser access. -func printServerCommonMsg(endPoints []string) { +func printServerCommonMsg(apiEndpoints []string) { // Get saved credentials. cred := serverConfig.GetCredential() // Get saved region. region := serverConfig.GetRegion() - endPointStr := strings.Join(endPoints, " ") + apiEndpointStr := strings.Join(apiEndpoints, " ") // Colorize the message and print. - console.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(endPointStr), 1), endPointStr))) + console.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr))) console.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey))) console.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey))) console.Println(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region))) printEventNotifiers() console.Println(colorBlue("\nBrowser Access:")) - console.Println(fmt.Sprintf(getFormatStr(len(endPointStr), 3), endPointStr)) + console.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr)) } // Prints bucket notification configurations. 
diff --git a/cmd/server-startup-msg_test.go b/cmd/server-startup-msg_test.go index 4a291546d..1a219858c 100644 --- a/cmd/server-startup-msg_test.go +++ b/cmd/server-startup-msg_test.go @@ -94,3 +94,39 @@ func TestCertificateNotExpired(t *testing.T) { t.Fatalf("Expected empty message was: %s", msg) } } + +// Test printing server common message. +func TestPrintServerCommonMessage(t *testing.T) { + root, err := newTestConfig("us-east-1") + if err != nil { + t.Fatal(err) + } + defer removeAll(root) + + apiEndpoints := []string{"127.0.0.1:9000"} + printServerCommonMsg(apiEndpoints) +} + +// Tests print cli access message. +func TestPrintCLIAccessMsg(t *testing.T) { + root, err := newTestConfig("us-east-1") + if err != nil { + t.Fatal(err) + } + defer removeAll(root) + + apiEndpoints := []string{"127.0.0.1:9000"} + printCLIAccessMsg(apiEndpoints[0]) +} + +// Test print startup message. +func TestPrintStartupMessage(t *testing.T) { + root, err := newTestConfig("us-east-1") + if err != nil { + t.Fatal(err) + } + defer removeAll(root) + + apiEndpoints := []string{"127.0.0.1:9000"} + printStartupMessage(apiEndpoints) +} diff --git a/cmd/server-startup-utils.go b/cmd/server-startup-utils.go new file mode 100644 index 000000000..fa884f934 --- /dev/null +++ b/cmd/server-startup-utils.go @@ -0,0 +1,67 @@ +/* + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cmd + +import ( + "fmt" + "net" + "net/http" +) + +// getListenIPs - gets all the ips to listen on. +func getListenIPs(serverAddr string) (hosts []string, port string, err error) { + var host string + host, port, err = net.SplitHostPort(serverAddr) + if err != nil { + return nil, port, fmt.Errorf("Unable to parse host address %s", err) + } + if host == "" { + var ipv4s []net.IP + ipv4s, err = getInterfaceIPv4s() + if err != nil { + return nil, port, fmt.Errorf("Unable reverse sort ips from hosts %s", err) + } + for _, ip := range ipv4s { + hosts = append(hosts, ip.String()) + } + return hosts, port, nil + } // if host != "" { + // Proceed to append itself, since user requested a specific endpoint. + hosts = append(hosts, host) + return hosts, port, nil +} + +// Finalizes the API endpoints based on the host list and port. +func finalizeAPIEndpoints(tls bool, apiServer *http.Server) (endPoints []string) { + // Verify current scheme. + scheme := "http" + if tls { + scheme = "https" + } + + // Get list of listen ips and port. + hosts, port, err := getListenIPs(apiServer.Addr) + fatalIf(err, "Unable to get list of ips to listen on") + + // Construct proper endpoints. + for _, host := range hosts { + endPoints = append(endPoints, fmt.Sprintf("%s://%s:%s", scheme, host, port)) + } + + // Success. 
+ return endPoints +} diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index 057b28999..e793d202c 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -948,14 +948,14 @@ func testWebGetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE Actions: set.CreateStringSet("s3:GetBucketLocation", "s3:ListBucket"), Effect: "Allow", Principal: map[string][]string{"AWS": {"*"}}, - Resources: set.CreateStringSet("arn:aws:s3:::" + bucketName), + Resources: set.CreateStringSet(bucketARNPrefix + bucketName), Sid: "", }, { Actions: set.CreateStringSet("s3:GetObject"), Effect: "Allow", Principal: map[string][]string{"AWS": {"*"}}, - Resources: set.CreateStringSet("arn:aws:s3:::" + bucketName + "/*"), + Resources: set.CreateStringSet(bucketARNPrefix + bucketName + "/*"), Sid: "", }, }, @@ -1031,7 +1031,7 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t Actions: set.CreateStringSet("s3:GetBucketLocation"), Effect: "Allow", Principal: map[string][]string{"AWS": {"*"}}, - Resources: set.CreateStringSet("arn:aws:s3:::" + bucketName), + Resources: set.CreateStringSet(bucketARNPrefix + bucketName), Sid: "", }, { @@ -1043,14 +1043,14 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t }, Effect: "Allow", Principal: map[string][]string{"AWS": {"*"}}, - Resources: set.CreateStringSet("arn:aws:s3:::" + bucketName), + Resources: set.CreateStringSet(bucketARNPrefix + bucketName), Sid: "", }, { Actions: set.CreateStringSet("s3:ListBucketMultipartUploads"), Effect: "Allow", Principal: map[string][]string{"AWS": {"*"}}, - Resources: set.CreateStringSet("arn:aws:s3:::" + bucketName), + Resources: set.CreateStringSet(bucketARNPrefix + bucketName), Sid: "", }, { @@ -1058,7 +1058,7 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t "s3:GetObject", "s3:ListMultipartUploadParts", "s3:PutObject"), Effect: "Allow", Principal: map[string][]string{"AWS": 
{"*"}}, - Resources: set.CreateStringSet("arn:aws:s3:::" + bucketName + "/hello*"), + Resources: set.CreateStringSet(bucketARNPrefix + bucketName + "/hello*"), Sid: "", }, }, From 7c6d77734a810cd3d982ad13fab6822498582c76 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 11 Jan 2017 00:26:00 -0800 Subject: [PATCH 058/100] Fix vendor.json to remove minio/blake2b-simd ref --- vendor/vendor.json | 6 ------ 1 file changed, 6 deletions(-) diff --git a/vendor/vendor.json b/vendor/vendor.json index 93d3bcfc8..549f66740 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -136,12 +136,6 @@ "revision": "56b76bdf51f7708750eac80fa38b952bb9f32639", "revisionTime": "2015-12-11T09:06:21+09:00" }, - { - "checksumSHA1": "IgPoMBktWdCLuyzDBfzi34sT+jg=", - "path": "github.com/minio/blake2b-simd", - "revision": "c50cace0dc7d72a80244a5f88ddd3e08a73db8de", - "revisionTime": "2016-07-22T09:38:12Z" - }, { "path": "github.com/minio/cli", "revision": "c4a07c7b68db77ccd119183fb1d01dd5972434ab", From 12a7a15daa9e6f9b7a54323563835cb1a51bac67 Mon Sep 17 00:00:00 2001 From: Krishna Srinivas Date: Thu, 12 Jan 2017 02:56:42 +0530 Subject: [PATCH 059/100] browser: Allow anonymous browsing of readable buckets. (#3515) --- cmd/bucket-handlers.go | 16 ++++ cmd/jwt.go | 19 +++- cmd/web-handlers.go | 58 ++++++++---- cmd/web-handlers_test.go | 188 +++++++++++++++++++++++++++++---------- 4 files changed, 216 insertions(+), 65 deletions(-) diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 8f845a046..c0ebb652f 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -23,6 +23,7 @@ import ( "io" "net/http" "net/url" + "path" "strings" "sync" @@ -71,6 +72,21 @@ func enforceBucketPolicy(bucket string, action string, reqURL *url.URL) (s3Error return ErrNone } +// Check if the action is allowed on the bucket/prefix. 
+func isBucketActionAllowed(action, bucket, prefix string) bool { + policy := globalBucketPolicies.GetBucketPolicy(bucket) + if policy == nil { + return false + } + resource := bucketARNPrefix + path.Join(bucket, prefix) + var conditionKeyMap map[string]set.StringSet + // Validate action, resource and conditions with current policy statements. + if !bucketPolicyEvalStatements(action, resource, conditionKeyMap, policy.Statements) { + return false + } + return true +} + // GetBucketLocationHandler - GET Bucket location. // ------------------------- // This operation returns bucket location. diff --git a/cmd/jwt.go b/cmd/jwt.go index c87e15ffa..549617cab 100644 --- a/cmd/jwt.go +++ b/cmd/jwt.go @@ -43,6 +43,7 @@ var errInvalidSecretKeyLength = errors.New("Invalid secret key, secret key shoul var errInvalidAccessKeyID = errors.New("The access key ID you provided does not exist in our records") var errAuthentication = errors.New("Authentication failed, check your access credentials") +var errNoAuthToken = errors.New("JWT token missing") func authenticateJWT(accessKey, secretKey string, expiry time.Duration) (string, error) { // Trim spaces. @@ -106,11 +107,23 @@ func isAuthTokenValid(tokenString string) bool { } func isHTTPRequestValid(req *http.Request) bool { + return webReqestAuthenticate(req) == nil +} + +// Check if the request is authenticated. +// Returns nil if the request is authenticated. errNoAuthToken if token missing. +// Returns errAuthentication for all other errors. 
+func webReqestAuthenticate(req *http.Request) error { jwtToken, err := jwtreq.ParseFromRequest(req, jwtreq.AuthorizationHeaderExtractor, keyFuncCallback) if err != nil { - errorIf(err, "Unable to parse JWT token string") - return false + if err == jwtreq.ErrNoTokenInRequest { + return errNoAuthToken + } + return errAuthentication } - return jwtToken.Valid + if !jwtToken.Valid { + return errAuthentication + } + return nil } diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index d358020b4..d4cb4a498 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -151,21 +151,23 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re if objectAPI == nil { return toJSONError(errServerNotInitialized) } - if !isHTTPRequestValid(r) { - return toJSONError(errAuthentication) + authErr := webReqestAuthenticate(r) + if authErr != nil { + return toJSONError(authErr) } buckets, err := objectAPI.ListBuckets() if err != nil { return toJSONError(err) } for _, bucket := range buckets { - // List all buckets which are not private. - if bucket.Name != path.Base(reservedBucket) { - reply.Buckets = append(reply.Buckets, WebBucketInfo{ - Name: bucket.Name, - CreationDate: bucket.Created, - }) + if bucket.Name == path.Base(reservedBucket) { + continue } + + reply.Buckets = append(reply.Buckets, WebBucketInfo{ + Name: bucket.Name, + CreationDate: bucket.Created, + }) } reply.UIVersion = miniobrowser.UIVersion return nil @@ -180,6 +182,7 @@ type ListObjectsArgs struct { // ListObjectsRep - list objects response. type ListObjectsRep struct { Objects []WebObjectInfo `json:"objects"` + Writable bool `json:"writable"` // Used by client to show "upload file" button. UIVersion string `json:"uiVersion"` } @@ -197,12 +200,30 @@ type WebObjectInfo struct { // ListObjects - list objects api. 
func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, reply *ListObjectsRep) error { + reply.UIVersion = miniobrowser.UIVersion objectAPI := web.ObjectAPI() if objectAPI == nil { return toJSONError(errServerNotInitialized) } - if !isHTTPRequestValid(r) { - return toJSONError(errAuthentication) + prefix := args.Prefix + "test" // To test if GetObject/PutObject with the specified prefix is allowed. + readable := isBucketActionAllowed("s3:GetObject", args.BucketName, prefix) + writable := isBucketActionAllowed("s3:PutObject", args.BucketName, prefix) + authErr := webReqestAuthenticate(r) + switch { + case authErr == errAuthentication: + return toJSONError(authErr) + case authErr == nil: + break + case readable && writable: + reply.Writable = true + break + case readable: + break + case writable: + reply.Writable = true + return nil + default: + return errAuthentication } marker := "" for { @@ -228,7 +249,6 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r break } } - reply.UIVersion = miniobrowser.UIVersion return nil } @@ -420,14 +440,20 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { return } - if !isHTTPRequestValid(r) { - writeWebErrorResponse(w, errAuthentication) - return - } vars := mux.Vars(r) bucket := vars["bucket"] object := vars["object"] + authErr := webReqestAuthenticate(r) + if authErr == errAuthentication { + writeWebErrorResponse(w, errAuthentication) + return + } + if authErr != nil && !isBucketActionAllowed("s3:PutObject", bucket, object) { + writeWebErrorResponse(w, errAuthentication) + return + } + // Extract incoming metadata if any. 
metadata := extractMetadataFromHeader(r.Header) @@ -467,7 +493,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) { object := vars["object"] token := r.URL.Query().Get("token") - if !isAuthTokenValid(token) { + if !isAuthTokenValid(token) && !isBucketActionAllowed("s3:GetObject", bucket, object) { writeWebErrorResponse(w, errAuthentication) return } diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index e793d202c..5031cbee6 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -19,6 +19,7 @@ package cmd import ( "bytes" "errors" + "fmt" "io/ioutil" "net/http" "net/http/httptest" @@ -422,30 +423,62 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa t.Fatalf("Was not able to upload an object, %v", err) } - listObjectsRequest := ListObjectsArgs{BucketName: bucketName, Prefix: ""} - listObjectsReply := &ListObjectsRep{} - req, err := newTestWebRPCRequest("Web.ListObjects", authorization, listObjectsRequest) - if err != nil { - t.Fatalf("Failed to create HTTP request: %v", err) + test := func(token string) (error, *ListObjectsRep) { + listObjectsRequest := ListObjectsArgs{BucketName: bucketName, Prefix: ""} + listObjectsReply := &ListObjectsRep{} + var req *http.Request + req, err = newTestWebRPCRequest("Web.ListObjects", token, listObjectsRequest) + if err != nil { + t.Fatalf("Failed to create HTTP request: %v", err) + } + apiRouter.ServeHTTP(rec, req) + if rec.Code != http.StatusOK { + return fmt.Errorf("Expected the response status to be 200, but instead found `%d`", rec.Code), listObjectsReply + } + err = getTestWebRPCResponse(rec, &listObjectsReply) + if err != nil { + return err, listObjectsReply + } + return nil, listObjectsReply } - apiRouter.ServeHTTP(rec, req) - if rec.Code != http.StatusOK { - t.Fatalf("Expected the response status to be 200, but instead found `%d`", rec.Code) - } - err = getTestWebRPCResponse(rec, &listObjectsReply) - if err != nil { - 
t.Fatalf("Failed, %v", err) - } - if len(listObjectsReply.Objects) == 0 { - t.Fatalf("Cannot find the object") - } - if listObjectsReply.Objects[0].Key != objectName { - t.Fatalf("Found another object other than already created by PutObject") - } - if listObjectsReply.Objects[0].Size != int64(objectSize) { - t.Fatalf("Found a object with the same name but with a different size") + verifyReply := func(reply *ListObjectsRep) { + if len(reply.Objects) == 0 { + t.Fatalf("Cannot find the object") + } + if reply.Objects[0].Key != objectName { + t.Fatalf("Found another object other than already created by PutObject") + } + if reply.Objects[0].Size != int64(objectSize) { + t.Fatalf("Found a object with the same name but with a different size") + } } + // Authenticated ListObjects should succeed. + err, reply := test(authorization) + if err != nil { + t.Fatal(err) + } + verifyReply(reply) + + // Unauthenticated ListObjects should fail. + err, reply = test("") + if err == nil { + t.Fatalf("Expected error `%s`", err) + } + + policy := bucketPolicy{ + Version: "1.0", + Statements: []policyStatement{getReadOnlyObjectStatement(bucketName, "")}, + } + + globalBucketPolicies.SetBucketPolicy(bucketName, policyChange{false, &policy}) + + // Unauthenticated ListObjects with READ bucket policy should succeed. 
+ err, reply = test("") + if err != nil { + t.Fatal(err) + } + verifyReply(reply) } // Wrapper for calling RemoveObject Web Handler @@ -695,8 +728,8 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler defer removeAll(rootPath) credentials := serverConfig.GetCredential() + content := []byte("temporary file's content") - rec := httptest.NewRecorder() authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") @@ -704,6 +737,26 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler objectName := "test.file" bucketName := getRandomBucketName() + + test := func(token string) int { + rec := httptest.NewRecorder() + var req *http.Request + req, err = http.NewRequest("PUT", "/minio/upload/"+bucketName+"/"+objectName, nil) + if err != nil { + t.Fatalf("Cannot create upload request, %v", err) + } + + req.Header.Set("Content-Length", strconv.Itoa(len(content))) + req.Header.Set("x-amz-date", "20160814T114029Z") + req.Header.Set("Accept", "*/*") + req.Body = ioutil.NopCloser(bytes.NewReader(content)) + + if token != "" { + req.Header.Set("Authorization", "Bearer "+authorization) + } + apiRouter.ServeHTTP(rec, req) + return rec.Code + } // Create bucket. 
err = obj.MakeBucket(bucketName) if err != nil { @@ -711,22 +764,10 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler t.Fatalf("%s : %s", instanceType, err) } - content := []byte("temporary file's content") - - req, err := http.NewRequest("PUT", "/minio/upload/"+bucketName+"/"+objectName, nil) - req.Header.Set("Authorization", "Bearer "+authorization) - req.Header.Set("Content-Length", strconv.Itoa(len(content))) - req.Header.Set("x-amz-date", "20160814T114029Z") - req.Header.Set("Accept", "*/*") - req.Body = ioutil.NopCloser(bytes.NewReader(content)) - - if err != nil { - t.Fatalf("Cannot create upload request, %v", err) - } - - apiRouter.ServeHTTP(rec, req) - if rec.Code != http.StatusOK { - t.Fatalf("Expected the response status to be 200, but instead found `%d`", rec.Code) + // Authenticated upload should succeed. + code := test(authorization) + if code != http.StatusOK { + t.Fatalf("Expected the response status to be 200, but instead found `%d`", code) } var byteBuffer bytes.Buffer @@ -738,6 +779,25 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler if bytes.Compare(byteBuffer.Bytes(), content) != 0 { t.Fatalf("The upload file is different from the download file") } + + // Unauthenticated upload should fail. + code = test("") + if code != http.StatusForbidden { + t.Fatalf("Expected the response status to be 403, but instead found `%d`", code) + } + + policy := bucketPolicy{ + Version: "1.0", + Statements: []policyStatement{getWriteOnlyObjectStatement(bucketName, "")}, + } + + globalBucketPolicies.SetBucketPolicy(bucketName, policyChange{false, &policy}) + + // Unauthenticated upload with WRITE policy should succeed. 
+ code = test("") + if code != http.StatusOK { + t.Fatalf("Expected the response status to be 200, but instead found `%d`", code) + } } // Wrapper for calling Upload Handler @@ -760,7 +820,6 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl credentials := serverConfig.GetCredential() - rec := httptest.NewRecorder() authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey) if err != nil { t.Fatal("Cannot authenticate") @@ -768,6 +827,24 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl objectName := "test.file" bucketName := getRandomBucketName() + + test := func(token string) (int, []byte) { + rec := httptest.NewRecorder() + path := "/minio/download/" + bucketName + "/" + objectName + "?token=" + if token != "" { + path = path + token + } + var req *http.Request + req, err = http.NewRequest("GET", path, nil) + + if err != nil { + t.Fatalf("Cannot create upload request, %v", err) + } + + apiRouter.ServeHTTP(rec, req) + return rec.Code, rec.Body.Bytes() + } + // Create bucket. err = obj.MakeBucket(bucketName) if err != nil { @@ -781,18 +858,37 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl t.Fatalf("Was not able to upload an object, %v", err) } - req, err := http.NewRequest("GET", "/minio/download/"+bucketName+"/"+objectName+"?token="+authorization, nil) + // Authenticated download should succeed. 
+ code, bodyContent := test(authorization) - if err != nil { - t.Fatalf("Cannot create upload request, %v", err) + if code != http.StatusOK { + t.Fatalf("Expected the response status to be 200, but instead found `%d`", code) } - apiRouter.ServeHTTP(rec, req) - if rec.Code != http.StatusOK { - t.Fatalf("Expected the response status to be 200, but instead found `%d`", rec.Code) + if bytes.Compare(bodyContent, content) != 0 { + t.Fatalf("The downloaded file is corrupted") } - if bytes.Compare(rec.Body.Bytes(), content) != 0 { + // Unauthenticated download should fail. + code, bodyContent = test("") + if code != http.StatusForbidden { + t.Fatalf("Expected the response status to be 403, but instead found `%d`", code) + } + + policy := bucketPolicy{ + Version: "1.0", + Statements: []policyStatement{getReadOnlyObjectStatement(bucketName, "")}, + } + + globalBucketPolicies.SetBucketPolicy(bucketName, policyChange{false, &policy}) + + // Unauthenticated download with READ policy should succeed. + code, bodyContent = test("") + if code != http.StatusOK { + t.Fatalf("Expected the response status to be 200, but instead found `%d`", code) + } + + if bytes.Compare(bodyContent, content) != 0 { t.Fatalf("The downloaded file is corrupted") } } From 08b6cfb082bcb68001d77c28a2219b364117c074 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 11 Jan 2017 13:59:51 -0800 Subject: [PATCH 060/100] ssl: Set a global boolean to enable SSL across Minio (#3558) We have been using `isSSL()` everywhere we can set a global value once and re-use it again. 
--- cmd/admin-rpc-client.go | 2 +- cmd/browser-peer-rpc.go | 2 +- cmd/globals.go | 6 +++ cmd/lock-rpc-server.go | 2 +- cmd/main.go | 5 ++- cmd/namespace-lock.go | 2 +- cmd/prepare-storage-msg.go | 6 +-- cmd/s3-peer-client.go | 2 +- cmd/server-main.go | 78 ++++++++++++++++++++++--------------- cmd/server-main_test.go | 60 ++++++++++++++++++++++++---- cmd/server-startup-msg.go | 2 +- cmd/server-startup-utils.go | 15 ++++--- cmd/storage-rpc-client.go | 2 +- cmd/update-main.go | 6 +-- cmd/version-main.go | 5 +-- 15 files changed, 130 insertions(+), 65 deletions(-) diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go index 3598b1151..b80ce882f 100644 --- a/cmd/admin-rpc-client.go +++ b/cmd/admin-rpc-client.go @@ -125,7 +125,7 @@ func makeAdminPeers(eps []*url.URL) adminPeers { accessKey: serverCred.AccessKey, secretKey: serverCred.SecretKey, serverAddr: ep.Host, - secureConn: isSSL(), + secureConn: globalIsSSL, serviceEndpoint: path.Join(reservedBucket, adminPath), serviceName: "Admin", } diff --git a/cmd/browser-peer-rpc.go b/cmd/browser-peer-rpc.go index 3af174d06..834a9c75d 100644 --- a/cmd/browser-peer-rpc.go +++ b/cmd/browser-peer-rpc.go @@ -106,7 +106,7 @@ func updateCredsOnPeers(creds credential) map[string]error { accessKey: serverCred.AccessKey, secretKey: serverCred.SecretKey, serverAddr: peers[ix], - secureConn: isSSL(), + secureConn: globalIsSSL, serviceEndpoint: path.Join(reservedBucket, browserPeerPath), serviceName: "Browser", }) diff --git a/cmd/globals.go b/cmd/globals.go index 9f31944eb..307a02198 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -90,6 +90,9 @@ var ( // CA root certificates, a nil value means system certs pool will be used globalRootCAs *x509.CertPool + // IsSSL indicates if the server is configured with SSL. + globalIsSSL bool + // List of admin peers. globalAdminPeers = adminPeers{} @@ -124,4 +127,7 @@ func setGlobalsFromContext(c *cli.Context) { } // Set global quiet flag. 
globalQuiet = c.Bool("quiet") || c.GlobalBool("quiet") + + // Is TLS configured?. + globalIsSSL = isSSL() } diff --git a/cmd/lock-rpc-server.go b/cmd/lock-rpc-server.go index af2ffdf55..c1761dbd7 100644 --- a/cmd/lock-rpc-server.go +++ b/cmd/lock-rpc-server.go @@ -289,7 +289,7 @@ func (l *lockServer) lockMaintenance(interval time.Duration) { secretKey: serverCred.SecretKey, serverAddr: nlrip.lri.node, serviceEndpoint: nlrip.lri.rpcPath, - secureConn: isSSL(), + secureConn: globalIsSSL, serviceName: "Dsync", }) diff --git a/cmd/main.go b/cmd/main.go index 073f3e922..02f672613 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -164,7 +164,10 @@ func checkUpdate() { } // Generic Minio initialization to create/load config, prepare loggers, etc.. -func minioInit() { +func minioInit(ctx *cli.Context) { + // Set global variables after parsing passed arguments + setGlobalsFromContext(ctx) + // Sets new config directory. setGlobalConfigPath(globalConfigDir) diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go index cdb4f0f5c..316b3fba4 100644 --- a/cmd/namespace-lock.go +++ b/cmd/namespace-lock.go @@ -44,7 +44,7 @@ func initDsyncNodes(eps []*url.URL) error { secretKey: cred.SecretKey, serverAddr: ep.Host, serviceEndpoint: pathutil.Join(lockRPCPath, getPath(ep)), - secureConn: isSSL(), + secureConn: globalIsSSL, serviceName: "Dsync", }) if isLocalStorage(ep) && myNode == -1 { diff --git a/cmd/prepare-storage-msg.go b/cmd/prepare-storage-msg.go index d4d38700a..9cc699b9f 100644 --- a/cmd/prepare-storage-msg.go +++ b/cmd/prepare-storage-msg.go @@ -99,11 +99,7 @@ func getHealEndpoint(tls bool, firstEndpoint *url.URL) (cEndpoint *url.URL) { func getHealMsg(endpoints []*url.URL, storageDisks []StorageAPI) string { msg := fmt.Sprintln("\nData volume requires HEALING. Healing is not implemented yet stay tuned:") // FIXME: Enable this after we bring in healing. 
- // msg += "MINIO_ACCESS_KEY=%s " - // msg += "MINIO_SECRET_KEY=%s " - // msg += "minio control heal %s" - // creds := serverConfig.GetCredential() - // msg = fmt.Sprintf(msg, creds.AccessKey, creds.SecretKey, getHealEndpoint(isSSL(), endpoints[0])) + // msg := "mc admin heal myminio" disksInfo, _, _ := getDisksInfo(storageDisks) for i, info := range disksInfo { if storageDisks[i] == nil { diff --git a/cmd/s3-peer-client.go b/cmd/s3-peer-client.go index 0f0cba687..6358f5881 100644 --- a/cmd/s3-peer-client.go +++ b/cmd/s3-peer-client.go @@ -67,7 +67,7 @@ func makeS3Peers(eps []*url.URL) s3Peers { secretKey: serverCred.SecretKey, serverAddr: ep.Host, serviceEndpoint: path.Join(reservedBucket, s3Path), - secureConn: isSSL(), + secureConn: globalIsSSL, serviceName: "S3", } diff --git a/cmd/server-main.go b/cmd/server-main.go index 36436ead2..b2a6b6d61 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -17,12 +17,14 @@ package cmd import ( + "errors" "fmt" "net" "net/url" "os" "path" "sort" + "strconv" "strings" "runtime" @@ -296,11 +298,10 @@ func checkServerSyntax(c *cli.Context) { } } - tls := isSSL() for _, ep := range endpoints { - if ep.Scheme == "https" && !tls { + if ep.Scheme == "https" && !globalIsSSL { // Certificates should be provided for https configuration. - fatalIf(errInvalidArgument, "Certificates not provided for https") + fatalIf(errInvalidArgument, "Certificates not provided for secure configuration") } } } @@ -317,17 +318,47 @@ func isAnyEndpointLocal(eps []*url.URL) bool { return anyLocalEp } +// Returned when there are no ports. +var errEmptyPort = errors.New("Port cannot be empty or '0', please use `--address` to pick a specific port") + +// Convert an input address of form host:port into, host and port, returns if any. +func getHostPort(address string) (host, port string, err error) { + // Check if requested port is available. + host, port, err = net.SplitHostPort(address) + if err != nil { + return "", "", err + } + + // Empty ports. 
+ if port == "0" || port == "" { + // Port zero or empty means use requested to choose any freely available + // port. Avoid this since it won't work with any configured clients, + // can lead to serious loss of availability. + return "", "", errEmptyPort + } + + // Parse port. + if _, err = strconv.Atoi(port); err != nil { + return "", "", err + } + + // Check if port is available. + if err = checkPortAvailability(port); err != nil { + return "", "", err + } + + // Success. + return host, port, nil +} + // serverMain handler called for 'minio server' command. func serverMain(c *cli.Context) { if !c.Args().Present() || c.Args().First() == "help" { cli.ShowCommandHelpAndExit(c, "server", 1) } - // Set global variables after parsing passed arguments - setGlobalsFromContext(c) - // Initialization routine, such as config loading, enable logging, .. - minioInit() + minioInit(c) // Check for minio updates from dl.minio.io checkUpdate() @@ -335,20 +366,9 @@ func serverMain(c *cli.Context) { // Server address. serverAddr := c.String("address") - // Check if requested port is available. - host, portStr, err := net.SplitHostPort(serverAddr) - fatalIf(err, "Unable to parse %s.", serverAddr) - if portStr == "0" || portStr == "" { - // Port zero or empty means use requested to choose any freely available - // port. Avoid this since it won't work with any configured clients, - // can lead to serious loss of availability. - fatalIf(errInvalidArgument, "Invalid port `%s`, please use `--address` to pick a specific port", portStr) - } - globalMinioHost = host - - // Check if requested port is available. - fatalIf(checkPortAvailability(portStr), "Port unavailable %s", portStr) - globalMinioPort = portStr + var err error + globalMinioHost, globalMinioPort, err = getHostPort(serverAddr) + fatalIf(err, "Unable to extract host and port %s", serverAddr) // Check server syntax and exit in case of errors. 
// Done after globalMinioHost and globalMinioPort is set as parseStorageEndpoints() @@ -365,9 +385,6 @@ func serverMain(c *cli.Context) { fatalIf(errInvalidArgument, "None of the disks passed as command line args are local to this server.") } - // Is TLS configured?. - tls := isSSL() - // Sort endpoints for consistent ordering across multiple // nodes in a distributed setup. This is to avoid format.json // corruption if the disks aren't supplied in the same order @@ -415,7 +432,8 @@ func serverMain(c *cli.Context) { globalMinioAddr = getLocalAddress(srvConfig) // Determine API endpoints where we are going to serve the S3 API from. - apiEndPoints := finalizeAPIEndpoints(tls, apiServer.Server) + apiEndPoints, err := finalizeAPIEndpoints(apiServer.Server) + fatalIf(err, "Unable to finalize API endpoints for %s", apiServer.Server.Addr) // Set the global API endpoints value. globalAPIEndpoints = apiEndPoints @@ -427,15 +445,13 @@ func serverMain(c *cli.Context) { initGlobalAdminPeers(endpoints) // Start server, automatically configures TLS if certs are available. - go func(tls bool) { - var lerr error + go func() { cert, key := "", "" - if tls { + if globalIsSSL { cert, key = mustGetCertFile(), mustGetKeyFile() } - lerr = apiServer.ListenAndServe(cert, key) - fatalIf(lerr, "Failed to start minio server.") - }(tls) + fatalIf(apiServer.ListenAndServe(cert, key), "Failed to start minio server.") + }() // Wait for formatting of disks. formattedDisks, err := waitForFormatDisks(firstDisk, endpoints, storageDisks) diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index 3c64d6880..0586c8113 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -63,20 +63,66 @@ func TestGetListenIPs(t *testing.T) { } } +// Tests get host port. +func TestGetHostPort(t *testing.T) { + testCases := []struct { + addr string + err error + }{ + // Test 1 - successful. + { + addr: ":" + getFreePort(), + err: nil, + }, + // Test 2 port empty. 
+ { + addr: ":0", + err: errEmptyPort, + }, + // Test 3 port empty. + { + addr: ":", + err: errEmptyPort, + }, + // Test 4 invalid port. + { + addr: "linux:linux", + err: errors.New("strconv.ParseInt: parsing \"linux\": invalid syntax"), + }, + // Test 5 port not present. + { + addr: "hostname", + err: errors.New("missing port in address hostname"), + }, + } + + // Validate all tests. + for i, testCase := range testCases { + _, _, err := getHostPort(testCase.addr) + if err != nil { + if err.Error() != testCase.err.Error() { + t.Fatalf("Test %d: Error: %s", i+1, err) + } + } + } +} + +// Tests finalize api endpoints. func TestFinalizeAPIEndpoints(t *testing.T) { testCases := []struct { - tls bool addr string }{ - {false, ":80"}, - {true, ":80"}, - {false, "localhost:80"}, - {true, "localhost:80"}, + {":80"}, + {":80"}, + {"localhost:80"}, + {"localhost:80"}, } for i, test := range testCases { - endPoints := finalizeAPIEndpoints(test.tls, &http.Server{Addr: test.addr}) - if len(endPoints) <= 0 { + endPoints, err := finalizeAPIEndpoints(&http.Server{ + Addr: test.addr, + }) + if err != nil && len(endPoints) <= 0 { t.Errorf("Test case %d returned with no API end points for %s", i+1, test.addr) } diff --git a/cmd/server-startup-msg.go b/cmd/server-startup-msg.go index af2fbae25..0bc8b74f2 100644 --- a/cmd/server-startup-msg.go +++ b/cmd/server-startup-msg.go @@ -67,7 +67,7 @@ func printStartupMessage(apiEndPoints []string) { // SSL is configured reads certification chain, prints // authority and expiry. 
- if isSSL() { + if globalIsSSL { certs, err := readCertificateChain() fatalIf(err, "Unable to read certificate chain.") printCertificateMsg(certs) diff --git a/cmd/server-startup-utils.go b/cmd/server-startup-utils.go index fa884f934..1bb4fce70 100644 --- a/cmd/server-startup-utils.go +++ b/cmd/server-startup-utils.go @@ -40,22 +40,27 @@ func getListenIPs(serverAddr string) (hosts []string, port string, err error) { } return hosts, port, nil } // if host != "" { + // Proceed to append itself, since user requested a specific endpoint. hosts = append(hosts, host) + + // Success. return hosts, port, nil } // Finalizes the API endpoints based on the host list and port. -func finalizeAPIEndpoints(tls bool, apiServer *http.Server) (endPoints []string) { +func finalizeAPIEndpoints(apiServer *http.Server) (endPoints []string, err error) { // Verify current scheme. scheme := "http" - if tls { + if globalIsSSL { scheme = "https" } // Get list of listen ips and port. - hosts, port, err := getListenIPs(apiServer.Addr) - fatalIf(err, "Unable to get list of ips to listen on") + hosts, port, err1 := getListenIPs(apiServer.Addr) + if err1 != nil { + return nil, err1 + } // Construct proper endpoints. for _, host := range hosts { @@ -63,5 +68,5 @@ func finalizeAPIEndpoints(tls bool, apiServer *http.Server) (endPoints []string) } // Success. 
- return endPoints + return endPoints, nil } diff --git a/cmd/storage-rpc-client.go b/cmd/storage-rpc-client.go index 846b17096..acb2b1376 100644 --- a/cmd/storage-rpc-client.go +++ b/cmd/storage-rpc-client.go @@ -121,7 +121,7 @@ func newStorageRPC(ep *url.URL) (StorageAPI, error) { secretKey: secretKey, serverAddr: rpcAddr, serviceEndpoint: rpcPath, - secureConn: isSSL(), + secureConn: globalIsSSL, serviceName: "Storage", disableReconnect: true, }), diff --git a/cmd/update-main.go b/cmd/update-main.go index e56d9682d..1cc39dd6b 100644 --- a/cmd/update-main.go +++ b/cmd/update-main.go @@ -265,12 +265,8 @@ func getReleaseUpdate(updateURL string, duration time.Duration) (updateMsg updat // main entry point for update command. func mainUpdate(ctx *cli.Context) { - - // Set global variables after parsing passed arguments - setGlobalsFromContext(ctx) - // Initialization routine, such as config loading, enable logging, .. - minioInit() + minioInit(ctx) if globalQuiet { return diff --git a/cmd/version-main.go b/cmd/version-main.go index 71e34f343..9ed543842 100644 --- a/cmd/version-main.go +++ b/cmd/version-main.go @@ -44,11 +44,8 @@ func mainVersion(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "version", 1) } - // Set global variables after parsing passed arguments - setGlobalsFromContext(ctx) - // Initialization routine, such as config loading, enable logging, .. 
- minioInit() + minioInit(ctx) if globalQuiet { return From f24753812a2b9ccfdd8645cb8d4d8f635065de03 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Thu, 12 Jan 2017 01:41:05 +0100 Subject: [PATCH 061/100] nats: Add support of NATS.io Streaming server (#3494) --- cmd/config-migrate.go | 127 +- cmd/config-migrate_test.go | 11 +- cmd/config-old.go | 68 +- cmd/{config-v11.go => config-v12.go} | 68 +- ...{config-v11_test.go => config-v12_test.go} | 0 cmd/globals.go | 2 +- cmd/notifiers.go | 4 +- cmd/notify-nats.go | 141 +- vendor/github.com/gogo/protobuf/LICENSE | 36 + .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 168 + .../gogo/protobuf/gogoproto/gogo.pb.go | 749 +++++ .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 125 + .../gogo/protobuf/gogoproto/helper.go | 345 ++ .../github.com/gogo/protobuf/proto/Makefile | 43 + .../github.com/gogo/protobuf/proto/clone.go | 234 ++ .../github.com/gogo/protobuf/proto/decode.go | 978 ++++++ .../gogo/protobuf/proto/decode_gogo.go | 172 + .../gogo/protobuf/proto/duration.go | 100 + .../gogo/protobuf/proto/duration_gogo.go | 203 ++ .../github.com/gogo/protobuf/proto/encode.go | 1362 ++++++++ .../gogo/protobuf/proto/encode_gogo.go | 350 +++ .../github.com/gogo/protobuf/proto/equal.go | 300 ++ .../gogo/protobuf/proto/extensions.go | 693 ++++ .../gogo/protobuf/proto/extensions_gogo.go | 294 ++ vendor/github.com/gogo/protobuf/proto/lib.go | 898 ++++++ .../gogo/protobuf/proto/lib_gogo.go | 42 + .../gogo/protobuf/proto/message_set.go | 311 ++ .../gogo/protobuf/proto/pointer_reflect.go | 484 +++ .../protobuf/proto/pointer_reflect_gogo.go | 85 + .../gogo/protobuf/proto/pointer_unsafe.go | 270 ++ .../protobuf/proto/pointer_unsafe_gogo.go | 128 + .../gogo/protobuf/proto/properties.go | 968 ++++++ .../gogo/protobuf/proto/properties_gogo.go | 111 + .../gogo/protobuf/proto/skip_gogo.go | 119 + vendor/github.com/gogo/protobuf/proto/text.go | 928 ++++++ 
.../gogo/protobuf/proto/text_gogo.go | 57 + .../gogo/protobuf/proto/text_parser.go | 1013 ++++++ .../gogo/protobuf/proto/timestamp.go | 113 + .../gogo/protobuf/proto/timestamp_gogo.go | 229 ++ .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 92 + .../descriptor/descriptor.pb.go | 2088 ++++++++++++ .../descriptor/descriptor_gostring.gen.go | 715 +++++ .../protoc-gen-gogo/descriptor/helper.go | 357 +++ .../nats-io/go-nats-streaming/LICENSE | 20 + .../nats-io/go-nats-streaming/README.md | 350 +++ .../nats-io/go-nats-streaming/TODO.md | 15 + .../go-nats-streaming/pb/protocol.pb.go | 2794 +++++++++++++++++ .../go-nats-streaming/pb/protocol.proto | 115 + .../nats-io/go-nats-streaming/stan.go | 476 +++ .../nats-io/go-nats-streaming/sub.go | 472 +++ vendor/github.com/nats-io/go-nats/LICENSE | 20 + vendor/github.com/nats-io/go-nats/README.md | 322 ++ vendor/github.com/nats-io/go-nats/TODO.md | 26 + vendor/github.com/nats-io/go-nats/enc.go | 249 ++ .../go-nats/encoders/builtin/default_enc.go | 106 + .../go-nats/encoders/builtin/gob_enc.go | 34 + .../go-nats/encoders/builtin/json_enc.go | 45 + vendor/github.com/nats-io/go-nats/nats.go | 2630 ++++++++++++++++ vendor/github.com/nats-io/go-nats/netchan.go | 100 + vendor/github.com/nats-io/go-nats/parser.go | 470 +++ vendor/github.com/nats-io/go-nats/util/tls.go | 37 + .../nats-io/go-nats/util/tls_pre17.go | 35 + vendor/vendor.json | 48 + 66 files changed, 23979 insertions(+), 84 deletions(-) rename cmd/{config-v11.go => config-v12.go} (77%) rename cmd/{config-v11_test.go => config-v12_test.go} (100%) create mode 100644 vendor/github.com/gogo/protobuf/LICENSE create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 
vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 vendor/github.com/nats-io/go-nats-streaming/LICENSE create mode 100644 vendor/github.com/nats-io/go-nats-streaming/README.md create mode 100644 vendor/github.com/nats-io/go-nats-streaming/TODO.md create mode 100644 vendor/github.com/nats-io/go-nats-streaming/pb/protocol.pb.go create mode 100644 vendor/github.com/nats-io/go-nats-streaming/pb/protocol.proto create mode 100644 vendor/github.com/nats-io/go-nats-streaming/stan.go create mode 100644 vendor/github.com/nats-io/go-nats-streaming/sub.go create mode 100644 vendor/github.com/nats-io/go-nats/LICENSE create mode 100644 vendor/github.com/nats-io/go-nats/README.md create mode 100644 vendor/github.com/nats-io/go-nats/TODO.md create mode 100644 vendor/github.com/nats-io/go-nats/enc.go create mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go create mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go create mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go create mode 100644 vendor/github.com/nats-io/go-nats/nats.go create mode 100644 vendor/github.com/nats-io/go-nats/netchan.go create mode 100644 vendor/github.com/nats-io/go-nats/parser.go create mode 100644 vendor/github.com/nats-io/go-nats/util/tls.go create mode 100644 vendor/github.com/nats-io/go-nats/util/tls_pre17.go diff --git a/cmd/config-migrate.go b/cmd/config-migrate.go index 04da1c882..8f6bccf4e 100644 --- a/cmd/config-migrate.go +++ b/cmd/config-migrate.go @@ -66,6 +66,11 @@ func migrateConfig() error { if err := 
migrateV10ToV11(); err != nil { return err } + // Migrate version '11' to '12'. + if err := migrateV11ToV12(); err != nil { + return err + } + return nil } @@ -423,7 +428,7 @@ func migrateV7ToV8() error { srvConfig.Logger.File = cv7.Logger.File srvConfig.Logger.Syslog = cv7.Logger.Syslog srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.NATS = make(map[string]natsNotify) + srvConfig.Notify.NATS = make(map[string]natsNotifyV1) srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) srvConfig.Notify.Redis = make(map[string]redisNotify) srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) @@ -433,7 +438,7 @@ func migrateV7ToV8() error { srvConfig.Notify.AMQP = cv7.Notify.AMQP } if len(cv7.Notify.NATS) == 0 { - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS["1"] = natsNotifyV1{} } else { srvConfig.Notify.NATS = cv7.Notify.NATS } @@ -502,8 +507,8 @@ func migrateV8ToV9() error { srvConfig.Notify.AMQP = cv8.Notify.AMQP } if len(cv8.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]natsNotifyV1) + srvConfig.Notify.NATS["1"] = natsNotifyV1{} } else { srvConfig.Notify.NATS = cv8.Notify.NATS } @@ -587,8 +592,8 @@ func migrateV9ToV10() error { srvConfig.Notify.AMQP = cv9.Notify.AMQP } if len(cv9.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]natsNotifyV1) + srvConfig.Notify.NATS["1"] = natsNotifyV1{} } else { srvConfig.Notify.NATS = cv9.Notify.NATS } @@ -672,8 +677,8 @@ func migrateV10ToV11() error { srvConfig.Notify.AMQP = cv10.Notify.AMQP } if len(cv10.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]natsNotifyV1) + srvConfig.Notify.NATS["1"] = natsNotifyV1{} } else { 
srvConfig.Notify.NATS = cv10.Notify.NATS } @@ -725,3 +730,109 @@ func migrateV10ToV11() error { ) return nil } + +// Version '11' to '12' migration. Add support for NATS streaming +// notifications. +func migrateV11ToV12() error { + cv11, err := loadConfigV11() + if err != nil { + if os.IsNotExist(err) { + return nil + } + return fmt.Errorf("Unable to load config version ‘11’. %v", err) + } + if cv11.Version != "11" { + return nil + } + + // Copy over fields from V11 into V12 config struct + srvConfig := &serverConfigV12{} + srvConfig.Version = "12" + srvConfig.Credential = cv11.Credential + srvConfig.Region = cv11.Region + if srvConfig.Region == "" { + // Region needs to be set for AWS Signature Version 4. + srvConfig.Region = "us-east-1" + } + srvConfig.Logger.Console = cv11.Logger.Console + srvConfig.Logger.File = cv11.Logger.File + + // check and set notifiers config + if len(cv11.Notify.AMQP) == 0 { + srvConfig.Notify.AMQP = make(map[string]amqpNotify) + srvConfig.Notify.AMQP["1"] = amqpNotify{} + } else { + srvConfig.Notify.AMQP = cv11.Notify.AMQP + } + if len(cv11.Notify.ElasticSearch) == 0 { + srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) + srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + } else { + srvConfig.Notify.ElasticSearch = cv11.Notify.ElasticSearch + } + if len(cv11.Notify.Redis) == 0 { + srvConfig.Notify.Redis = make(map[string]redisNotify) + srvConfig.Notify.Redis["1"] = redisNotify{} + } else { + srvConfig.Notify.Redis = cv11.Notify.Redis + } + if len(cv11.Notify.PostgreSQL) == 0 { + srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) + srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + } else { + srvConfig.Notify.PostgreSQL = cv11.Notify.PostgreSQL + } + if len(cv11.Notify.Kafka) == 0 { + srvConfig.Notify.Kafka = make(map[string]kafkaNotify) + srvConfig.Notify.Kafka["1"] = kafkaNotify{} + } else { + srvConfig.Notify.Kafka = cv11.Notify.Kafka + } + + // V12 will have an updated config of 
nats. So we create a new one or we + // update the old one if found. + if len(cv11.Notify.NATS) == 0 { + srvConfig.Notify.NATS = make(map[string]natsNotify) + srvConfig.Notify.NATS["1"] = natsNotify{} + } else { + srvConfig.Notify.NATS = make(map[string]natsNotify) + for k, v := range cv11.Notify.NATS { + n := natsNotify{} + n.Enable = v.Enable + n.Address = v.Address + n.Subject = v.Subject + n.Username = v.Username + n.Password = v.Password + n.Token = v.Token + n.Secure = v.Secure + n.PingInterval = v.PingInterval + srvConfig.Notify.NATS[k] = n + } + } + + qc, err := quick.New(srvConfig) + if err != nil { + return fmt.Errorf("Unable to initialize the quick config. %v", + err) + } + configFile, err := getConfigFile() + if err != nil { + return fmt.Errorf("Unable to get config file. %v", err) + } + + err = qc.Save(configFile) + if err != nil { + return fmt.Errorf( + "Failed to migrate config from ‘"+ + cv11.Version+"’ to ‘"+srvConfig.Version+ + "’ failed. %v", err, + ) + } + + console.Println( + "Migration from version ‘" + + cv11.Version + "’ to ‘" + srvConfig.Version + + "’ completed successfully.", + ) + return nil +} diff --git a/cmd/config-migrate_test.go b/cmd/config-migrate_test.go index f2f4d3229..d4897e049 100644 --- a/cmd/config-migrate_test.go +++ b/cmd/config-migrate_test.go @@ -100,10 +100,13 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) { if err := migrateV10ToV11(); err != nil { t.Fatal("migrate v10 to v11 should succeed when no config file is found") } + if err := migrateV11ToV12(); err != nil { + t.Fatal("migrate v10 to v11 should succeed when no config file is found") + } } -// Test if a config migration from v2 to v11 is successfully done -func TestServerConfigMigrateV2toV11(t *testing.T) { +// Test if a config migration from v2 to v12 is successfully done +func TestServerConfigMigrateV2toV12(t *testing.T) { rootPath, err := newTestConfig("us-east-1") if err != nil { t.Fatalf("Init Test config failed") @@ -206,4 +209,8 @@ func 
TestServerConfigMigrateFaultyConfig(t *testing.T) { if err := migrateV10ToV11(); err == nil { t.Fatal("migrateConfigV10ToV11() should fail with a corrupted json") } + if err := migrateV11ToV12(); err == nil { + t.Fatal("migrateConfigV11ToV12() should fail with a corrupted json") + } + } diff --git a/cmd/config-old.go b/cmd/config-old.go index ebf8edb3b..30b7fd460 100644 --- a/cmd/config-old.go +++ b/cmd/config-old.go @@ -301,7 +301,7 @@ type configV6 struct { Logger loggerV6 `json:"logger"` // Notification queue configuration. - Notify notifier `json:"notify"` + Notify notifierV1 `json:"notify"` } // loadConfigV6 load config version '6'. @@ -325,6 +325,16 @@ func loadConfigV6() (*configV6, error) { return c, nil } +// Notifier represents collection of supported notification queues. +type notifierV1 struct { + AMQP map[string]amqpNotify `json:"amqp"` + NATS map[string]natsNotifyV1 `json:"nats"` + ElasticSearch map[string]elasticSearchNotify `json:"elasticsearch"` + Redis map[string]redisNotify `json:"redis"` + PostgreSQL map[string]postgreSQLNotify `json:"postgresql"` + Kafka map[string]kafkaNotify `json:"kafka"` +} + // configV7 server configuration version '7'. type serverConfigV7 struct { Version string `json:"version"` @@ -337,7 +347,7 @@ type serverConfigV7 struct { Logger loggerV6 `json:"logger"` // Notification queue configuration. - Notify notifier `json:"notify"` + Notify notifierV1 `json:"notify"` // Read Write mutex. rwMutex *sync.RWMutex @@ -377,7 +387,7 @@ type serverConfigV8 struct { Logger loggerV6 `json:"logger"` // Notification queue configuration. - Notify notifier `json:"notify"` + Notify notifierV1 `json:"notify"` // Read Write mutex. rwMutex *sync.RWMutex @@ -417,7 +427,7 @@ type serverConfigV9 struct { Logger loggerV6 `json:"logger"` // Notification queue configuration. - Notify notifier `json:"notify"` + Notify notifierV1 `json:"notify"` // Read Write mutex. 
rwMutex *sync.RWMutex @@ -458,7 +468,7 @@ type serverConfigV10 struct { Logger logger `json:"logger"` // Notification queue configuration. - Notify notifier `json:"notify"` + Notify notifierV1 `json:"notify"` } func loadConfigV10() (*serverConfigV10, error) { @@ -480,3 +490,51 @@ func loadConfigV10() (*serverConfigV10, error) { } return srvCfg, nil } + +// natsNotifyV1 - structure was valid until config V 11 +type natsNotifyV1 struct { + Enable bool `json:"enable"` + Address string `json:"address"` + Subject string `json:"subject"` + Username string `json:"username"` + Password string `json:"password"` + Token string `json:"token"` + Secure bool `json:"secure"` + PingInterval int64 `json:"pingInterval"` +} + +// serverConfigV11 server configuration version '11' which is like +// version '10' except it adds support for Kafka notifications. +type serverConfigV11 struct { + Version string `json:"version"` + + // S3 API configuration. + Credential credential `json:"credential"` + Region string `json:"region"` + + // Additional error logging configuration. + Logger logger `json:"logger"` + + // Notification queue configuration. + Notify notifierV1 `json:"notify"` +} + +func loadConfigV11() (*serverConfigV11, error) { + configFile, err := getConfigFile() + if err != nil { + return nil, err + } + if _, err = os.Stat(configFile); err != nil { + return nil, err + } + srvCfg := &serverConfigV11{} + srvCfg.Version = "11" + qc, err := quick.New(srvCfg) + if err != nil { + return nil, err + } + if err := qc.Load(configFile); err != nil { + return nil, err + } + return srvCfg, nil +} diff --git a/cmd/config-v11.go b/cmd/config-v12.go similarity index 77% rename from cmd/config-v11.go rename to cmd/config-v12.go index ebef56f36..017529303 100644 --- a/cmd/config-v11.go +++ b/cmd/config-v12.go @@ -26,9 +26,9 @@ import ( // Read Write mutex for safe access to ServerConfig. 
var serverConfigMu sync.RWMutex -// serverConfigV11 server configuration version '11' which is like -// version '10' except it adds support for Kafka notifications. -type serverConfigV11 struct { +// serverConfigV12 server configuration version '12' which is like +// version '11' except it adds support for NATS streaming notifications. +type serverConfigV12 struct { Version string `json:"version"` // S3 API configuration. @@ -47,7 +47,7 @@ type serverConfigV11 struct { func initConfig() (bool, error) { if !isConfigFileExists() { // Initialize server config. - srvCfg := &serverConfigV11{} + srvCfg := &serverConfigV12{} srvCfg.Version = globalMinioConfigVersion srvCfg.Region = "us-east-1" srvCfg.Credential = newCredential() @@ -94,7 +94,7 @@ func initConfig() (bool, error) { if _, err = os.Stat(configFile); err != nil { return false, err } - srvCfg := &serverConfigV11{} + srvCfg := &serverConfigV12{} srvCfg.Version = globalMinioConfigVersion qc, err := quick.New(srvCfg) if err != nil { @@ -116,10 +116,10 @@ func initConfig() (bool, error) { } // serverConfig server config. -var serverConfig *serverConfigV11 +var serverConfig *serverConfigV12 // GetVersion get current config version. -func (s serverConfigV11) GetVersion() string { +func (s serverConfigV12) GetVersion() string { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -128,14 +128,14 @@ func (s serverConfigV11) GetVersion() string { /// Logger related. -func (s *serverConfigV11) SetAMQPNotifyByID(accountID string, amqpn amqpNotify) { +func (s *serverConfigV12) SetAMQPNotifyByID(accountID string, amqpn amqpNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.AMQP[accountID] = amqpn } -func (s serverConfigV11) GetAMQP() map[string]amqpNotify { +func (s serverConfigV12) GetAMQP() map[string]amqpNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -143,7 +143,7 @@ func (s serverConfigV11) GetAMQP() map[string]amqpNotify { } // GetAMQPNotify get current AMQP logger. 
-func (s serverConfigV11) GetAMQPNotifyByID(accountID string) amqpNotify { +func (s serverConfigV12) GetAMQPNotifyByID(accountID string) amqpNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -151,35 +151,35 @@ func (s serverConfigV11) GetAMQPNotifyByID(accountID string) amqpNotify { } // -func (s *serverConfigV11) SetNATSNotifyByID(accountID string, natsn natsNotify) { +func (s *serverConfigV12) SetNATSNotifyByID(accountID string, natsn natsNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.NATS[accountID] = natsn } -func (s serverConfigV11) GetNATS() map[string]natsNotify { +func (s serverConfigV12) GetNATS() map[string]natsNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.NATS } // GetNATSNotify get current NATS logger. -func (s serverConfigV11) GetNATSNotifyByID(accountID string) natsNotify { +func (s serverConfigV12) GetNATSNotifyByID(accountID string) natsNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.NATS[accountID] } -func (s *serverConfigV11) SetElasticSearchNotifyByID(accountID string, esNotify elasticSearchNotify) { +func (s *serverConfigV12) SetElasticSearchNotifyByID(accountID string, esNotify elasticSearchNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.ElasticSearch[accountID] = esNotify } -func (s serverConfigV11) GetElasticSearch() map[string]elasticSearchNotify { +func (s serverConfigV12) GetElasticSearch() map[string]elasticSearchNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -187,21 +187,21 @@ func (s serverConfigV11) GetElasticSearch() map[string]elasticSearchNotify { } // GetElasticSearchNotify get current ElasicSearch logger. 
-func (s serverConfigV11) GetElasticSearchNotifyByID(accountID string) elasticSearchNotify { +func (s serverConfigV12) GetElasticSearchNotifyByID(accountID string) elasticSearchNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.ElasticSearch[accountID] } -func (s *serverConfigV11) SetRedisNotifyByID(accountID string, rNotify redisNotify) { +func (s *serverConfigV12) SetRedisNotifyByID(accountID string, rNotify redisNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.Redis[accountID] = rNotify } -func (s serverConfigV11) GetRedis() map[string]redisNotify { +func (s serverConfigV12) GetRedis() map[string]redisNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -209,28 +209,28 @@ func (s serverConfigV11) GetRedis() map[string]redisNotify { } // GetRedisNotify get current Redis logger. -func (s serverConfigV11) GetRedisNotifyByID(accountID string) redisNotify { +func (s serverConfigV12) GetRedisNotifyByID(accountID string) redisNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.Redis[accountID] } -func (s *serverConfigV11) SetPostgreSQLNotifyByID(accountID string, pgn postgreSQLNotify) { +func (s *serverConfigV12) SetPostgreSQLNotifyByID(accountID string, pgn postgreSQLNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.PostgreSQL[accountID] = pgn } -func (s serverConfigV11) GetPostgreSQL() map[string]postgreSQLNotify { +func (s serverConfigV12) GetPostgreSQL() map[string]postgreSQLNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.PostgreSQL } -func (s serverConfigV11) GetPostgreSQLNotifyByID(accountID string) postgreSQLNotify { +func (s serverConfigV12) GetPostgreSQLNotifyByID(accountID string) postgreSQLNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -238,21 +238,21 @@ func (s serverConfigV11) GetPostgreSQLNotifyByID(accountID string) postgreSQLNot } // Kafka related functions -func (s *serverConfigV11) 
SetKafkaNotifyByID(accountID string, kn kafkaNotify) { +func (s *serverConfigV12) SetKafkaNotifyByID(accountID string, kn kafkaNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.Kafka[accountID] = kn } -func (s serverConfigV11) GetKafka() map[string]kafkaNotify { +func (s serverConfigV12) GetKafka() map[string]kafkaNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.Kafka } -func (s serverConfigV11) GetKafkaNotifyByID(accountID string) kafkaNotify { +func (s serverConfigV12) GetKafkaNotifyByID(accountID string) kafkaNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -260,7 +260,7 @@ func (s serverConfigV11) GetKafkaNotifyByID(accountID string) kafkaNotify { } // SetFileLogger set new file logger. -func (s *serverConfigV11) SetFileLogger(flogger fileLogger) { +func (s *serverConfigV12) SetFileLogger(flogger fileLogger) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -268,7 +268,7 @@ func (s *serverConfigV11) SetFileLogger(flogger fileLogger) { } // GetFileLogger get current file logger. -func (s serverConfigV11) GetFileLogger() fileLogger { +func (s serverConfigV12) GetFileLogger() fileLogger { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -276,7 +276,7 @@ func (s serverConfigV11) GetFileLogger() fileLogger { } // SetConsoleLogger set new console logger. -func (s *serverConfigV11) SetConsoleLogger(clogger consoleLogger) { +func (s *serverConfigV12) SetConsoleLogger(clogger consoleLogger) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -284,7 +284,7 @@ func (s *serverConfigV11) SetConsoleLogger(clogger consoleLogger) { } // GetConsoleLogger get current console logger. -func (s serverConfigV11) GetConsoleLogger() consoleLogger { +func (s serverConfigV12) GetConsoleLogger() consoleLogger { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -292,7 +292,7 @@ func (s serverConfigV11) GetConsoleLogger() consoleLogger { } // SetRegion set new region. 
-func (s *serverConfigV11) SetRegion(region string) { +func (s *serverConfigV12) SetRegion(region string) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -300,7 +300,7 @@ func (s *serverConfigV11) SetRegion(region string) { } // GetRegion get current region. -func (s serverConfigV11) GetRegion() string { +func (s serverConfigV12) GetRegion() string { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -308,7 +308,7 @@ func (s serverConfigV11) GetRegion() string { } // SetCredentials set new credentials. -func (s *serverConfigV11) SetCredential(creds credential) { +func (s *serverConfigV12) SetCredential(creds credential) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -316,7 +316,7 @@ func (s *serverConfigV11) SetCredential(creds credential) { } // GetCredentials get current credentials. -func (s serverConfigV11) GetCredential() credential { +func (s serverConfigV12) GetCredential() credential { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -324,7 +324,7 @@ func (s serverConfigV11) GetCredential() credential { } // Save config. -func (s serverConfigV11) Save() error { +func (s serverConfigV12) Save() error { serverConfigMu.RLock() defer serverConfigMu.RUnlock() diff --git a/cmd/config-v11_test.go b/cmd/config-v12_test.go similarity index 100% rename from cmd/config-v11_test.go rename to cmd/config-v12_test.go diff --git a/cmd/globals.go b/cmd/globals.go index 307a02198..0f0eb7476 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -36,7 +36,7 @@ const ( // minio configuration related constants. const ( - globalMinioConfigVersion = "11" + globalMinioConfigVersion = "12" globalMinioConfigDir = ".minio" globalMinioCertsDir = "certs" globalMinioCertsCADir = "CAs" diff --git a/cmd/notifiers.go b/cmd/notifiers.go index f40190421..8e65b9d9a 100644 --- a/cmd/notifiers.go +++ b/cmd/notifiers.go @@ -93,12 +93,12 @@ func isNATSQueue(sqsArn arnSQS) bool { return false } // Connect to nats server to validate. 
- natsC, err := dialNATS(natsL) + natsC, err := dialNATS(natsL, true) if err != nil { errorIf(err, "Unable to connect to nats service. %#v", natsL) return false } - defer natsC.Close() + closeNATS(natsC) return true } diff --git a/cmd/notify-nats.go b/cmd/notify-nats.go index 8fdbe2944..bec7cdcdf 100644 --- a/cmd/notify-nats.go +++ b/cmd/notify-nats.go @@ -20,53 +20,117 @@ import ( "io/ioutil" "github.com/Sirupsen/logrus" + "github.com/nats-io/go-nats-streaming" "github.com/nats-io/nats" ) +// natsNotifyStreaming contains specific options related to connection +// to a NATS streaming server +type natsNotifyStreaming struct { + Enable bool `json:"enable"` + ClusterID string `json:"clusterID"` + ClientID string `json:"clientID"` + Async bool `json:"async"` + MaxPubAcksInflight int `json:"maxPubAcksInflight"` +} + // natsNotify - represents logrus compatible NATS hook. // All fields represent NATS configuration details. type natsNotify struct { - Enable bool `json:"enable"` - Address string `json:"address"` - Subject string `json:"subject"` - Username string `json:"username"` - Password string `json:"password"` - Token string `json:"token"` - Secure bool `json:"secure"` - PingInterval int64 `json:"pingInterval"` + Enable bool `json:"enable"` + Address string `json:"address"` + Subject string `json:"subject"` + Username string `json:"username"` + Password string `json:"password"` + Token string `json:"token"` + Secure bool `json:"secure"` + PingInterval int64 `json:"pingInterval"` + Streaming natsNotifyStreaming `json:"streaming"` } -type natsConn struct { - params natsNotify - *nats.Conn +// natsIOConn abstracts connection to any type of NATS server +type natsIOConn struct { + params natsNotify + natsConn *nats.Conn + stanConn stan.Conn } -// dialNATS - dials and returns an natsConn instance, +// dialNATS - dials and returns an natsIOConn instance, // for sending notifications. Returns error if nats logger // is not enabled. 
-func dialNATS(natsL natsNotify) (natsConn, error) { +func dialNATS(natsL natsNotify, testDial bool) (natsIOConn, error) { if !natsL.Enable { - return natsConn{}, errNotifyNotEnabled + return natsIOConn{}, errNotifyNotEnabled } - // Configure and connect to NATS server - natsC := nats.DefaultOptions - natsC.Url = "nats://" + natsL.Address - natsC.User = natsL.Username - natsC.Password = natsL.Password - natsC.Token = natsL.Token - natsC.Secure = natsL.Secure - conn, err := natsC.Connect() - if err != nil { - return natsConn{}, err + + // Construct natsIOConn which holds all NATS connection information + conn := natsIOConn{params: natsL} + + if natsL.Streaming.Enable { + // Construct scheme to differentiate between clear and TLS connections + scheme := "nats" + if natsL.Secure { + scheme = "tls" + } + // Construct address URL + addressURL := scheme + "://" + natsL.Username + ":" + natsL.Password + "@" + natsL.Address + // Fetch the user-supplied client ID and provide a random one if not provided + clientID := natsL.Streaming.ClientID + if clientID == "" { + clientID = mustGetUUID() + } + // Add test suffix to clientID to avoid clientID already registered error + if testDial { + clientID += "-test" + } + connOpts := []stan.Option{ + stan.NatsURL(addressURL), + } + // Setup MaxPubAcksInflight parameter + if natsL.Streaming.MaxPubAcksInflight > 0 { + connOpts = append(connOpts, + stan.MaxPubAcksInflight(natsL.Streaming.MaxPubAcksInflight)) + } + // Do the real connection to the NATS server + sc, err := stan.Connect(natsL.Streaming.ClusterID, clientID, connOpts...) 
+ if err != nil { + return natsIOConn{}, err + } + // Save the created connection + conn.stanConn = sc + } else { + // Configure and connect to NATS server + natsC := nats.DefaultOptions + natsC.Url = "nats://" + natsL.Address + natsC.User = natsL.Username + natsC.Password = natsL.Password + natsC.Token = natsL.Token + natsC.Secure = natsL.Secure + // Do the real connection + nc, err := natsC.Connect() + if err != nil { + return natsIOConn{}, err + } + // Save the created connection + conn.natsConn = nc + } + return conn, nil +} + +// closeNATS - close the underlying NATS connection +func closeNATS(conn natsIOConn) { + if conn.params.Streaming.Enable { + conn.stanConn.Close() + } else { + conn.natsConn.Close() } - return natsConn{Conn: conn, params: natsL}, nil } func newNATSNotify(accountID string) (*logrus.Logger, error) { natsL := serverConfig.GetNATSNotifyByID(accountID) // Connect to nats server. - natsC, err := dialNATS(natsL) + natsC, err := dialNATS(natsL, false) if err != nil { return nil, err } @@ -87,21 +151,34 @@ func newNATSNotify(accountID string) (*logrus.Logger, error) { } // Fire is called when an event should be sent to the message broker -func (n natsConn) Fire(entry *logrus.Entry) error { - ch := n.Conn +func (n natsIOConn) Fire(entry *logrus.Entry) error { body, err := entry.Reader() if err != nil { return err } - err = ch.Publish(n.params.Subject, body.Bytes()) - if err != nil { - return err + if n.params.Streaming.Enable { + // Streaming flag is enabled, publish the log synchronously or asynchronously + // depending on the user supplied parameter + if n.params.Streaming.Async { + _, err = n.stanConn.PublishAsync(n.params.Subject, body.Bytes(), nil) + } else { + err = n.stanConn.Publish(n.params.Subject, body.Bytes()) + } + if err != nil { + return err + } + } else { + // Publish the log + err = n.natsConn.Publish(n.params.Subject, body.Bytes()) + if err != nil { + return err + } } return nil } // Levels is available logging levels. 
-func (n natsConn) Levels() []logrus.Level { +func (n natsIOConn) Levels() []logrus.Level { return []logrus.Level{ logrus.InfoLevel, } diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 000000000..7be0cc7b6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,36 @@ +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 000000000..02f9c62c2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 000000000..5ecfae113 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,168 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. 
For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. 
+You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +The is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package name for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. 
It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + +Less Typing and Peace of Mind is explained in their specific plugin folders godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extension the code that is generated +will be the same as if goprotobuf has generated it. 
+ +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a seperate project, +because we want to keep gogoprotobuf independant of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 000000000..d88ba8013 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,749 @@ +// Code generated by protoc-gen-gogo. +// source: gogo.proto +// DO NOT EDIT! + +/* +Package gogoproto is a generated protocol buffer package. + +It is generated from these files: + gogo.proto + +It has these top-level messages: +*/ +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + 
Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all,json=faceAll", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all,json=gostringAll", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all,json=populateAll", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all,json=stringerAll", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: 
"varint,63009,opt,name=onlyone_all,json=onlyoneAll", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all,json=equalAll", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all,json=descriptionAll", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all,json=testgenAll", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll", + Filename: 
"gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all,json=sizerAll", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: 
"gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all,json=compareAll", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", 
+ Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + 
Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = 
&proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var 
E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + 
proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + 
proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } + +var fileDescriptorGogo = []byte{ + // 1129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xc9, 0x6f, 0x1c, 0x45, + 0x14, 0x87, 0x85, 0x70, 0x64, 0xcf, 0xf3, 0x86, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0x72, 0xe3, 0xe4, + 0x9c, 0x22, 0x94, 0xb2, 0x22, 0xcb, 0xb1, 0x9c, 0x51, 0x10, 0x86, 0x91, 0x89, 0x03, 0x88, 0xc3, + 0xa8, 0x67, 0xa6, 0xdc, 0x69, 0xe8, 0xee, 0x6a, 0xba, 0xaa, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21, + 0xc4, 0x8e, 0x04, 0x09, 0x09, 0xcb, 0x81, 0x7d, 0x0d, 0xcb, 0x9d, 0x0b, 0x70, 0xe6, 0x7f, 0xe0, + 0x02, 0x84, 0xdd, 0x37, 0x5f, 0x50, 0x75, 0xbf, 0xd7, 0x53, 0xdd, 0x1e, 0xa9, 0x6a, 0x6e, 0xe3, + 0x71, 0x7d, 0xdf, 0x54, 0xbf, 0x37, 0xf5, 0x7e, 0x53, 0x00, 0xbe, 0xf0, 0xc5, 0x52, 0x92, 0x0a, + 0x25, 0x9a, 0x0d, 0xfd, 0x3a, 0x7f, 0x79, 0xe8, 0xb0, 0x2f, 0x84, 0x1f, 0xf2, 0xa3, 0xf9, 0x5f, + 0xdd, 0x6c, 0xfb, 0x68, 0x9f, 0xcb, 0x5e, 0x1a, 0x24, 0x4a, 0xa4, 0xc5, 0x62, 0x76, 0x3f, 0xcc, + 0xe3, 0xe2, 0x0e, 0x8f, 0xb3, 0xa8, 0x93, 0xa4, 0x7c, 0x3b, 0xb8, 0xd0, 0xbc, 0x63, 0xa9, 0x20, + 0x97, 0x88, 0x5c, 0x5a, 0x8f, 0xb3, 0xe8, 0x81, 0x44, 0x05, 0x22, 0x96, 0x07, 0xaf, 0xff, 0x72, + 0xf3, 0xe1, 0x9b, 0xee, 0x9e, 0xd8, 0x9c, 0x43, 0x54, 0xff, 0xaf, 0x9d, 0x83, 0x6c, 0x13, 0x6e, + 0xad, 0xf8, 0xa4, 0x4a, 0x83, 0xd8, 0xe7, 0xa9, 0xc5, 0xf8, 0x03, 0x1a, 0xe7, 0x0d, 0xe3, 0x83, + 0x88, 0xb2, 0x35, 0x98, 0x1e, 0xc5, 0xf5, 0x23, 0xba, 0xa6, 0xb8, 0x29, 0x69, 0xc1, 0x6c, 0x2e, + 0xe9, 0x65, 0x52, 0x89, 0x28, 0xf6, 0x22, 0x6e, 0xd1, 0xfc, 0x94, 0x6b, 0x1a, 0x9b, 0x33, 0x1a, + 0x5b, 0x2b, 0x29, 0x76, 0x16, 0x16, 0xf4, 0x3b, 0xe7, 0xbd, 0x30, 0xe3, 0xa6, 0xed, 0xc8, 0x50, + 0xdb, 0x59, 0xbd, 0x8c, 0x94, 0x3f, 0x5f, 
0x1a, 0xcb, 0x95, 0xf3, 0xa5, 0xc0, 0xf0, 0x1a, 0x9d, + 0xf0, 0xb9, 0x52, 0x3c, 0x95, 0x1d, 0x2f, 0x0c, 0x87, 0x6c, 0xf2, 0x54, 0x10, 0x96, 0xc6, 0xcb, + 0x37, 0xaa, 0x9d, 0x68, 0x15, 0xe4, 0x6a, 0x18, 0xb2, 0x2d, 0xb8, 0x6d, 0x48, 0x67, 0x1d, 0x9c, + 0x57, 0xd0, 0xb9, 0xb0, 0xaf, 0xbb, 0x5a, 0xdb, 0x06, 0x7a, 0xbf, 0xec, 0x87, 0x83, 0xf3, 0x2d, + 0x74, 0x36, 0x91, 0xa5, 0xb6, 0x68, 0xe3, 0xbd, 0x30, 0x77, 0x9e, 0xa7, 0x5d, 0x21, 0x79, 0x87, + 0x3f, 0x91, 0x79, 0xa1, 0x83, 0xee, 0x2a, 0xea, 0x66, 0x11, 0x5c, 0xd7, 0x9c, 0x76, 0x1d, 0x87, + 0x89, 0x6d, 0xaf, 0xc7, 0x1d, 0x14, 0xd7, 0x50, 0x31, 0xae, 0xd7, 0x6b, 0x74, 0x15, 0xa6, 0x7c, + 0x51, 0x3c, 0x92, 0x03, 0xfe, 0x36, 0xe2, 0x93, 0xc4, 0xa0, 0x22, 0x11, 0x49, 0x16, 0x7a, 0xca, + 0x65, 0x07, 0xef, 0x90, 0x82, 0x18, 0x54, 0x8c, 0x50, 0xd6, 0x77, 0x49, 0x21, 0x8d, 0x7a, 0xae, + 0xc0, 0xa4, 0x88, 0xc3, 0x1d, 0x11, 0xbb, 0x6c, 0xe2, 0x3d, 0x34, 0x00, 0x22, 0x5a, 0xb0, 0x0c, + 0x0d, 0xd7, 0x46, 0xbc, 0x8f, 0xf8, 0x04, 0xa7, 0x0e, 0xb4, 0x60, 0x96, 0x86, 0x4c, 0x20, 0x62, + 0x07, 0xc5, 0x07, 0xa8, 0x98, 0x31, 0x30, 0x7c, 0x0c, 0xc5, 0xa5, 0xf2, 0xb9, 0x8b, 0xe4, 0x43, + 0x7a, 0x0c, 0x44, 0xb0, 0x94, 0x5d, 0x1e, 0xf7, 0xce, 0xb9, 0x19, 0x3e, 0xa2, 0x52, 0x12, 0xa3, + 0x15, 0x6b, 0x30, 0x1d, 0x79, 0xa9, 0x3c, 0xe7, 0x85, 0x4e, 0xed, 0xf8, 0x18, 0x1d, 0x53, 0x25, + 0x84, 0x15, 0xc9, 0xe2, 0x51, 0x34, 0x9f, 0x50, 0x45, 0x0c, 0x0c, 0x8f, 0x9e, 0x54, 0x5e, 0x37, + 0xe4, 0x9d, 0x51, 0x6c, 0x9f, 0xd2, 0xd1, 0x2b, 0xd8, 0x0d, 0xd3, 0xb8, 0x0c, 0x0d, 0x19, 0x5c, + 0x74, 0xd2, 0x7c, 0x46, 0x9d, 0xce, 0x01, 0x0d, 0x3f, 0x02, 0xb7, 0x0f, 0x1d, 0xf5, 0x0e, 0xb2, + 0xcf, 0x51, 0xb6, 0x38, 0x64, 0xdc, 0xe3, 0x48, 0x18, 0x55, 0xf9, 0x05, 0x8d, 0x04, 0x5e, 0x73, + 0xb5, 0x61, 0x21, 0x8b, 0xa5, 0xb7, 0x3d, 0x5a, 0xd5, 0xbe, 0xa4, 0xaa, 0x15, 0x6c, 0xa5, 0x6a, + 0x67, 0x60, 0x11, 0x8d, 0xa3, 0xf5, 0xf5, 0x2b, 0x1a, 0xac, 0x05, 0xbd, 0x55, 0xed, 0xee, 0xa3, + 0x70, 0xa8, 0x2c, 0xe7, 0x05, 0xc5, 0x63, 0xa9, 0x99, 0x4e, 0xe4, 0x25, 0x0e, 
0xe6, 0xeb, 0x68, + 0xa6, 0x89, 0xbf, 0x5e, 0x0a, 0x36, 0xbc, 0x44, 0xcb, 0x1f, 0x86, 0x83, 0x24, 0xcf, 0xe2, 0x94, + 0xf7, 0x84, 0x1f, 0x07, 0x17, 0x79, 0xdf, 0x41, 0xfd, 0x75, 0xad, 0x55, 0x5b, 0x06, 0xae, 0xcd, + 0xa7, 0xe1, 0x96, 0xf2, 0xf7, 0x46, 0x27, 0x88, 0x12, 0x91, 0x2a, 0x8b, 0xf1, 0x1b, 0xea, 0x54, + 0xc9, 0x9d, 0xce, 0x31, 0xb6, 0x0e, 0x33, 0xf9, 0x9f, 0xae, 0x5f, 0xc9, 0x6f, 0x51, 0x34, 0x3d, + 0xa0, 0x70, 0x70, 0xf4, 0x44, 0x94, 0x78, 0xa9, 0xcb, 0xfc, 0xfb, 0x8e, 0x06, 0x07, 0x22, 0xc5, + 0xb7, 0x6f, 0xb6, 0x96, 0xc4, 0xcd, 0xbb, 0xf6, 0x49, 0x36, 0xb8, 0x94, 0x9e, 0x5f, 0x7a, 0x9e, + 0xdc, 0xc5, 0x33, 0x5b, 0x0d, 0x62, 0x76, 0x9f, 0x2e, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x4b, 0xbb, + 0x65, 0x85, 0x2a, 0x69, 0xc9, 0x4e, 0xc1, 0x74, 0x25, 0x2a, 0xed, 0xaa, 0xa7, 0x50, 0x35, 0x65, + 0x26, 0x25, 0x3b, 0x06, 0x63, 0x3a, 0xf6, 0xec, 0xf8, 0xd3, 0x88, 0xe7, 0xcb, 0xd9, 0x09, 0x98, + 0xa0, 0xb8, 0xb3, 0xa3, 0xcf, 0x20, 0x5a, 0x22, 0x1a, 0xa7, 0xa8, 0xb3, 0xe3, 0xcf, 0x12, 0x4e, + 0x88, 0xc6, 0xdd, 0x4b, 0xf8, 0xfd, 0xf3, 0x63, 0x38, 0xae, 0xa8, 0x76, 0xcb, 0x30, 0x8e, 0x19, + 0x67, 0xa7, 0x9f, 0xc3, 0x0f, 0x27, 0x82, 0xdd, 0x03, 0x07, 0x1c, 0x0b, 0xfe, 0x02, 0xa2, 0xc5, + 0x7a, 0xb6, 0x06, 0x93, 0x46, 0xae, 0xd9, 0xf1, 0x17, 0x11, 0x37, 0x29, 0xbd, 0x75, 0xcc, 0x35, + 0xbb, 0xe0, 0x25, 0xda, 0x3a, 0x12, 0xba, 0x6c, 0x14, 0x69, 0x76, 0xfa, 0x65, 0xaa, 0x3a, 0x21, + 0x6c, 0x05, 0x1a, 0xe5, 0x98, 0xb2, 0xf3, 0xaf, 0x20, 0x3f, 0x60, 0x74, 0x05, 0x8c, 0x31, 0x69, + 0x57, 0xbc, 0x4a, 0x15, 0x30, 0x28, 0x7d, 0x8c, 0xea, 0xd1, 0x67, 0x37, 0xbd, 0x46, 0xc7, 0xa8, + 0x96, 0x7c, 0xba, 0x9b, 0xf9, 0xb4, 0xb0, 0x2b, 0x5e, 0xa7, 0x6e, 0xe6, 0xeb, 0xf5, 0x36, 0xea, + 0x59, 0x62, 0x77, 0xbc, 0x41, 0xdb, 0xa8, 0x45, 0x09, 0x6b, 0x43, 0x73, 0x7f, 0x8e, 0xd8, 0x7d, + 0x6f, 0xa2, 0x6f, 0x6e, 0x5f, 0x8c, 0xb0, 0x87, 0x60, 0x71, 0x78, 0x86, 0xd8, 0xad, 0x97, 0x77, + 0x6b, 0xbf, 0xfa, 0xcd, 0x08, 0x61, 0x67, 0x06, 0xbf, 0xfa, 0xcd, 0xfc, 0xb0, 0x6b, 0xaf, 0xec, + 0x56, 0x2f, 0x76, 
0x66, 0x7c, 0xb0, 0x55, 0x80, 0xc1, 0xe8, 0xb6, 0xbb, 0xae, 0xa2, 0xcb, 0x80, + 0xf4, 0xd1, 0xc0, 0xc9, 0x6d, 0xe7, 0xaf, 0xd1, 0xd1, 0x40, 0x82, 0x2d, 0xc3, 0x44, 0x9c, 0x85, + 0xa1, 0xfe, 0x72, 0x34, 0xef, 0x1c, 0x12, 0x13, 0x3c, 0xec, 0x13, 0xfb, 0xeb, 0x1e, 0x1e, 0x0c, + 0x02, 0xd8, 0x31, 0x38, 0xc0, 0xa3, 0x2e, 0xef, 0xdb, 0xc8, 0xdf, 0xf6, 0x68, 0x20, 0xe8, 0xd5, + 0x6c, 0x05, 0xa0, 0xb8, 0x34, 0xaa, 0x9d, 0xc4, 0xfa, 0xa9, 0xbf, 0xef, 0x15, 0x77, 0x50, 0x03, + 0x19, 0x08, 0xf2, 0x5b, 0xa7, 0x45, 0x70, 0xa3, 0x2a, 0xc8, 0x2f, 0x9a, 0xc7, 0x61, 0xfc, 0x31, + 0x29, 0x62, 0xe5, 0xf9, 0x36, 0xfa, 0x0f, 0xa4, 0x69, 0xbd, 0x2e, 0x58, 0x24, 0x52, 0xae, 0x3c, + 0x5f, 0xda, 0xd8, 0x3f, 0x91, 0x2d, 0x01, 0x0d, 0xf7, 0x3c, 0xa9, 0x5c, 0x9e, 0xfb, 0x2f, 0x82, + 0x09, 0xd0, 0x9b, 0xd6, 0xaf, 0x1f, 0xe7, 0x3b, 0x36, 0xf6, 0x6f, 0xda, 0x34, 0xae, 0x67, 0x27, + 0xa0, 0xa1, 0x5f, 0xe6, 0xf7, 0x6d, 0x1b, 0xfc, 0x0f, 0xc2, 0x03, 0x42, 0x7f, 0xb2, 0x54, 0x7d, + 0x15, 0xd8, 0x8b, 0xfd, 0x2f, 0x76, 0x9a, 0xd6, 0xb3, 0x55, 0x98, 0x94, 0xaa, 0xdf, 0xcf, 0x52, + 0x2f, 0x1f, 0xfe, 0x16, 0xfc, 0xbf, 0xbd, 0xf2, 0x32, 0x57, 0x32, 0x27, 0x8f, 0xc0, 0x7c, 0x4f, + 0x44, 0x75, 0xf0, 0x24, 0xb4, 0x44, 0x4b, 0xb4, 0xf3, 0x63, 0xf0, 0x7f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x3f, 0x9b, 0x2b, 0x54, 0xfc, 0x11, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 000000000..f6502e4b9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 000000000..0da211a8e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,125 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional 
bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 000000000..bb5fff48b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 
+1,345 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { 
+ if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if 
field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + 
+func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file 
*google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + if IsProto3(file) { + return false + } + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 000000000..41c717573 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 000000000..5d4cba4b5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,234 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. 
+ mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. 
+ if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 000000000..737f2731d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,978 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. 
+// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. 
+// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. 
+// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. 
The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. 
+func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. +func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { + if isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.GetExtensions() + *ext = append(*ext, o.buf[oi:o.index]...) + } + continue + } + } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? 
+ if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. 
+ return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. 
+func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. 
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. 
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? 
+ return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). 
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go new file mode 100644 index 000000000..6fb74de4c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go @@ -0,0 +1,172 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +// Decode a reference to a struct pointer. +func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is a pointer receiver") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + bas := structPointer_FieldPointer(base, p.field) + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers ([]struct). +func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { + newBas := appendStructPointer(base, p.field, p.sstype) + + if is_group { + panic("not supported, maybe in future, if requested.") + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is not a pointer receiver.") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) + + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers. 
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_ref_struct(p, false, base) +} + +func setPtrCustomType(base structPointer, f field, v interface{}) { + if v == nil { + return + } + structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) +} + +func setCustomType(base structPointer, f field, value interface{}) { + if value == nil { + return + } + v := reflect.ValueOf(value).Elem() + t := reflect.TypeOf(value).Elem() + kind := t.Kind() + switch kind { + case reflect.Slice: + slice := reflect.MakeSlice(t, v.Len(), v.Cap()) + reflect.Copy(slice, v) + oldHeader := structPointer_GetSliceHeader(base, f) + oldHeader.Data = slice.Pointer() + oldHeader.Len = v.Len() + oldHeader.Cap = v.Cap() + default: + size := reflect.TypeOf(value).Elem().Size() + structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) + } +} + +func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + setPtrCustomType(base, p.field, custom) + return nil +} + +func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + if custom != nil { + setCustomType(base, p.field, custom) + } + return nil +} + +// Decode a slice of bytes ([]byte) into a slice of custom types. 
+func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + newBas := appendStructPointer(base, p.field, p.ctype) + + var zero field + setCustomType(newBas, zero, custom) + + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 000000000..93464c91c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. 
+func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 000000000..18e2a5f77 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,203 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} + +func (o *Buffer) decDuration() (time.Duration, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return 0, err + } + dproto := &duration{} + if err := Unmarshal(b, dproto); err != nil { + return 0, err + } + return durationFromProto(dproto) +} + +func (o *Buffer) dec_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, 
reflect.SliceOf(reflect.PtrTo(durationType))) + var zero field + setPtrCustomType(newBas, zero, &d) + return nil +} + +func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + structPointer_Word64Slice(base, p.field).Append(uint64(d)) + return nil +} + +func size_duration(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_duration(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_duration(p *Properties, base structPointer) (n int) { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return 0 + } + dproto := durationProto(*durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return errRepeatedHasNil + } + dproto := durationProto(*durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 000000000..2b30f8462 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,1362 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. 
+// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. 
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. 
+ return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) + return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. 
+// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? 
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) 
+ v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). 
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) 
+ err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. 
+func (o *Buffer) enc_map(p *Properties, base structPointer) error { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { + return err + } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { + return err + } + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := structPointer_ExtMap(base, p.field) + return extensionsMapSize(*v) +} + +func size_exts(p *Properties, base structPointer) int { + v := structPointer_Extensions(base, p.field) + return extensionsSize(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? 
+ + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. 
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. 
+ if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) 
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 000000000..32111b7f4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,350 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// http://github.com/golang/protobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} + +type Sizer interface { + Size() int +} + +func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, s...) 
+ return nil +} + +func size_ext_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(s) + return +} + +// Encode a reference to bool pointer. +func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + x := 0 + if v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_bool(p *Properties, base structPointer) int { + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode a reference to int32 pointer. +func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a reference to an int64 pointer. +func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_ref_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a reference to a string pointer. 
+func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_ref_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// Encode a reference to a message struct. +func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +//TODO this is only copied, please fix this +func size_ref_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a slice of references to message struct pointers ([]struct). 
+func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() + for i := 0; i < l; i++ { + structp := ss.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + } + return state.err +} + +//TODO this is only copied, please fix this +func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := ss.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return ErrNil + } + custom := i.(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + return nil +} + +func size_custom_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { + custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceAt(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return ErrNil + } + slice := reflect.ValueOf(inter) + l := slice.Len() + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + } + return nil +} + +func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return 0 + } + slice := reflect.ValueOf(inter) + l := slice.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + } + return +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 000000000..2ed1cf596 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. 
+ +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. 
+// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. 
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 000000000..0dfcb538e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,693 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. 
+type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, bool) { + if ep, ok := p.(extendableProto); ok { + return ep, ok + } + if ep, ok := p.(extendableProtoV1); ok { + return extensionAdapter{ep}, ok + } + return nil, false +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. 
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +type extensionRange interface { + Message + ExtensionRangeArray() []ExtensionRange +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() +var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() +var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem() +var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. 
+ // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, ok := extendable(base) + if !ok { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extensionRange, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. 
+type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensions(e *XXX_InternalExtensions) error { + m, mu := e.extensionsRead() + if m == nil { + return nil // fast path + } + mu.Lock() + defer mu.Unlock() + return encodeExtensionsMap(m) +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensionsMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. 
+ x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func extensionsSize(e *XXX_InternalExtensions) (n int) { + m, mu := e.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + defer mu.Unlock() + return extensionsMapSize(m) +} + +func extensionsMapSize(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? 
+ epb, ok := extendable(pb) + if !ok { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok = extmap[extension.Field] + mu.Unlock() + return ok +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, doki := pb.(extensionsBytes); doki { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, ok := extendable(pb) + if !ok { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. 
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + o := 0 + for o < len(*ext) { + tag, n := DecodeVarint((*ext)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size((*ext)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + v, err := decodeExtension((*ext)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) + } + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. 
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. 
+ if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(pb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. 
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, doki := pb.(extensionsBytes); doki { + ClearExtension(pb, extension) + ext := epb.GetExtensions() + et := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + p := NewBuffer(nil) + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + *ext = append(*ext, p.buf...) + return nil + } + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. 
+func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 000000000..ea6478f00 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,294 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" +) + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + return SizeOfExtensionMap(m.extensionsWrite()) +} + +func SizeOfExtensionMap(m map[int32]Extension) (n int) { + return extensionsMapSize(m) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this 
sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + if err := encodeExtensionsMap(m); err != nil { + return 0, err + } + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + n += copy(data[n:], m[int32(k)].enc) + } + return n, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + if m[id].value == nil || m[id].desc == nil { + return m[id].enc, nil + } + if err := encodeExtensionsMap(m); err != nil { + return nil, err + } + return m[id].enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + 
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) + m[int32(tag)] = ext + } +} + +func encodeExtension(e *Extension) error { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + return nil + } + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. 
+ x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + return nil +} + +func (this Extension) GoString() string { + if this.enc == nil { + if err := encodeExtension(&this); err != nil { + panic(err) + } + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 000000000..7580bb45c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,898 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. 
+ - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. 
+ - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string 
`protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain 
the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. 
+func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. 
+func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. 
+// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + 
*(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. 
+// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != 
nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion1 = true diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 000000000..4b4f7c909 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,42 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "encoding/json" + "strconv" +) + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 000000000..fd982decd --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,311 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. 
+var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? 
+} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. 
+ msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. 
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. 
+func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..fb512e2e1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. 
+func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. 
+func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. 
+type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. 
+func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. 
// A word64 represents a field of type *int64, *uint64, or *float64.
// That is, v.Type() is one of those pointer types and v is assignable.
type word64 struct {
	v reflect.Value
}

// word64_Set sets p to point at a newly allocated word with bits set to x.
// The pointee is carved out of the Buffer's 64-bit scratch slices
// (int64s/uint64s/float64s), refilled in chunks of uint64PoolSize.
// Unlike word32_Set there is no enum fallback here: any other pointee
// type panics.
func word64_Set(p word64, o *Buffer, x uint64) {
	t := p.v.Type().Elem()
	switch t {
	case int64Type:
		if len(o.int64s) == 0 {
			o.int64s = make([]int64, uint64PoolSize)
		}
		o.int64s[0] = int64(x)
		p.v.Set(reflect.ValueOf(&o.int64s[0]))
		o.int64s = o.int64s[1:]
		return
	case uint64Type:
		if len(o.uint64s) == 0 {
			o.uint64s = make([]uint64, uint64PoolSize)
		}
		o.uint64s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint64s[0]))
		o.uint64s = o.uint64s[1:]
		return
	case float64Type:
		if len(o.float64s) == 0 {
			o.float64s = make([]float64, uint64PoolSize)
		}
		o.float64s[0] = math.Float64frombits(x)
		p.v.Set(reflect.ValueOf(&o.float64s[0]))
		o.float64s = o.float64s[1:]
		return
	}
	panic("unreachable")
}

// word64_IsNil reports whether p is nil.
func word64_IsNil(p word64) bool {
	return p.v.IsNil()
}

// word64_Get returns the bits pointed at by p, as a uint64
// (floats via math.Float64bits).
func word64_Get(p word64) uint64 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

// structPointer_Word64 returns a reference to a *int64, *uint64, or
// *float64 field in the struct.
func structPointer_Word64(p structPointer, f field) word64 {
	return word64{structPointer_field(p, f)}
}

// word64Val is like word32Val but for 64-bit values.
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 000000000..1763a5f22 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,85 @@ +// Protocol Buffers for Go with Gadgets +// +// 
Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// +build appengine js + +package proto + +import ( + "reflect" +) + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + panic("not implemented") +} + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func structPointer_Add(p structPointer, size field) structPointer { + panic("not implemented") +} + +func structPointer_Len(p structPointer, f field) int { + panic("not implemented") +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + panic("not implemented") +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + panic("not implemented") +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + panic("not implemented") +} + +type structRefSlice struct{} + +func (v *structRefSlice) Len() int { + panic("not implemented") +} + +func (v *structRefSlice) Index(i int) structPointer { + panic("not implemented") +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..6b5567d47 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. 
We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
// type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.

// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer

// toStructPointer returns a structPointer equivalent to the given reflect value.
// v.Pointer() yields the raw address carried by the value.
func toStructPointer(v reflect.Value) structPointer {
	return structPointer(unsafe.Pointer(v.Pointer()))
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
	return p == nil
}

// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
	return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}

// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return field(f.Offset)
}

// invalidField is an invalid field identifier: all bits set, an offset
// no real field can occupy.
const invalidField = ^field(0)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
	return f != ^field(0)
}

// Bytes returns the address of a []byte field in the struct.
// Like every accessor below, it computes struct base + field offset
// through unsafe.Pointer and casts to the concrete pointer type.
func structPointer_Bytes(p structPointer, f field) *[]byte {
	return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
	return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
	return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
	return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
	return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
	return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
	return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// Extensions returns the address of the struct's XXX_InternalExtensions field.
// (The original comment said "ExtMap", which is the function below.)
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// ExtMap returns the address of a map[int32]Extension field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
	return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer

func (v *structPointerSlice) Len() int { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }

// A word32 is the address of a "pointer to 32-bit value" field.
// It is stored as **uint32 regardless of the field's declared type:
// Word32 below casts *int32, *float32 and *enum fields through the
// same representation.
type word32 **uint32

// IsNil reports whether *v is nil.
func word32_IsNil(p word32) bool {
	return *p == nil
}

// Set sets *v to point at a newly allocated word set to x.
// The word is carved out of the Buffer's uint32 scratch slice,
// refilled in chunks of uint32PoolSize to amortize allocations.
func word32_Set(p word32, o *Buffer, x uint32) {
	if len(o.uint32s) == 0 {
		o.uint32s = make([]uint32, uint32PoolSize)
	}
	o.uint32s[0] = x
	*p = &o.uint32s[0]
	o.uint32s = o.uint32s[1:]
}

// Get gets the value pointed at by *v.
func word32_Get(p word32) uint32 {
	return **p
}

// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// A word32Val is the address of a 32-bit value field.
type word32Val *uint32

// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
	*p = x
}

// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
	return *p
}

// Word32Val returns the address of a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
	return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32

func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
func (v *word32Slice) Len() int { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }

// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
	return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// word64 is like word32 but for 64-bit values.
type word64 **uint64

// word64_Set sets *p to point at a newly allocated word set to x,
// drawing the word from the Buffer's uint64 scratch slice (same
// pooling scheme as word32_Set).
func word64_Set(p word64, o *Buffer, x uint64) {
	if len(o.uint64s) == 0 {
		o.uint64s = make([]uint64, uint64PoolSize)
	}
	o.uint64s[0] = x
	*p = &o.uint64s[0]
	o.uint64s = o.uint64s[1:]
}

// word64_IsNil reports whether *p is nil.
func word64_IsNil(p word64) bool {
	return *p == nil
}

// word64_Get gets the value pointed at by *p.
func word64_Get(p word64) uint64 {
	return **p
}

// structPointer_Word64 returns the address of a *int64, *uint64, or *float64 field in the struct.
func structPointer_Word64(p structPointer, f field) word64 {
	return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64

// word64Val_Set sets *p to x. The Buffer argument is unused here;
// presumably it mirrors the reflect-based implementation's signature —
// confirm against the sibling pointer_reflect.go file.
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
	*p = x
}

// word64Val_Get gets the value pointed at by p.
func word64Val_Get(p word64Val) uint64 {
	return *p
}

// structPointer_Word64Val returns the address of a int64, uint64, or float64 field in the struct.
func structPointer_Word64Val(p structPointer, f field) word64Val {
	return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 000000000..f156a29f0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,128 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + return r.Interface() +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + if r.Elem().IsNil() { + return nil + } + return r.Elem().Interface() +} + +func copyUintPtr(oldptr, newptr uintptr, size int) { + oldbytes := make([]byte, 0) + oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) + oldslice.Data = oldptr + oldslice.Len = size + oldslice.Cap = size + newbytes := make([]byte, 0) + newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) + newslice.Data = newptr + newslice.Len = size + newslice.Cap = size + copy(newbytes, oldbytes) +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + copyUintPtr(uintptr(oldptr), uintptr(newptr), size) +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + size := typ.Elem().Size() + + oldHeader := structPointer_GetSliceHeader(base, f) + oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() + newLen := oldHeader.Len + 1 + newSlice 
:= reflect.MakeSlice(typ, newLen, newLen) + reflect.Copy(newSlice, oldSlice) + bas := toStructPointer(newSlice) + oldHeader.Data = uintptr(bas) + oldHeader.Len = newLen + oldHeader.Cap = newLen + + return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) +} + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_Add(p structPointer, size field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) +} + +func structPointer_Len(p structPointer, f field) int { + return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + return &structRefSlice{p: p, f: f, size: size} +} + +// A structRefSlice represents a slice of structs (themselves submessages or groups). 
+type structRefSlice struct { + p structPointer + f field + size uintptr +} + +func (v *structRefSlice) Len() int { + return structPointer_Len(v.p, v.f) +} + +func (v *structRefSlice) Index(i int) structPointer { + ss := structPointer_GetStructPointer(v.p, v.f) + ss1 := structPointer_GetRefStructPointer(ss, 0) + return structPointer_Add(ss1, field(uintptr(i)*v.size)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 000000000..44b332052 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,968 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. 
+type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. 
type StructProperties struct {
	Prop []*Properties // properties for each field
	reqCount int // required count
	decoderTags tagMap // map from proto tag to struct field number
	decoderOrigNames map[string]int // map from original name to struct field number
	order []int // list of struct field numbers in tag order
	unrecField field // field id of the XXX_unrecognized []byte field
	extendable bool // is this an extendable proto

	oneofMarshaler oneofMarshaler // marshals all oneof fields of the message
	oneofUnmarshaler oneofUnmarshaler // unmarshals a single oneof field
	oneofSizer oneofSizer // sizes all oneof fields of the message
	stype reflect.Type // the struct type these properties describe

	// OneofTypes contains information about the oneof fields in this message.
	// It is keyed by the original name of a field.
	OneofTypes map[string]*OneofProperties
}

// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
	Type reflect.Type // pointer to generated struct type for this oneof field
	Field int // struct field number of the containing oneof in the message
	Prop *Properties
}

// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.

func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }

// Properties represents the protocol-specific behavior of a single struct field.
type Properties struct {
	Name string // name of the field, for error messages
	OrigName string // original name before protocol compiler (always set)
	JSONName string // name to use for JSON; determined by protoc
	Wire string // wire-encoding name from the struct tag: "varint", "fixed32", "bytes", ...
	WireType int // numeric wire type (WireVarint, WireBytes, ...) derived from Wire
	Tag int // protobuf field number
	Required bool // tag carried the "req" modifier
	Optional bool // tag carried the "opt" modifier
	Repeated bool // tag carried the "rep" modifier
	Packed bool // relevant for repeated primitives only
	Enum string // set for enum types only
	proto3 bool // whether this is known to be a proto3 field; set for []byte only
	oneof bool // whether this is a oneof field

	Default string // default value
	HasDefault bool // whether an explicit default was provided
	CustomType string // gogoproto: type name from a customtype= tag option
	StdTime bool // gogoproto: tag carried the "stdtime" option
	StdDuration bool // gogoproto: tag carried the "stdduration" option

	enc encoder
	valEnc valueEncoder // set for bool and numeric types only
	field field
	tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
	tagbuf [8]byte
	stype reflect.Type // set for struct types only
	sstype reflect.Type // set for slices of structs types only
	ctype reflect.Type // set for custom types only
	sprop *StructProperties // set for struct types only
	isMarshaler bool
	isUnmarshaler bool

	mtype reflect.Type // set for map types only
	mkeyprop *Properties // set for map types only
	mvalprop *Properties // set for map types only

	size sizer
	valSize valueSizer // set for bool and numeric types only

	dec decoder
	valDec valueDecoder // set for bool and numeric types only

	// If this is a packable field, this will be the decoder for the packed version of the field.
	packedDec decoder
}

// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + 
p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.setCustomEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTimeEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + } else { + p.enc = (*Buffer).enc_ref_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_ref_bool + } + case reflect.Int32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + } else { + p.enc = (*Buffer).enc_ref_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_int32 + } + case reflect.Uint32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_ref_uint32 + } + case reflect.Int64, reflect.Uint64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.Float32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_uint32 + } + case reflect.Float64: + 
if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.String: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + } else { + p.enc = (*Buffer).enc_ref_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_ref_string + } + case reflect.Struct: + p.stype = typ + p.isMarshaler = isMarshaler(typ) + p.isUnmarshaler = isUnmarshaler(typ) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_ref_struct_message + p.dec = (*Buffer).dec_ref_struct_message + p.size = size_ref_struct_message + } else { + fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) + } + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if 
p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } 
+ p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + case reflect.Struct: + p.setSliceOfNonPointerStructs(t1) + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. 
+ vtype = reflect.PtrTo(vtype) + } + + p.mvalprop.CustomType = p.CustomType + p.mvalprop.StdDuration = p.StdDuration + p.mvalprop.StdTime = p.StdTime + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. 
+func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) || + reflect.PtrTo(t).Implements(extendableBytesType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + if len(f.Tag.Get("protobuf")) > 0 { + p.enc = (*Buffer).enc_ext_slice_byte + p.dec = nil // not needed + p.size = size_ext_slice_byte + } else { + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = 
true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. 
+ continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
+var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 000000000..b6b7176c5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,111 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "os" + "reflect" +) + +func (p *Properties) setCustomEncAndDec(typ reflect.Type) { + p.ctype = typ + if p.Repeated { + p.enc = (*Buffer).enc_custom_slice_bytes + p.dec = (*Buffer).dec_custom_slice_bytes + p.size = size_custom_slice_bytes + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_custom_bytes + p.dec = (*Buffer).dec_custom_bytes + p.size = size_custom_bytes + } else { + p.enc = (*Buffer).enc_custom_ref_bytes + p.dec = (*Buffer).dec_custom_ref_bytes + p.size = size_custom_ref_bytes + } +} + +func (p *Properties) setDurationEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_duration + p.dec = (*Buffer).dec_slice_duration + p.size = size_slice_duration + } else { + p.enc = (*Buffer).enc_slice_ref_duration + p.dec = (*Buffer).dec_slice_ref_duration + p.size = size_slice_ref_duration + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_duration + p.dec = (*Buffer).dec_duration + p.size = size_duration + } else { + p.enc = (*Buffer).enc_ref_duration + p.dec = (*Buffer).dec_ref_duration + p.size = size_ref_duration + } +} + +func (p *Properties) setTimeEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_time + p.dec = (*Buffer).dec_slice_time + p.size = size_slice_time + } else { + p.enc = (*Buffer).enc_slice_ref_time + p.dec = (*Buffer).dec_slice_ref_time + p.size = size_slice_ref_time + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_time + p.dec = (*Buffer).dec_time + p.size = size_time + } else { + p.enc = (*Buffer).enc_ref_time + p.dec = (*Buffer).dec_ref_time + p.size = size_ref_time + } + +} + +func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { + t2 := typ.Elem() + p.sstype = typ + p.stype = t2 + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + p.enc = (*Buffer).enc_slice_ref_struct_message + p.dec = 
(*Buffer).dec_slice_ref_struct_message + p.size = size_slice_ref_struct_message + if p.Wire != "bytes" { + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 000000000..5a5fd93f7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package proto
+
+import (
+	"fmt"
+	"io"
+)
+
+// Skip consumes exactly one complete field (key plus payload) from the start
+// of the protobuf wire-format bytes in data and returns the number of bytes
+// consumed. It returns io.ErrUnexpectedEOF if data is truncated mid-field.
+func Skip(data []byte) (n int, err error) {
+	l := len(data)
+	index := 0
+	for index < l {
+		// Decode the field key, a varint holding (field_number<<3 | wire_type).
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if index >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := data[index]
+			index++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			// Varint payload: advance past continuation bytes (high bit set).
+			for {
+				if index >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				index++
+				if data[index-1] < 0x80 {
+					break
+				}
+			}
+			return index, nil
+		case 1:
+			// Fixed 64-bit payload.
+			index += 8
+			return index, nil
+		case 2:
+			// Length-delimited payload: varint length, then that many bytes.
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if index >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := data[index]
+				index++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			index += length
+			return index, nil
+		case 3:
+			// Start-group: skip nested fields recursively until the matching
+			// end-group key (wire type 4) is seen at this nesting level.
+			for {
+				var innerWire uint64
+				var start int = index
+				for shift := uint(0); ; shift += 7 {
+					if index >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := data[index]
+					index++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				// Not end-group: re-scan the inner field from its key onward.
+				next, err := Skip(data[start:])
+				if err != nil {
+					return 0, err
+				}
+				index = start + next
+			}
+			return index, nil
+		case 4:
+			// Bare end-group key: only the key itself is consumed.
+			return index, nil
+		case 5:
+			// Fixed 32-bit payload.
+			index += 4
+			return index, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	// Every iteration of the loop returns or errors after decoding a key,
+	// and an empty input never enters the loop body past the key decode.
+	panic("unreachable")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
new file mode 100644
index 000000000..d63732fcb
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -0,0 +1,928 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' 
|| ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return 
err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. 
+ if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if pv.Type().Implements(extensionRangeType) { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. +func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. 
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + props.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), props) + props.StdTime = true + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + props.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), props) + props.StdDuration = true + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. 
+ var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = 
fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 000000000..1d6c6aa0e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 000000000..9db12e960 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1013 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func 
isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. 
+ simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. 
+func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... 
> + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. 
+ fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := ×tamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) 
+ ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. 
+ for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + 
return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 000000000..9324f6542 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. 
+// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. 
+func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := ×tamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 000000000..d42764743 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,229 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} + +func (o *Buffer) decTimestamp() (time.Time, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return time.Time{}, err + } + tproto := ×tamp{} + if err := Unmarshal(b, tproto); err != nil { + return time.Time{}, err + } + return timestampFromProto(tproto) +} + +func (o *Buffer) dec_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + setPtrCustomType(base, p.field, &t) + return nil +} + +func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + setCustomType(base, p.field, &t) + return nil +} + +func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))) + var zero field + 
setPtrCustomType(newBas, zero, &t) + return nil +} + +func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType)) + var zero field + setCustomType(newBas, zero, &t) + return nil +} + +func size_time(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + tim := structPointer_Interface(structp, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_time(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + tim := structPointer_Interface(structp, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_time(p *Properties, base structPointer) (n int) { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return 0 + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return errRepeatedHasNil + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 000000000..3496dc99d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 000000000..e808a3fd0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,92 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. 
+func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 000000000..6c4d80f5f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2088 @@ +// Code generated by protoc-gen-gogo. +// source: descriptor.proto +// DO NOT EDIT! + +/* +Package descriptor is a generated protocol buffer package. 
+ +It is generated from these files: + descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. 
+ FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return 
proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{3, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{3, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{9, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. 
+ FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{11, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{11, 1} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} } + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. 
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{1} } + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{2} } + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = 
DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a field within a message. 
+type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. 
If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return 
"" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. +type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a value within 
an enum. +type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{6} +} + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{7} } + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil 
{ + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // If set true, then the Java code generator will generate equals() and + // hashCode() methods for all messages defined in the .proto file. + // This increases generated code size, potentially substantially for large + // protos, which may harm a memory-constrained application. + // - In the full runtime this is a speed optimization, as the + // AbstractMessage base class includes reflection-based implementations of + // these methods. 
+ // - In the lite runtime, setting this option changes the semantics of + // equals() and hashCode() to more closely match those of the full runtime; + // the generated methods compute their results based on field values rather + // than object identity. (Implementations should not assume that hashcodes + // will be consistent across runtimes or versions of the protocol compiler.) + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). 
+ // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaGenerateEqualsAndHash bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return Default_FileOptions_JavaGenerateEqualsAndHash +} + +func (m *FileOptions) GetJavaStringCheckUtf8() 
bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. 
You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. 
+ // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return 
Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). By default these types are + // represented as JavaScript strings. This avoids loss of precision that can + // happen when a large value is converted to a floating point JavaScript + // numbers. 
Specifying JS_NUMBER for the jstype causes the generated + // JavaScript code to use the JavaScript "number" type instead of strings. + // This option is an enum to permit additional types to be added, + // e.g. goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outher message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. 
That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} } + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func 
(m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. 
+ AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. 
+ Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +const Default_MethodOptions_Deprecated bool = false + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) 
GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{17, 0} +} + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. 
+ // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. 
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} } + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{18, 0} +} + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. 
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{19, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), 
"google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) } + +var fileDescriptorDescriptor = []byte{ + // 2273 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0xea, 0xcb, 0xd2, 0x93, 0x2c, 0x8f, 0xc7, 0xde, 0x84, 0x71, 0x36, 0x1b, 0x47, 0x9b, + 0x34, 0x4e, 0xd2, 0x3a, 0x0b, 0xe7, 0x63, 0xb3, 0xde, 0x62, 0x0b, 0x59, 0x62, 0xbc, 0x0a, 0x64, + 0x4b, 0xa5, 0xec, 0x36, 0xbb, 0x3d, 0x10, 0x63, 0x72, 0x24, 0x33, 0xa1, 0x86, 0x2c, 0x49, 0x25, + 0xf1, 0x9e, 0x02, 0xf4, 0x54, 0xf4, 0x1f, 0x28, 0xda, 0xa2, 0x28, 0xf6, 0xb2, 0x40, 0xff, 0x80, + 0x1e, 0x7a, 0xef, 0xb5, 0x40, 0xef, 0x3d, 0x16, 0x68, 0xff, 0x83, 0x5e, 0x8b, 0x99, 0x21, 0x29, + 0xea, 0x6b, 0xe3, 0x2e, 0xb0, 0x1f, 0x27, 0x6b, 0x7e, 0xef, 0xf7, 0x1e, 0xdf, 0xbc, 0x79, 0x7c, + 0xef, 0x71, 0x0c, 0xc8, 0xa2, 0x81, 0xe9, 0xdb, 0x5e, 0xe8, 0xfa, 0xdb, 0x9e, 0xef, 0x86, 0x2e, + 0x5e, 0x19, 0xb8, 0xee, 0xc0, 0xa1, 0x72, 0x75, 0x32, 0xea, 0xd7, 0x0e, 0x60, 0xf5, 0xb1, 0xed, + 0xd0, 0x66, 0x42, 0xec, 0xd1, 0x10, 0x3f, 0x82, 0x5c, 0xdf, 0x76, 0xa8, 0xaa, 0x6c, 0x66, 0xb7, + 0xca, 0x3b, 0xd7, 0xb7, 0xa7, 0x94, 0xb6, 0x27, 0x35, 0xba, 0x1c, 0xd6, 0x85, 0x46, 0xed, 0x5f, + 0x39, 0x58, 0x9b, 0x23, 0xc5, 0x18, 0x72, 0x8c, 0x0c, 0xb9, 0x45, 0x65, 0xab, 0xa4, 0x8b, 0xdf, + 0x58, 0x85, 0x25, 0x8f, 0x98, 0xcf, 0xc9, 0x80, 0xaa, 0x19, 0x01, 0xc7, 0x4b, 0xfc, 0x2e, 0x80, + 0x45, 0x3d, 0xca, 0x2c, 0xca, 0xcc, 0x33, 0x35, 0xbb, 0x99, 0xdd, 0x2a, 0xe9, 0x29, 0x04, 0xdf, + 0x81, 0x55, 0x6f, 0x74, 0xe2, 0xd8, 0xa6, 0x91, 0xa2, 0xc1, 0x66, 0x76, 0x2b, 0xaf, 0x23, 0x29, + 0x68, 0x8e, 0xc9, 0x37, 0x61, 0xe5, 0x25, 0x25, 0xcf, 0xd3, 0xd4, 0xb2, 0xa0, 0x56, 0x39, 0x9c, + 0x22, 0x36, 0xa0, 0x32, 0xa4, 0x41, 0x40, 0x06, 0xd4, 0x08, 0xcf, 0x3c, 0xaa, 0xe6, 0xc4, 0xee, + 0x37, 0x67, 0x76, 0x3f, 0xbd, 0xf3, 0x72, 0xa4, 0x75, 0x74, 0xe6, 0x51, 0x5c, 0x87, 0x12, 0x65, + 0xa3, 0xa1, 0xb4, 0x90, 0x5f, 0x10, 0x3f, 0x8d, 0x8d, 0x86, 0xd3, 0x56, 0x8a, 0x5c, 0x2d, 0x32, + 0xb1, 0x14, 0x50, 0xff, 0x85, 0x6d, 0x52, 0xb5, 0x20, 0x0c, 0xdc, 0x9c, 0x31, 0xd0, 0x93, 0xf2, + 0x69, 0x1b, 0xb1, 
0x1e, 0x6e, 0x40, 0x89, 0xbe, 0x0a, 0x29, 0x0b, 0x6c, 0x97, 0xa9, 0x4b, 0xc2, + 0xc8, 0x8d, 0x39, 0xa7, 0x48, 0x1d, 0x6b, 0xda, 0xc4, 0x58, 0x0f, 0x3f, 0x84, 0x25, 0xd7, 0x0b, + 0x6d, 0x97, 0x05, 0x6a, 0x71, 0x53, 0xd9, 0x2a, 0xef, 0xbc, 0x33, 0x37, 0x11, 0x3a, 0x92, 0xa3, + 0xc7, 0x64, 0xdc, 0x02, 0x14, 0xb8, 0x23, 0xdf, 0xa4, 0x86, 0xe9, 0x5a, 0xd4, 0xb0, 0x59, 0xdf, + 0x55, 0x4b, 0xc2, 0xc0, 0xd5, 0xd9, 0x8d, 0x08, 0x62, 0xc3, 0xb5, 0x68, 0x8b, 0xf5, 0x5d, 0xbd, + 0x1a, 0x4c, 0xac, 0xf1, 0x05, 0x28, 0x04, 0x67, 0x2c, 0x24, 0xaf, 0xd4, 0x8a, 0xc8, 0x90, 0x68, + 0x55, 0xfb, 0x6f, 0x1e, 0x56, 0xce, 0x93, 0x62, 0x1f, 0x41, 0xbe, 0xcf, 0x77, 0xa9, 0x66, 0xfe, + 0x9f, 0x18, 0x48, 0x9d, 0xc9, 0x20, 0x16, 0xbe, 0x66, 0x10, 0xeb, 0x50, 0x66, 0x34, 0x08, 0xa9, + 0x25, 0x33, 0x22, 0x7b, 0xce, 0x9c, 0x02, 0xa9, 0x34, 0x9b, 0x52, 0xb9, 0xaf, 0x95, 0x52, 0x4f, + 0x61, 0x25, 0x71, 0xc9, 0xf0, 0x09, 0x1b, 0xc4, 0xb9, 0x79, 0xf7, 0x4d, 0x9e, 0x6c, 0x6b, 0xb1, + 0x9e, 0xce, 0xd5, 0xf4, 0x2a, 0x9d, 0x58, 0xe3, 0x26, 0x80, 0xcb, 0xa8, 0xdb, 0x37, 0x2c, 0x6a, + 0x3a, 0x6a, 0x71, 0x41, 0x94, 0x3a, 0x9c, 0x32, 0x13, 0x25, 0x57, 0xa2, 0xa6, 0x83, 0x3f, 0x1c, + 0xa7, 0xda, 0xd2, 0x82, 0x4c, 0x39, 0x90, 0x2f, 0xd9, 0x4c, 0xb6, 0x1d, 0x43, 0xd5, 0xa7, 0x3c, + 0xef, 0xa9, 0x15, 0xed, 0xac, 0x24, 0x9c, 0xd8, 0x7e, 0xe3, 0xce, 0xf4, 0x48, 0x4d, 0x6e, 0x6c, + 0xd9, 0x4f, 0x2f, 0xf1, 0x7b, 0x90, 0x00, 0x86, 0x48, 0x2b, 0x10, 0x55, 0xa8, 0x12, 0x83, 0x87, + 0x64, 0x48, 0x37, 0x1e, 0x41, 0x75, 0x32, 0x3c, 0x78, 0x1d, 0xf2, 0x41, 0x48, 0xfc, 0x50, 0x64, + 0x61, 0x5e, 0x97, 0x0b, 0x8c, 0x20, 0x4b, 0x99, 0x25, 0xaa, 0x5c, 0x5e, 0xe7, 0x3f, 0x37, 0x3e, + 0x80, 0xe5, 0x89, 0xc7, 0x9f, 0x57, 0xb1, 0xf6, 0xdb, 0x02, 0xac, 0xcf, 0xcb, 0xb9, 0xb9, 0xe9, + 0x7f, 0x01, 0x0a, 0x6c, 0x34, 0x3c, 0xa1, 0xbe, 0x9a, 0x15, 0x16, 0xa2, 0x15, 0xae, 0x43, 0xde, + 0x21, 0x27, 0xd4, 0x51, 0x73, 0x9b, 0xca, 0x56, 0x75, 0xe7, 0xce, 0xb9, 0xb2, 0x7a, 0xbb, 0xcd, + 0x55, 0x74, 0xa9, 0x89, 0x3f, 0x86, 0x5c, 0x54, 0xe2, 
0xb8, 0x85, 0xdb, 0xe7, 0xb3, 0xc0, 0x73, + 0x51, 0x17, 0x7a, 0xf8, 0x32, 0x94, 0xf8, 0x5f, 0x19, 0xdb, 0x82, 0xf0, 0xb9, 0xc8, 0x01, 0x1e, + 0x57, 0xbc, 0x01, 0x45, 0x91, 0x66, 0x16, 0x8d, 0x5b, 0x43, 0xb2, 0xe6, 0x07, 0x63, 0xd1, 0x3e, + 0x19, 0x39, 0xa1, 0xf1, 0x82, 0x38, 0x23, 0x2a, 0x12, 0xa6, 0xa4, 0x57, 0x22, 0xf0, 0x67, 0x1c, + 0xc3, 0x57, 0xa1, 0x2c, 0xb3, 0xd2, 0x66, 0x16, 0x7d, 0x25, 0xaa, 0x4f, 0x5e, 0x97, 0x89, 0xda, + 0xe2, 0x08, 0x7f, 0xfc, 0xb3, 0xc0, 0x65, 0xf1, 0xd1, 0x8a, 0x47, 0x70, 0x40, 0x3c, 0xfe, 0x83, + 0xe9, 0xc2, 0x77, 0x65, 0xfe, 0xf6, 0xa6, 0x73, 0xb1, 0xf6, 0x97, 0x0c, 0xe4, 0xc4, 0xfb, 0xb6, + 0x02, 0xe5, 0xa3, 0x4f, 0xbb, 0x9a, 0xd1, 0xec, 0x1c, 0xef, 0xb5, 0x35, 0xa4, 0xe0, 0x2a, 0x80, + 0x00, 0x1e, 0xb7, 0x3b, 0xf5, 0x23, 0x94, 0x49, 0xd6, 0xad, 0xc3, 0xa3, 0x87, 0xf7, 0x51, 0x36, + 0x51, 0x38, 0x96, 0x40, 0x2e, 0x4d, 0xb8, 0xb7, 0x83, 0xf2, 0x18, 0x41, 0x45, 0x1a, 0x68, 0x3d, + 0xd5, 0x9a, 0x0f, 0xef, 0xa3, 0xc2, 0x24, 0x72, 0x6f, 0x07, 0x2d, 0xe1, 0x65, 0x28, 0x09, 0x64, + 0xaf, 0xd3, 0x69, 0xa3, 0x62, 0x62, 0xb3, 0x77, 0xa4, 0xb7, 0x0e, 0xf7, 0x51, 0x29, 0xb1, 0xb9, + 0xaf, 0x77, 0x8e, 0xbb, 0x08, 0x12, 0x0b, 0x07, 0x5a, 0xaf, 0x57, 0xdf, 0xd7, 0x50, 0x39, 0x61, + 0xec, 0x7d, 0x7a, 0xa4, 0xf5, 0x50, 0x65, 0xc2, 0xad, 0x7b, 0x3b, 0x68, 0x39, 0x79, 0x84, 0x76, + 0x78, 0x7c, 0x80, 0xaa, 0x78, 0x15, 0x96, 0xe5, 0x23, 0x62, 0x27, 0x56, 0xa6, 0xa0, 0x87, 0xf7, + 0x11, 0x1a, 0x3b, 0x22, 0xad, 0xac, 0x4e, 0x00, 0x0f, 0xef, 0x23, 0x5c, 0x6b, 0x40, 0x5e, 0x64, + 0x17, 0xc6, 0x50, 0x6d, 0xd7, 0xf7, 0xb4, 0xb6, 0xd1, 0xe9, 0x1e, 0xb5, 0x3a, 0x87, 0xf5, 0x36, + 0x52, 0xc6, 0x98, 0xae, 0xfd, 0xf4, 0xb8, 0xa5, 0x6b, 0x4d, 0x94, 0x49, 0x63, 0x5d, 0xad, 0x7e, + 0xa4, 0x35, 0x51, 0xb6, 0x66, 0xc2, 0xfa, 0xbc, 0x3a, 0x33, 0xf7, 0xcd, 0x48, 0x1d, 0x71, 0x66, + 0xc1, 0x11, 0x0b, 0x5b, 0x33, 0x47, 0xfc, 0x85, 0x02, 0x6b, 0x73, 0x6a, 0xed, 0xdc, 0x87, 0xfc, + 0x04, 0xf2, 0x32, 0x45, 0x65, 0xf7, 0xb9, 0x35, 0xb7, 0x68, 0x8b, 0x84, 0x9d, 0xe9, 0x40, 
0x42, + 0x2f, 0xdd, 0x81, 0xb3, 0x0b, 0x3a, 0x30, 0x37, 0x31, 0xe3, 0xe4, 0xaf, 0x14, 0x50, 0x17, 0xd9, + 0x7e, 0x43, 0xa1, 0xc8, 0x4c, 0x14, 0x8a, 0x8f, 0xa6, 0x1d, 0xb8, 0xb6, 0x78, 0x0f, 0x33, 0x5e, + 0x7c, 0xa9, 0xc0, 0x85, 0xf9, 0x83, 0xca, 0x5c, 0x1f, 0x3e, 0x86, 0xc2, 0x90, 0x86, 0xa7, 0x6e, + 0xdc, 0xac, 0x7f, 0x30, 0xa7, 0x05, 0x70, 0xf1, 0x74, 0xac, 0x22, 0xad, 0x74, 0x0f, 0xc9, 0x2e, + 0x9a, 0x36, 0xa4, 0x37, 0x33, 0x9e, 0xfe, 0x3a, 0x03, 0x6f, 0xcf, 0x35, 0x3e, 0xd7, 0xd1, 0x2b, + 0x00, 0x36, 0xf3, 0x46, 0xa1, 0x6c, 0xc8, 0xb2, 0x3e, 0x95, 0x04, 0x22, 0xde, 0x7d, 0x5e, 0x7b, + 0x46, 0x61, 0x22, 0xcf, 0x0a, 0x39, 0x48, 0x48, 0x10, 0x1e, 0x8d, 0x1d, 0xcd, 0x09, 0x47, 0xdf, + 0x5d, 0xb0, 0xd3, 0x99, 0x5e, 0xf7, 0x3e, 0x20, 0xd3, 0xb1, 0x29, 0x0b, 0x8d, 0x20, 0xf4, 0x29, + 0x19, 0xda, 0x6c, 0x20, 0x0a, 0x70, 0x71, 0x37, 0xdf, 0x27, 0x4e, 0x40, 0xf5, 0x15, 0x29, 0xee, + 0xc5, 0x52, 0xae, 0x21, 0xba, 0x8c, 0x9f, 0xd2, 0x28, 0x4c, 0x68, 0x48, 0x71, 0xa2, 0x51, 0xfb, + 0xcd, 0x12, 0x94, 0x53, 0x63, 0x1d, 0xbe, 0x06, 0x95, 0x67, 0xe4, 0x05, 0x31, 0xe2, 0x51, 0x5d, + 0x46, 0xa2, 0xcc, 0xb1, 0x6e, 0x34, 0xae, 0xbf, 0x0f, 0xeb, 0x82, 0xe2, 0x8e, 0x42, 0xea, 0x1b, + 0xa6, 0x43, 0x82, 0x40, 0x04, 0xad, 0x28, 0xa8, 0x98, 0xcb, 0x3a, 0x5c, 0xd4, 0x88, 0x25, 0xf8, + 0x01, 0xac, 0x09, 0x8d, 0xe1, 0xc8, 0x09, 0x6d, 0xcf, 0xa1, 0x06, 0xff, 0x78, 0x08, 0x44, 0x21, + 0x4e, 0x3c, 0x5b, 0xe5, 0x8c, 0x83, 0x88, 0xc0, 0x3d, 0x0a, 0xf0, 0x3e, 0x5c, 0x11, 0x6a, 0x03, + 0xca, 0xa8, 0x4f, 0x42, 0x6a, 0xd0, 0x5f, 0x8e, 0x88, 0x13, 0x18, 0x84, 0x59, 0xc6, 0x29, 0x09, + 0x4e, 0xd5, 0xf5, 0xb4, 0x81, 0x4b, 0x9c, 0xbb, 0x1f, 0x51, 0x35, 0xc1, 0xac, 0x33, 0xeb, 0x13, + 0x12, 0x9c, 0xe2, 0x5d, 0xb8, 0x20, 0x0c, 0x05, 0xa1, 0x6f, 0xb3, 0x81, 0x61, 0x9e, 0x52, 0xf3, + 0xb9, 0x31, 0x0a, 0xfb, 0x8f, 0xd4, 0xcb, 0x69, 0x0b, 0xc2, 0xc9, 0x9e, 0xe0, 0x34, 0x38, 0xe5, + 0x38, 0xec, 0x3f, 0xc2, 0x3d, 0xa8, 0xf0, 0xf3, 0x18, 0xda, 0x9f, 0x53, 0xa3, 0xef, 0xfa, 0xa2, + 0xb9, 0x54, 0xe7, 0xbc, 0xdc, 
0xa9, 0x20, 0x6e, 0x77, 0x22, 0x85, 0x03, 0xd7, 0xa2, 0xbb, 0xf9, + 0x5e, 0x57, 0xd3, 0x9a, 0x7a, 0x39, 0xb6, 0xf2, 0xd8, 0xf5, 0x79, 0x4e, 0x0d, 0xdc, 0x24, 0xc6, + 0x65, 0x99, 0x53, 0x03, 0x37, 0x8e, 0xf0, 0x03, 0x58, 0x33, 0x4d, 0xb9, 0x6d, 0xdb, 0x34, 0xa2, + 0x29, 0x3f, 0x50, 0xd1, 0x44, 0xbc, 0x4c, 0x73, 0x5f, 0x12, 0xa2, 0x34, 0x0f, 0xf0, 0x87, 0xf0, + 0xf6, 0x38, 0x5e, 0x69, 0xc5, 0xd5, 0x99, 0x5d, 0x4e, 0xab, 0x3e, 0x80, 0x35, 0xef, 0x6c, 0x56, + 0x11, 0x4f, 0x3c, 0xd1, 0x3b, 0x9b, 0x56, 0xbb, 0x21, 0xbe, 0xdc, 0x7c, 0x6a, 0x92, 0x90, 0x5a, + 0xea, 0xc5, 0x34, 0x3b, 0x25, 0xc0, 0x77, 0x01, 0x99, 0xa6, 0x41, 0x19, 0x39, 0x71, 0xa8, 0x41, + 0x7c, 0xca, 0x48, 0xa0, 0x5e, 0x4d, 0x93, 0xab, 0xa6, 0xa9, 0x09, 0x69, 0x5d, 0x08, 0xf1, 0x6d, + 0x58, 0x75, 0x4f, 0x9e, 0x99, 0x32, 0xb9, 0x0c, 0xcf, 0xa7, 0x7d, 0xfb, 0x95, 0x7a, 0x5d, 0x84, + 0x69, 0x85, 0x0b, 0x44, 0x6a, 0x75, 0x05, 0x8c, 0x6f, 0x01, 0x32, 0x83, 0x53, 0xe2, 0x7b, 0xa2, + 0xbb, 0x07, 0x1e, 0x31, 0xa9, 0x7a, 0x43, 0x52, 0x25, 0x7e, 0x18, 0xc3, 0xf8, 0x29, 0xac, 0x8f, + 0x98, 0xcd, 0x42, 0xea, 0x7b, 0x3e, 0xe5, 0x43, 0xba, 0x7c, 0xd3, 0xd4, 0x7f, 0x2f, 0x2d, 0x18, + 0xb3, 0x8f, 0xd3, 0x6c, 0x79, 0xba, 0xfa, 0xda, 0x68, 0x16, 0xac, 0xed, 0x42, 0x25, 0x7d, 0xe8, + 0xb8, 0x04, 0xf2, 0xd8, 0x91, 0xc2, 0x7b, 0x68, 0xa3, 0xd3, 0xe4, 0xdd, 0xef, 0x33, 0x0d, 0x65, + 0x78, 0x17, 0x6e, 0xb7, 0x8e, 0x34, 0x43, 0x3f, 0x3e, 0x3c, 0x6a, 0x1d, 0x68, 0x28, 0x7b, 0xbb, + 0x54, 0xfc, 0xcf, 0x12, 0x7a, 0xfd, 0xfa, 0xf5, 0xeb, 0x4c, 0xed, 0x6f, 0x19, 0xa8, 0x4e, 0x4e, + 0xbe, 0xf8, 0xc7, 0x70, 0x31, 0xfe, 0x4c, 0x0d, 0x68, 0x68, 0xbc, 0xb4, 0x7d, 0x91, 0x87, 0x43, + 0x22, 0x67, 0xc7, 0x24, 0x84, 0xeb, 0x11, 0xab, 0x47, 0xc3, 0x9f, 0xdb, 0x3e, 0xcf, 0xb2, 0x21, + 0x09, 0x71, 0x1b, 0xae, 0x32, 0xd7, 0x08, 0x42, 0xc2, 0x2c, 0xe2, 0x5b, 0xc6, 0xf8, 0x82, 0xc0, + 0x20, 0xa6, 0x49, 0x83, 0xc0, 0x95, 0x2d, 0x20, 0xb1, 0xf2, 0x0e, 0x73, 0x7b, 0x11, 0x79, 0x5c, + 0x1b, 0xeb, 0x11, 0x75, 0xea, 0xb8, 0xb3, 0x8b, 0x8e, 0xfb, 0x32, 
0x94, 0x86, 0xc4, 0x33, 0x28, + 0x0b, 0xfd, 0x33, 0x31, 0xaf, 0x15, 0xf5, 0xe2, 0x90, 0x78, 0x1a, 0x5f, 0x7f, 0x73, 0x67, 0x90, + 0x8e, 0xe3, 0x3f, 0xb3, 0x50, 0x49, 0xcf, 0x6c, 0x7c, 0x04, 0x36, 0x45, 0x7d, 0x56, 0xc4, 0xeb, + 0xfb, 0xde, 0x57, 0x4e, 0x78, 0xdb, 0x0d, 0x5e, 0xb8, 0x77, 0x0b, 0x72, 0x92, 0xd2, 0xa5, 0x26, + 0x6f, 0x9a, 0xfc, 0x85, 0xa5, 0x72, 0x3e, 0x2f, 0xea, 0xd1, 0x0a, 0xef, 0x43, 0xe1, 0x59, 0x20, + 0x6c, 0x17, 0x84, 0xed, 0xeb, 0x5f, 0x6d, 0xfb, 0x49, 0x4f, 0x18, 0x2f, 0x3d, 0xe9, 0x19, 0x87, + 0x1d, 0xfd, 0xa0, 0xde, 0xd6, 0x23, 0x75, 0x7c, 0x09, 0x72, 0x0e, 0xf9, 0xfc, 0x6c, 0xb2, 0xc4, + 0x0b, 0xe8, 0xbc, 0x81, 0xbf, 0x04, 0xb9, 0x97, 0x94, 0x3c, 0x9f, 0x2c, 0xac, 0x02, 0xfa, 0x06, + 0x53, 0xff, 0x2e, 0xe4, 0x45, 0xbc, 0x30, 0x40, 0x14, 0x31, 0xf4, 0x16, 0x2e, 0x42, 0xae, 0xd1, + 0xd1, 0x79, 0xfa, 0x23, 0xa8, 0x48, 0xd4, 0xe8, 0xb6, 0xb4, 0x86, 0x86, 0x32, 0xb5, 0x07, 0x50, + 0x90, 0x41, 0xe0, 0xaf, 0x46, 0x12, 0x06, 0xf4, 0x56, 0xb4, 0x8c, 0x6c, 0x28, 0xb1, 0xf4, 0xf8, + 0x60, 0x4f, 0xd3, 0x51, 0x26, 0x7d, 0xbc, 0x01, 0x54, 0xd2, 0xe3, 0xda, 0xb7, 0x93, 0x53, 0x7f, + 0x55, 0xa0, 0x9c, 0x1a, 0xbf, 0x78, 0xe3, 0x27, 0x8e, 0xe3, 0xbe, 0x34, 0x88, 0x63, 0x93, 0x20, + 0x4a, 0x0a, 0x10, 0x50, 0x9d, 0x23, 0xe7, 0x3d, 0xb4, 0x6f, 0xc5, 0xf9, 0x3f, 0x2a, 0x80, 0xa6, + 0x47, 0xb7, 0x29, 0x07, 0x95, 0xef, 0xd4, 0xc1, 0x3f, 0x28, 0x50, 0x9d, 0x9c, 0xd7, 0xa6, 0xdc, + 0xbb, 0xf6, 0x9d, 0xba, 0xf7, 0x7b, 0x05, 0x96, 0x27, 0xa6, 0xb4, 0xef, 0x95, 0x77, 0xbf, 0xcb, + 0xc2, 0xda, 0x1c, 0x3d, 0x5c, 0x8f, 0xc6, 0x59, 0x39, 0x61, 0xff, 0xe8, 0x3c, 0xcf, 0xda, 0xe6, + 0xdd, 0xb2, 0x4b, 0xfc, 0x30, 0x9a, 0x7e, 0x6f, 0x01, 0xb2, 0x2d, 0xca, 0x42, 0xbb, 0x6f, 0x53, + 0x3f, 0xfa, 0x04, 0x97, 0x33, 0xee, 0xca, 0x18, 0x97, 0x5f, 0xe1, 0x3f, 0x04, 0xec, 0xb9, 0x81, + 0x1d, 0xda, 0x2f, 0xa8, 0x61, 0xb3, 0xf8, 0x7b, 0x9d, 0xcf, 0xbc, 0x39, 0x1d, 0xc5, 0x92, 0x16, + 0x0b, 0x13, 0x36, 0xa3, 0x03, 0x32, 0xc5, 0xe6, 0xb5, 0x2f, 0xab, 0xa3, 0x58, 0x92, 0xb0, 0xaf, + 0x41, 
0xc5, 0x72, 0x47, 0x7c, 0x7c, 0x90, 0x3c, 0x5e, 0x6a, 0x15, 0xbd, 0x2c, 0xb1, 0x84, 0x12, + 0xcd, 0x77, 0xe3, 0x8b, 0x82, 0x8a, 0x5e, 0x96, 0x98, 0xa4, 0xdc, 0x84, 0x15, 0x32, 0x18, 0xf8, + 0xdc, 0x78, 0x6c, 0x48, 0x0e, 0xad, 0xd5, 0x04, 0x16, 0xc4, 0x8d, 0x27, 0x50, 0x8c, 0xe3, 0xc0, + 0xbb, 0x19, 0x8f, 0x84, 0xe1, 0xc9, 0xeb, 0x9a, 0xcc, 0x56, 0x49, 0x2f, 0xb2, 0x58, 0x78, 0x0d, + 0x2a, 0x76, 0x60, 0x8c, 0xef, 0x0d, 0x33, 0x9b, 0x99, 0xad, 0xa2, 0x5e, 0xb6, 0x83, 0xe4, 0xa2, + 0xa8, 0xf6, 0x65, 0x06, 0xaa, 0x93, 0xf7, 0x9e, 0xb8, 0x09, 0x45, 0xc7, 0x35, 0x89, 0x48, 0x04, + 0x79, 0xe9, 0xbe, 0xf5, 0x86, 0xab, 0xd2, 0xed, 0x76, 0xc4, 0xd7, 0x13, 0xcd, 0x8d, 0xbf, 0x2b, + 0x50, 0x8c, 0x61, 0x7c, 0x01, 0x72, 0x1e, 0x09, 0x4f, 0x85, 0xb9, 0xfc, 0x5e, 0x06, 0x29, 0xba, + 0x58, 0x73, 0x3c, 0xf0, 0x08, 0x13, 0x29, 0x10, 0xe1, 0x7c, 0xcd, 0xcf, 0xd5, 0xa1, 0xc4, 0x12, + 0xe3, 0xb0, 0x3b, 0x1c, 0x52, 0x16, 0x06, 0xf1, 0xb9, 0x46, 0x78, 0x23, 0x82, 0xf1, 0x1d, 0x58, + 0x0d, 0x7d, 0x62, 0x3b, 0x13, 0xdc, 0x9c, 0xe0, 0xa2, 0x58, 0x90, 0x90, 0x77, 0xe1, 0x52, 0x6c, + 0xd7, 0xa2, 0x21, 0x31, 0x4f, 0xa9, 0x35, 0x56, 0x2a, 0x88, 0x4b, 0xb5, 0x8b, 0x11, 0xa1, 0x19, + 0xc9, 0x63, 0xdd, 0xda, 0x3f, 0x14, 0x58, 0x8d, 0x07, 0x78, 0x2b, 0x09, 0xd6, 0x01, 0x00, 0x61, + 0xcc, 0x0d, 0xd3, 0xe1, 0x9a, 0x4d, 0xe5, 0x19, 0xbd, 0xed, 0x7a, 0xa2, 0xa4, 0xa7, 0x0c, 0x6c, + 0x0c, 0x01, 0xc6, 0x92, 0x85, 0x61, 0xbb, 0x0a, 0xe5, 0xe8, 0x52, 0x5b, 0xfc, 0x67, 0x44, 0x7e, + 0xf5, 0x81, 0x84, 0xf8, 0xa4, 0x8f, 0xd7, 0x21, 0x7f, 0x42, 0x07, 0x36, 0x8b, 0xae, 0xda, 0xe4, + 0x22, 0xbe, 0xc0, 0xcb, 0x25, 0x17, 0x78, 0x7b, 0xbf, 0x80, 0x35, 0xd3, 0x1d, 0x4e, 0xbb, 0xbb, + 0x87, 0xa6, 0xbe, 0x3c, 0x83, 0x4f, 0x94, 0xcf, 0x60, 0x3c, 0x9d, 0xfd, 0x49, 0x51, 0xbe, 0xc8, + 0x64, 0xf7, 0xbb, 0x7b, 0x7f, 0xce, 0x6c, 0xec, 0x4b, 0xd5, 0x6e, 0xbc, 0x53, 0x9d, 0xf6, 0x1d, + 0x6a, 0x72, 0xef, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xf2, 0xf3, 0xa9, 0xf1, 0x19, 0x00, + 0x00, +} diff --git 
a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 000000000..785d6f9fe --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,715 @@ +// Code generated by protoc-gen-gogo. +// source: descriptor.proto +// DO NOT EDIT! + +/* +Package descriptor is a generated protocol buffer package. + +It is generated from these files: + descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import fmt "fmt" +import strings "strings" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import sort "sort" +import strconv "strconv" +import reflect "reflect" +import proto "github.com/gogo/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + 
if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if 
this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "descriptor.FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "descriptor.FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: 
"+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, 
"Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this 
*FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 19) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "descriptor.FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } 
+ if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "descriptor.FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = 
append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "descriptor.FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: 
"+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if 
this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name != nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = 
append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = 
append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 000000000..861f4d028 --- /dev/null +++ 
b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,357 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' 
{ + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) 
*DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) 
FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if 
strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/nats-io/go-nats-streaming/LICENSE b/vendor/github.com/nats-io/go-nats-streaming/LICENSE new file mode 100644 index 000000000..d5cf6aa5f --- /dev/null +++ b/vendor/github.com/nats-io/go-nats-streaming/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Apcera Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/nats-io/go-nats-streaming/README.md b/vendor/github.com/nats-io/go-nats-streaming/README.md new file mode 100644 index 000000000..945eb0650 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats-streaming/README.md @@ -0,0 +1,350 @@ +# NATS Streaming + +NATS Streaming is an extremely performant, lightweight reliable streaming platform powered by [NATS](https://nats.io). 
+ +[![License MIT](https://img.shields.io/npm/l/express.svg)](http://opensource.org/licenses/MIT) +[![Build Status](https://travis-ci.org/nats-io/go-nats-streaming.svg?branch=master)](http://travis-ci.org/nats-io/go-nats-streaming) +[![Coverage Status](https://coveralls.io/repos/nats-io/go-nats-streaming/badge.svg?branch=master)](https://coveralls.io/r/nats-io/go-nats-streaming?branch=master) + +NATS Streaming provides the following high-level feature set: +- Log based persistence +- At-Least-Once Delivery model, giving reliable message delivery +- Rate matched on a per subscription basis +- Replay/Restart +- Last Value Semantics + +## Notes + +- Please raise questions/issues via the [Issue Tracker](https://github.com/nats-io/go-nats-streaming/issues). + +## Known Issues +- Time- and sequence-based subscriptions are exact. Requesting a time or seqno before the earliest stored message for a subject will result in an error (in SubscriptionRequest.Error) + +## Installation + +```bash +# Go client +go get github.com/nats-io/go-nats-streaming +``` + +## Basic Usage + +```go + +sc, _ := stan.Connect(clusterID, clientID) + +// Simple Synchronous Publisher +sc.Publish("foo", []byte("Hello World")) // does not return until an ack has been received from NATS Streaming + +// Simple Async Subscriber +sub, _ := sc.Subscribe("foo", func(m *stan.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}) + +// Unsubscribe +sub.Unsubscribe() + +// Close connection +sc.Close() +``` + +### Subscription Start (i.e. Replay) Options + +NATS Streaming subscriptions are similar to NATS subscriptions, but clients may start their subscription at an earlier point in the message stream, allowing them to receive messages that were published before this client registered interest. 
+ +The options are described with examples below: + +```go + +// Subscribe starting with most recently published value +sub, err := sc.Subscribe("foo", func(m *stan.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}, StartWithLastReceived()) + +// Receive all stored values in order +sub, err := sc.Subscribe("foo", func(m *stan.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}, DeliverAllAvailable()) + +// Receive messages starting at a specific sequence number +sub, err := sc.Subscribe("foo", func(m *stan.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}, StartAtSequence(22)) + +// Subscribe starting at a specific time +var startTime time.Time +... +sub, err := sc.Subscribe("foo", func(m *stan.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}, StartAtTime(startTime)) + +// Subscribe starting a specific amount of time in the past (e.g. 30 seconds ago) +sub, err := sc.Subscribe("foo", func(m *stan.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}, StartAtTimeDelta(time.ParseDuration("30s"))) +``` + +### Durable Subscriptions + +Replay of messages offers great flexibility for clients wishing to begin processing at some earlier point in the data stream. +However, some clients just need to pick up where they left off from an earlier session, without having to manually track their position in the stream of messages. +Durable subscriptions allow clients to assign a durable name to a subscription when it is created. +Doing this causes the NATS Streaming server to track the last acknowledged message for that clientID + durable name, so that only messages since the last acknowledged message will be delivered to the client. + +```go +sc, _ := stan.Connect("test-cluster", "client-123") + +// Subscribe with durable name +sc.Subscribe("foo", func(m *stan.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}, stan.DurableName("my-durable")) +... 
+// client receives message sequence 1-40 +... +// client disconnects for an hour +... +// client reconnects with same clientID "client-123" +sc, _ := stan.Connect("test-cluster", "client-123") + +// client re-subscribes to "foo" with same durable name "my-durable" +sc.Subscribe("foo", func(m *stan.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}, stan.DurableName("my-durable")) +... +// client receives messages 41-current +``` + +### Queue Groups + +All subscriptions with the same queue name (regardless of the connection +they originate from) will form a queue group. +Each message will be delivered to only one subscriber per queue group, +using queuing semantics. You can have as many queue groups as you wish. + +Normal subscribers will continue to work as expected. + +#### Creating a Queue Group + +A queue group is automatically created when the first queue subscriber is +created. If the group already exists, the member is added to the group. + +```go +sc, _ := stan.Connect("test-cluster", "clientid") + +// Create a queue subscriber on "foo" for group "bar" +qsub1, _ := sc.QueueSubscribe("foo", "bar", qcb) + +// Add a second member +qsub2, _ := sc.QueueSubscribe("foo", "bar", qcb) + +// Notice that you can have a regular subscriber on that subject +sub, _ := sc.Subscribe("foo", cb) + +// A message on "foo" will be received by sub and qsub1 or qsub2. +``` + +#### Start Position + +Note that once a queue group is formed, a member's start position is ignored +when added to the group. It will start receive messages from the last +position in the group. + +Suppose the channel `foo` exists and there are `500` messages stored, the group +`bar` is already created, there are two members and the last +message sequence sent is `100`. A new member is added. Note its start position: + +```go +sc.QueueSubscribe("foo", "bar", qcb, stan.StartAtSequence(200)) +``` + +This will not produce an error, but the start position will be ignored. 
Assuming +this member would be the one receiving the next message, it would receive message +sequence `101`. + +#### Leaving the Group + +There are two ways of leaving the group: closing the subscriber's connection or +calling `Unsubscribe`: + +```go +// Have qsub leave the queue group +qsub.Unsubscribe() +``` + +If the leaving member had un-acknowledged messages, those messages are reassigned +to the remaining members. + +#### Closing a Queue Group + +There is no special API for that. Once all members have left (either calling `Unsubscribe`, +or their connections are closed), the group is removed from the server. + +The next call to `QueueSubscribe` with the same group name will create a brand new group, +that is, the start position will take effect and delivery will start from there. + +### Durable Queue Groups + +As described above, for non durable queue subsribers, when the last member leaves the group, +that group is removed. A durable queue group allows you to have all members leave but still +maintain state. When a member re-joins, it starts at the last position in that group. + +#### Creating a Durable Queue Group + +A durable queue group is created in a similar manner as that of a standard queue group, +except the `DurableName` option must be used to specify durability. + +```go +sc.QueueSubscribe("foo", "bar", qcb, stan.DurableName("dur")) +``` +A group called `dur:bar` (the concatenation of durable name and group name) is created in +the server. This means two things: + +- The character `:` is not allowed for a queue subscriber's durable name. +- Durable and non-durable queue groups with the same name can coexist. + +```go +// Non durable queue subscriber on group "bar" +qsub, _ := sc.QueueSubscribe("foo", "bar", qcb) + +// Durable queue subscriber on group "bar" +durQsub, _ := sc.QueueSubscribe("foo", "bar", qcb, stan.DurableName("mydurablegroup")) + +// The same message produced on "foo" would be received by both queue subscribers. 
+``` + +#### Start Position + +The rules for non-durable queue subscribers apply to durable subscribers. + +#### Leaving the Group + +As for non-durable queue subscribers, if a member's connection is closed, or if +`Unsubscribe` its called, the member leaves the group. Any unacknowledged message +is transfered to remaining members. See *Closing the Group* for important difference +with non-durable queue subscribers. + +#### Closing the Group + +The *last* member calling `Unsubscribe` will close (that is destroy) the +group. So if you want to maintain durability of the group, you should not be +calling `Unsubscribe`. + +So unlike for non-durable queue subscribers, it is possible to maintain a queue group +with no member in the server. When a new member re-joins the durable queue group, +it will resume from where the group left of, actually first receiving all unacknowledged +messages that may have been left when the last member previously left. + + +### Wildcard Subscriptions + +NATS Streaming subscriptions **do not** support wildcards. + + +## Advanced Usage + +### Asynchronous Publishing + +The basic publish API (`Publish(subject, payload)`) is synchronous; it does not return control to the caller until the NATS Streaming server has acknowledged receipt of the message. To accomplish this, a [NUID](https://github.com/nats-io/nuid) is generated for the message on creation, and the client library waits for a publish acknowledgement from the server with a matching NUID before it returns control to the caller, possibly with an error indicating that the operation was not successful due to some server problem or authorization error. + +Advanced users may wish to process these publish acknowledgements manually to achieve higher publish throughput by not waiting on individual acknowledgements during the publish operation. 
An asynchronous publish API is provided for this purpose: + +```go + ackHandler := func(ackedNuid string, err error) { + if err != nil { + log.Printf("Warning: error publishing msg id %s: %v\n", ackedNuid, err.Error()) + } else { + log.Printf("Received ack for msg id %s\n", ackedNuid) + } + } + + // can also use PublishAsyncWithReply(subj, replysubj, payload, ah) + nuid, err := sc.PublishAsync("foo", []byte("Hello World"), ackHandler) // returns immediately + if err != nil { + log.Printf("Error publishing msg %s: %v\n", nuid, err.Error()) + } +``` + +### Message Acknowledgements and Redelivery + +NATS Streaming offers At-Least-Once delivery semantics, meaning that once a message has been delivered to an eligible subscriber, if an acknowledgement is not received within the configured timeout interval, NATS Streaming will attempt redelivery of the message. +This timeout interval is specified by the subscription option `AckWait`, which defaults to 30 seconds. + +By default, messages are automatically acknowledged by the NATS Streaming client library after the subscriber's message handler is invoked. However, there may be cases in which the subscribing client wishes to accelerate or defer acknowledgement of the message. +To do this, the client must set manual acknowledgement mode on the subscription, and invoke `Ack()` on the `Msg`. ex: + +```go +// Subscribe with manual ack mode, and set AckWait to 60 seconds +aw, _ := time.ParseDuration("60s") +sub, err := sc.Subscribe("foo", func(m *stan.Msg) { + m.Ack() // ack message before performing I/O intensive operation + ///... + fmt.Printf("Received a message: %s\n", string(m.Data)) +}, stan.SetManualAckMode(), stan.AckWait(aw)) +``` + +## Rate limiting/matching + +A classic problem of publish-subscribe messaging is matching the rate of message producers with the rate of message consumers. +Message producers can often outpace the speed of the subscribers that are consuming their messages. 
+This mismatch is commonly called a "fast producer/slow consumer" problem, and may result in dramatic resource utilization spikes in the underlying messaging system as it tries to buffer messages until the slow consumer(s) can catch up. + +### Publisher rate limiting + +NATS Streaming provides a connection option called `MaxPubAcksInflight` that effectively limits the number of unacknowledged messages that a publisher may have in-flight at any given time. When this maximum is reached, further `PublishAsync()` calls will block until the number of unacknowledged messages falls below the specified limit. ex: + +```go +sc, _ := stan.Connect(clusterID, clientID, MaxPubAcksInflight(25)) + +ah := func(nuid string, err error) { + // process the ack + ... +} + +for i := 1; i < 1000; i++ { + // If the server is unable to keep up with the publisher, the number of oustanding acks will eventually + // reach the max and this call will block + guid, _ := sc.PublishAsync("foo", []byte("Hello World"), ah) +} +``` + +### Subscriber rate limiting + +Rate limiting may also be accomplished on the subscriber side, on a per-subscription basis, using a subscription option called `MaxInflight`. +This option specifies the maximum number of outstanding acknowledgements (messages that have been delivered but not acknowledged) that NATS Streaming will allow for a given subscription. +When this limit is reached, NATS Streaming will suspend delivery of messages to this subscription until the number of unacknowledged messages falls below the specified limit. ex: + +```go +// Subscribe with manual ack mode and a max in-flight limit of 25 +sc.Subscribe("foo", func(m *stan.Msg) { + fmt.Printf("Received message #: %s\n", string(m.Data)) + ... + // Does not ack, or takes a very long time to ack + ... 
+ // Message delivery will suspend when the number of unacknowledged messages reaches 25 +}, stan.SetManualAckMode(), stan.MaxInflight(25)) + +``` + +## License + +(The MIT License) + +Copyright (c) 2012-2016 Apcera Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/vendor/github.com/nats-io/go-nats-streaming/TODO.md b/vendor/github.com/nats-io/go-nats-streaming/TODO.md new file mode 100644 index 000000000..581d347bf --- /dev/null +++ b/vendor/github.com/nats-io/go-nats-streaming/TODO.md @@ -0,0 +1,15 @@ + +- [ ] Retry limits? +- [ ] Server Store Limits (time, msgs, byte) +- [X] Change time to deltas +- [X] Server heartbeat, release dead clients. +- [X] Require clientID for published messages, error if not registered. +- [X] Check for need of ackMap (out of order re-delivery to queue subscribers). +- [X] Redelivered Flag for Msg. 
+- [X] Queue Subscribers +- [X] Durable Subscribers (survive reconnect, etc) +- [X] Start Positions on Subscribers +- [X] Ack for delivered just Reply? No need on ConnectedResponse? +- [X] PublishWithReply, or option. +- [X] Data Races in Server. +- [X] Manual Ack? diff --git a/vendor/github.com/nats-io/go-nats-streaming/pb/protocol.pb.go b/vendor/github.com/nats-io/go-nats-streaming/pb/protocol.pb.go new file mode 100644 index 000000000..e0a3da527 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats-streaming/pb/protocol.pb.go @@ -0,0 +1,2794 @@ +// Code generated by protoc-gen-gogo. +// source: protocol.proto +// DO NOT EDIT! + +/* + Package pb is a generated protocol buffer package. + + It is generated from these files: + protocol.proto + + It has these top-level messages: + PubMsg + PubAck + MsgProto + Ack + ConnectRequest + ConnectResponse + SubscriptionRequest + SubscriptionResponse + UnsubscribeRequest + CloseRequest + CloseResponse +*/ +package pb + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Enum for start position type. 
+type StartPosition int32 + +const ( + StartPosition_NewOnly StartPosition = 0 + StartPosition_LastReceived StartPosition = 1 + StartPosition_TimeDeltaStart StartPosition = 2 + StartPosition_SequenceStart StartPosition = 3 + StartPosition_First StartPosition = 4 +) + +var StartPosition_name = map[int32]string{ + 0: "NewOnly", + 1: "LastReceived", + 2: "TimeDeltaStart", + 3: "SequenceStart", + 4: "First", +} +var StartPosition_value = map[string]int32{ + "NewOnly": 0, + "LastReceived": 1, + "TimeDeltaStart": 2, + "SequenceStart": 3, + "First": 4, +} + +func (x StartPosition) String() string { + return proto.EnumName(StartPosition_name, int32(x)) +} + +// How messages are delivered to the STAN cluster +type PubMsg struct { + ClientID string `protobuf:"bytes,1,opt,name=clientID,proto3" json:"clientID,omitempty"` + Guid string `protobuf:"bytes,2,opt,name=guid,proto3" json:"guid,omitempty"` + Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"` + Reply string `protobuf:"bytes,4,opt,name=reply,proto3" json:"reply,omitempty"` + Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` + Sha256 []byte `protobuf:"bytes,10,opt,name=sha256,proto3" json:"sha256,omitempty"` +} + +func (m *PubMsg) Reset() { *m = PubMsg{} } +func (m *PubMsg) String() string { return proto.CompactTextString(m) } +func (*PubMsg) ProtoMessage() {} + +// Used to ACK to publishers +type PubAck struct { + Guid string `protobuf:"bytes,1,opt,name=guid,proto3" json:"guid,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *PubAck) Reset() { *m = PubAck{} } +func (m *PubAck) String() string { return proto.CompactTextString(m) } +func (*PubAck) ProtoMessage() {} + +// Msg struct. Sequence is assigned for global ordering by +// the cluster after the publisher has been acknowledged. 
+type MsgProto struct { + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + Reply string `protobuf:"bytes,3,opt,name=reply,proto3" json:"reply,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Redelivered bool `protobuf:"varint,6,opt,name=redelivered,proto3" json:"redelivered,omitempty"` + CRC32 uint32 `protobuf:"varint,10,opt,name=CRC32,proto3" json:"CRC32,omitempty"` +} + +func (m *MsgProto) Reset() { *m = MsgProto{} } +func (m *MsgProto) String() string { return proto.CompactTextString(m) } +func (*MsgProto) ProtoMessage() {} + +// Ack will deliver an ack for a delivered msg. +type Ack struct { + Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + Sequence uint64 `protobuf:"varint,2,opt,name=sequence,proto3" json:"sequence,omitempty"` +} + +func (m *Ack) Reset() { *m = Ack{} } +func (m *Ack) String() string { return proto.CompactTextString(m) } +func (*Ack) ProtoMessage() {} + +// Connection Request +type ConnectRequest struct { + ClientID string `protobuf:"bytes,1,opt,name=clientID,proto3" json:"clientID,omitempty"` + HeartbeatInbox string `protobuf:"bytes,2,opt,name=heartbeatInbox,proto3" json:"heartbeatInbox,omitempty"` +} + +func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } +func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } +func (*ConnectRequest) ProtoMessage() {} + +// Response to a client connect +type ConnectResponse struct { + PubPrefix string `protobuf:"bytes,1,opt,name=pubPrefix,proto3" json:"pubPrefix,omitempty"` + SubRequests string `protobuf:"bytes,2,opt,name=subRequests,proto3" json:"subRequests,omitempty"` + UnsubRequests string `protobuf:"bytes,3,opt,name=unsubRequests,proto3" json:"unsubRequests,omitempty"` + 
CloseRequests string `protobuf:"bytes,4,opt,name=closeRequests,proto3" json:"closeRequests,omitempty"` + Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + SubCloseRequests string `protobuf:"bytes,6,opt,name=subCloseRequests,proto3" json:"subCloseRequests,omitempty"` + PublicKey string `protobuf:"bytes,100,opt,name=publicKey,proto3" json:"publicKey,omitempty"` +} + +func (m *ConnectResponse) Reset() { *m = ConnectResponse{} } +func (m *ConnectResponse) String() string { return proto.CompactTextString(m) } +func (*ConnectResponse) ProtoMessage() {} + +// Protocol for a client to subscribe +type SubscriptionRequest struct { + ClientID string `protobuf:"bytes,1,opt,name=clientID,proto3" json:"clientID,omitempty"` + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + QGroup string `protobuf:"bytes,3,opt,name=qGroup,proto3" json:"qGroup,omitempty"` + Inbox string `protobuf:"bytes,4,opt,name=inbox,proto3" json:"inbox,omitempty"` + MaxInFlight int32 `protobuf:"varint,5,opt,name=maxInFlight,proto3" json:"maxInFlight,omitempty"` + AckWaitInSecs int32 `protobuf:"varint,6,opt,name=ackWaitInSecs,proto3" json:"ackWaitInSecs,omitempty"` + DurableName string `protobuf:"bytes,7,opt,name=durableName,proto3" json:"durableName,omitempty"` + StartPosition StartPosition `protobuf:"varint,10,opt,name=startPosition,proto3,enum=pb.StartPosition" json:"startPosition,omitempty"` + StartSequence uint64 `protobuf:"varint,11,opt,name=startSequence,proto3" json:"startSequence,omitempty"` + StartTimeDelta int64 `protobuf:"varint,12,opt,name=startTimeDelta,proto3" json:"startTimeDelta,omitempty"` +} + +func (m *SubscriptionRequest) Reset() { *m = SubscriptionRequest{} } +func (m *SubscriptionRequest) String() string { return proto.CompactTextString(m) } +func (*SubscriptionRequest) ProtoMessage() {} + +// Response for SubscriptionRequest and UnsubscribeRequests +type SubscriptionResponse struct { + AckInbox string 
`protobuf:"bytes,2,opt,name=ackInbox,proto3" json:"ackInbox,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *SubscriptionResponse) Reset() { *m = SubscriptionResponse{} } +func (m *SubscriptionResponse) String() string { return proto.CompactTextString(m) } +func (*SubscriptionResponse) ProtoMessage() {} + +// Protocol for a clients to unsubscribe. Will return a SubscriptionResponse +type UnsubscribeRequest struct { + ClientID string `protobuf:"bytes,1,opt,name=clientID,proto3" json:"clientID,omitempty"` + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + Inbox string `protobuf:"bytes,3,opt,name=inbox,proto3" json:"inbox,omitempty"` + DurableName string `protobuf:"bytes,4,opt,name=durableName,proto3" json:"durableName,omitempty"` +} + +func (m *UnsubscribeRequest) Reset() { *m = UnsubscribeRequest{} } +func (m *UnsubscribeRequest) String() string { return proto.CompactTextString(m) } +func (*UnsubscribeRequest) ProtoMessage() {} + +// Protocol for a client to close a connection +type CloseRequest struct { + ClientID string `protobuf:"bytes,1,opt,name=clientID,proto3" json:"clientID,omitempty"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} + +// Response for CloseRequest +type CloseResponse struct { + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *CloseResponse) Reset() { *m = CloseResponse{} } +func (m *CloseResponse) String() string { return proto.CompactTextString(m) } +func (*CloseResponse) ProtoMessage() {} + +func init() { + proto.RegisterType((*PubMsg)(nil), "pb.PubMsg") + proto.RegisterType((*PubAck)(nil), "pb.PubAck") + proto.RegisterType((*MsgProto)(nil), "pb.MsgProto") + proto.RegisterType((*Ack)(nil), "pb.Ack") + proto.RegisterType((*ConnectRequest)(nil), "pb.ConnectRequest") + 
proto.RegisterType((*ConnectResponse)(nil), "pb.ConnectResponse") + proto.RegisterType((*SubscriptionRequest)(nil), "pb.SubscriptionRequest") + proto.RegisterType((*SubscriptionResponse)(nil), "pb.SubscriptionResponse") + proto.RegisterType((*UnsubscribeRequest)(nil), "pb.UnsubscribeRequest") + proto.RegisterType((*CloseRequest)(nil), "pb.CloseRequest") + proto.RegisterType((*CloseResponse)(nil), "pb.CloseResponse") + proto.RegisterEnum("pb.StartPosition", StartPosition_name, StartPosition_value) +} +func (m *PubMsg) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PubMsg) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClientID) > 0 { + data[i] = 0xa + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.ClientID))) + i += copy(data[i:], m.ClientID) + } + if len(m.Guid) > 0 { + data[i] = 0x12 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Guid))) + i += copy(data[i:], m.Guid) + } + if len(m.Subject) > 0 { + data[i] = 0x1a + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Subject))) + i += copy(data[i:], m.Subject) + } + if len(m.Reply) > 0 { + data[i] = 0x22 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Reply))) + i += copy(data[i:], m.Reply) + } + if m.Data != nil { + if len(m.Data) > 0 { + data[i] = 0x2a + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Data))) + i += copy(data[i:], m.Data) + } + } + if m.Sha256 != nil { + if len(m.Sha256) > 0 { + data[i] = 0x52 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Sha256))) + i += copy(data[i:], m.Sha256) + } + } + return i, nil +} + +func (m *PubAck) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PubAck) MarshalTo(data []byte) (int, error) { + var i int + _ = 
i + var l int + _ = l + if len(m.Guid) > 0 { + data[i] = 0xa + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Guid))) + i += copy(data[i:], m.Guid) + } + if len(m.Error) > 0 { + data[i] = 0x12 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + } + return i, nil +} + +func (m *MsgProto) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MsgProto) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Sequence != 0 { + data[i] = 0x8 + i++ + i = encodeVarintProtocol(data, i, uint64(m.Sequence)) + } + if len(m.Subject) > 0 { + data[i] = 0x12 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Subject))) + i += copy(data[i:], m.Subject) + } + if len(m.Reply) > 0 { + data[i] = 0x1a + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Reply))) + i += copy(data[i:], m.Reply) + } + if m.Data != nil { + if len(m.Data) > 0 { + data[i] = 0x22 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Data))) + i += copy(data[i:], m.Data) + } + } + if m.Timestamp != 0 { + data[i] = 0x28 + i++ + i = encodeVarintProtocol(data, i, uint64(m.Timestamp)) + } + if m.Redelivered { + data[i] = 0x30 + i++ + if m.Redelivered { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.CRC32 != 0 { + data[i] = 0x50 + i++ + i = encodeVarintProtocol(data, i, uint64(m.CRC32)) + } + return i, nil +} + +func (m *Ack) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Ack) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Subject) > 0 { + data[i] = 0xa + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Subject))) + i += copy(data[i:], m.Subject) + } + if m.Sequence != 0 { + data[i] = 0x10 + i++ 
+ i = encodeVarintProtocol(data, i, uint64(m.Sequence)) + } + return i, nil +} + +func (m *ConnectRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConnectRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClientID) > 0 { + data[i] = 0xa + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.ClientID))) + i += copy(data[i:], m.ClientID) + } + if len(m.HeartbeatInbox) > 0 { + data[i] = 0x12 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.HeartbeatInbox))) + i += copy(data[i:], m.HeartbeatInbox) + } + return i, nil +} + +func (m *ConnectResponse) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConnectResponse) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PubPrefix) > 0 { + data[i] = 0xa + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.PubPrefix))) + i += copy(data[i:], m.PubPrefix) + } + if len(m.SubRequests) > 0 { + data[i] = 0x12 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.SubRequests))) + i += copy(data[i:], m.SubRequests) + } + if len(m.UnsubRequests) > 0 { + data[i] = 0x1a + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.UnsubRequests))) + i += copy(data[i:], m.UnsubRequests) + } + if len(m.CloseRequests) > 0 { + data[i] = 0x22 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.CloseRequests))) + i += copy(data[i:], m.CloseRequests) + } + if len(m.Error) > 0 { + data[i] = 0x2a + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + } + if len(m.SubCloseRequests) > 0 { + data[i] = 0x32 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.SubCloseRequests))) + i += copy(data[i:], m.SubCloseRequests) + } + if 
len(m.PublicKey) > 0 { + data[i] = 0xa2 + i++ + data[i] = 0x6 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.PublicKey))) + i += copy(data[i:], m.PublicKey) + } + return i, nil +} + +func (m *SubscriptionRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubscriptionRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClientID) > 0 { + data[i] = 0xa + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.ClientID))) + i += copy(data[i:], m.ClientID) + } + if len(m.Subject) > 0 { + data[i] = 0x12 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Subject))) + i += copy(data[i:], m.Subject) + } + if len(m.QGroup) > 0 { + data[i] = 0x1a + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.QGroup))) + i += copy(data[i:], m.QGroup) + } + if len(m.Inbox) > 0 { + data[i] = 0x22 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Inbox))) + i += copy(data[i:], m.Inbox) + } + if m.MaxInFlight != 0 { + data[i] = 0x28 + i++ + i = encodeVarintProtocol(data, i, uint64(m.MaxInFlight)) + } + if m.AckWaitInSecs != 0 { + data[i] = 0x30 + i++ + i = encodeVarintProtocol(data, i, uint64(m.AckWaitInSecs)) + } + if len(m.DurableName) > 0 { + data[i] = 0x3a + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.DurableName))) + i += copy(data[i:], m.DurableName) + } + if m.StartPosition != 0 { + data[i] = 0x50 + i++ + i = encodeVarintProtocol(data, i, uint64(m.StartPosition)) + } + if m.StartSequence != 0 { + data[i] = 0x58 + i++ + i = encodeVarintProtocol(data, i, uint64(m.StartSequence)) + } + if m.StartTimeDelta != 0 { + data[i] = 0x60 + i++ + i = encodeVarintProtocol(data, i, uint64(m.StartTimeDelta)) + } + return i, nil +} + +func (m *SubscriptionResponse) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if 
err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubscriptionResponse) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AckInbox) > 0 { + data[i] = 0x12 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.AckInbox))) + i += copy(data[i:], m.AckInbox) + } + if len(m.Error) > 0 { + data[i] = 0x1a + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + } + return i, nil +} + +func (m *UnsubscribeRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *UnsubscribeRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClientID) > 0 { + data[i] = 0xa + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.ClientID))) + i += copy(data[i:], m.ClientID) + } + if len(m.Subject) > 0 { + data[i] = 0x12 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Subject))) + i += copy(data[i:], m.Subject) + } + if len(m.Inbox) > 0 { + data[i] = 0x1a + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Inbox))) + i += copy(data[i:], m.Inbox) + } + if len(m.DurableName) > 0 { + data[i] = 0x22 + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.DurableName))) + i += copy(data[i:], m.DurableName) + } + return i, nil +} + +func (m *CloseRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CloseRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClientID) > 0 { + data[i] = 0xa + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.ClientID))) + i += copy(data[i:], m.ClientID) + } + return i, nil +} + +func (m *CloseResponse) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, 
size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CloseResponse) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Error) > 0 { + data[i] = 0xa + i++ + i = encodeVarintProtocol(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + } + return i, nil +} + +func encodeFixed64Protocol(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Protocol(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintProtocol(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *PubMsg) Size() (n int) { + var l int + _ = l + l = len(m.ClientID) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Guid) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Subject) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Reply) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + if m.Data != nil { + l = len(m.Data) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + } + if m.Sha256 != nil { + l = len(m.Sha256) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + } + return n +} + +func (m *PubAck) Size() (n int) { + var l int + _ = l + l = len(m.Guid) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + return n +} + +func (m *MsgProto) Size() (n int) { + var l int + _ = l + if 
m.Sequence != 0 { + n += 1 + sovProtocol(uint64(m.Sequence)) + } + l = len(m.Subject) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Reply) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + if m.Data != nil { + l = len(m.Data) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + } + if m.Timestamp != 0 { + n += 1 + sovProtocol(uint64(m.Timestamp)) + } + if m.Redelivered { + n += 2 + } + if m.CRC32 != 0 { + n += 1 + sovProtocol(uint64(m.CRC32)) + } + return n +} + +func (m *Ack) Size() (n int) { + var l int + _ = l + l = len(m.Subject) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sovProtocol(uint64(m.Sequence)) + } + return n +} + +func (m *ConnectRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClientID) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.HeartbeatInbox) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + return n +} + +func (m *ConnectResponse) Size() (n int) { + var l int + _ = l + l = len(m.PubPrefix) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.SubRequests) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.UnsubRequests) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.CloseRequests) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.SubCloseRequests) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.PublicKey) + if l > 0 { + n += 2 + l + sovProtocol(uint64(l)) + } + return n +} + +func (m *SubscriptionRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClientID) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Subject) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.QGroup) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Inbox) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + if m.MaxInFlight != 0 { + n += 1 + 
sovProtocol(uint64(m.MaxInFlight)) + } + if m.AckWaitInSecs != 0 { + n += 1 + sovProtocol(uint64(m.AckWaitInSecs)) + } + l = len(m.DurableName) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + if m.StartPosition != 0 { + n += 1 + sovProtocol(uint64(m.StartPosition)) + } + if m.StartSequence != 0 { + n += 1 + sovProtocol(uint64(m.StartSequence)) + } + if m.StartTimeDelta != 0 { + n += 1 + sovProtocol(uint64(m.StartTimeDelta)) + } + return n +} + +func (m *SubscriptionResponse) Size() (n int) { + var l int + _ = l + l = len(m.AckInbox) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + return n +} + +func (m *UnsubscribeRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClientID) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Subject) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.Inbox) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + l = len(m.DurableName) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + return n +} + +func (m *CloseRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClientID) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + return n +} + +func (m *CloseResponse) Size() (n int) { + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sovProtocol(uint64(l)) + } + return n +} + +func sovProtocol(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozProtocol(x uint64) (n int) { + return sovProtocol(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PubMsg) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubMsg: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubMsg: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Guid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Guid = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subject = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reply", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reply = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sha256", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sha256 = append(m.Sha256[:0], data[iNdEx:postIndex]...) + if m.Sha256 == nil { + m.Sha256 = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PubAck) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubAck: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubAck: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Guid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Guid = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgProto) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgProto: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgProto: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Sequence |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subject = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reply", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reply = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + 
return ErrInvalidLengthProtocol + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Timestamp |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Redelivered", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Redelivered = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CRC32", wireType) + } + m.CRC32 = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CRC32 |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ack) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ack: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ack: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subject = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Sequence |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatInbox", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HeartbeatInbox = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil 
+} +func (m *ConnectResponse) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubPrefix = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubRequests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubRequests = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnsubRequests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnsubRequests = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloseRequests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloseRequests = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
SubCloseRequests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubCloseRequests = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PublicKey = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscriptionRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: SubscriptionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscriptionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subject = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QGroup = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inbox", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inbox = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxInFlight", wireType) + } + m.MaxInFlight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxInFlight |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AckWaitInSecs", wireType) + } + m.AckWaitInSecs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AckWaitInSecs |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DurableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DurableName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartPosition", wireType) + } + m.StartPosition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.StartPosition |= (StartPosition(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartSequence", wireType) + } + m.StartSequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.StartSequence |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeDelta", wireType) + } + m.StartTimeDelta = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.StartTimeDelta |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscriptionResponse) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscriptionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscriptionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AckInbox", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AckInbox = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil 
+} +func (m *UnsubscribeRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnsubscribeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnsubscribeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subject = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inbox", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inbox = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DurableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DurableName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloseRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloseRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloseRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloseResponse) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProtocol + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProtocol + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProtocol(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProtocol + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipProtocol(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProtocol + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProtocol + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProtocol + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthProtocol + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProtocol + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipProtocol(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthProtocol = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProtocol = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/nats-io/go-nats-streaming/pb/protocol.proto b/vendor/github.com/nats-io/go-nats-streaming/pb/protocol.proto new file mode 100644 index 000000000..a1eea67af --- /dev/null +++ b/vendor/github.com/nats-io/go-nats-streaming/pb/protocol.proto @@ -0,0 +1,115 @@ +// Copyright 2016 Apcera Inc. All rights reserved. +// +// Uses https://github.com/gogo/protobuf +// compiled via `protoc -I=. -I=$GOPATH/src --gogofaster_out=. protocol.proto` + +syntax = "proto3"; +package pb; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +// How messages are delivered to the STAN cluster +message PubMsg { + string clientID = 1; // ClientID + string guid = 2; // guid + string subject = 3; // subject + string reply = 4; // optional reply + bytes data = 5; // payload + + bytes sha256 = 10; // optional sha256 of data +} + +// Used to ACK to publishers +message PubAck { + string guid = 1; // guid + string error = 2; // err string, empty/omitted if no error +} + +// Msg struct. 
Sequence is assigned for global ordering by +// the cluster after the publisher has been acknowledged. +message MsgProto { + uint64 sequence = 1; // globally ordered sequence number for the subject's channel + string subject = 2; // subject + string reply = 3; // optional reply + bytes data = 4; // payload + int64 timestamp = 5; // received timestamp + bool redelivered = 6; // Flag specifying if the message is being redelivered + + uint32 CRC32 = 10; // optional IEEE CRC32 +} + +// Ack will deliver an ack for a delivered msg. +message Ack { + string subject = 1; // Subject + uint64 sequence = 2; // Sequence to acknowledge +} + +// Connection Request +message ConnectRequest { + string clientID = 1; // Client name/identifier. + string heartbeatInbox = 2; // Inbox for server initiated heartbeats. +} + +// Response to a client connect +message ConnectResponse { + string pubPrefix = 1; // Prefix to use when publishing to this STAN cluster + string subRequests = 2; // Subject to use for subscription requests + string unsubRequests = 3; // Subject to use for unsubscribe requests + string closeRequests = 4; // Subject for closing the stan connection + string error = 5; // err string, empty/omitted if no error + string subCloseRequests = 6; // Subject to use for subscription close requests + + string publicKey = 100; // Possibly used to sign acks, etc. +} + +// Enum for start position type. +enum StartPosition { + NewOnly = 0; + LastReceived = 1; + TimeDeltaStart = 2; + SequenceStart = 3; + First = 4; + } + +// Protocol for a client to subscribe +message SubscriptionRequest { + string clientID = 1; // ClientID + string subject = 2; // Formal subject to subscribe to, e.g. 
foo.bar + string qGroup = 3; // Optional queue group + string inbox = 4; // Inbox subject to deliver messages on + int32 maxInFlight = 5; // Maximum inflight messages without an ack allowed + int32 ackWaitInSecs = 6; // Timeout for receiving an ack from the client + string durableName = 7; // Optional durable name which survives client restarts + StartPosition startPosition = 10; // Start position + uint64 startSequence = 11; // Optional start sequence number + int64 startTimeDelta = 12; // Optional start time +} + +// Response for SubscriptionRequest and UnsubscribeRequests +message SubscriptionResponse { + string ackInbox = 2; // ackInbox for sending acks + string error = 3; // err string, empty/omitted if no error +} + +// Protocol for a clients to unsubscribe. Will return a SubscriptionResponse +message UnsubscribeRequest { + string clientID = 1; // ClientID + string subject = 2; // subject for the subscription + string inbox = 3; // Inbox subject to identify subscription + string durableName = 4; // Optional durable name which survives client restarts +} + +// Protocol for a client to close a connection +message CloseRequest { + string clientID = 1; // Client name provided to Connect() requests +} + +// Response for CloseRequest +message CloseResponse { + string error = 1; // err string, empty/omitted if no error +} diff --git a/vendor/github.com/nats-io/go-nats-streaming/stan.go b/vendor/github.com/nats-io/go-nats-streaming/stan.go new file mode 100644 index 000000000..0bfea014c --- /dev/null +++ b/vendor/github.com/nats-io/go-nats-streaming/stan.go @@ -0,0 +1,476 @@ +// Copyright 2016 Apcera Inc. All rights reserved. + +// Package stan is a Go client for the NATS Streaming messaging system (https://nats.io). 
+package stan + +import ( + "errors" + "fmt" + "runtime" + "sync" + "time" + + "github.com/nats-io/go-nats" + "github.com/nats-io/go-nats-streaming/pb" + "github.com/nats-io/nuid" +) + +// Version is the NATS Streaming Go Client version +const Version = "0.3.4" + +const ( + // DefaultNatsURL is the default URL the client connects to + DefaultNatsURL = "nats://localhost:4222" + // DefaultConnectWait is the default timeout used for the connect operation + DefaultConnectWait = 2 * time.Second + // DefaultDiscoverPrefix is the prefix subject used to connect to the NATS Streaming server + DefaultDiscoverPrefix = "_STAN.discover" + // DefaultACKPrefix is the prefix subject used to send ACKs to the NATS Streaming server + DefaultACKPrefix = "_STAN.acks" + // DefaultMaxPubAcksInflight is the default maximum number of published messages + // without outstanding ACKs from the server + DefaultMaxPubAcksInflight = 16384 +) + +// Conn represents a connection to the NATS Streaming subsystem. It can Publish and +// Subscribe to messages within the NATS Streaming cluster. +type Conn interface { + // Publish + Publish(subject string, data []byte) error + PublishAsync(subject string, data []byte, ah AckHandler) (string, error) + + // Subscribe + Subscribe(subject string, cb MsgHandler, opts ...SubscriptionOption) (Subscription, error) + + // QueueSubscribe + QueueSubscribe(subject, qgroup string, cb MsgHandler, opts ...SubscriptionOption) (Subscription, error) + + // Close + Close() error + + // NatsConn returns the underlying NATS conn. Use this with care. For + // example, closing the wrapped NATS conn will put the NATS Streaming Conn + // in an invalid state. 
+ NatsConn() *nats.Conn +} + +// Errors +var ( + ErrConnectReqTimeout = errors.New("stan: connect request timeout") + ErrCloseReqTimeout = errors.New("stan: close request timeout") + ErrSubReqTimeout = errors.New("stan: subscribe request timeout") + ErrUnsubReqTimeout = errors.New("stan: unsubscribe request timeout") + ErrConnectionClosed = errors.New("stan: connection closed") + ErrTimeout = errors.New("stan: publish ack timeout") + ErrBadAck = errors.New("stan: malformed ack") + ErrBadSubscription = errors.New("stan: invalid subscription") + ErrBadConnection = errors.New("stan: invalid connection") + ErrManualAck = errors.New("stan: cannot manually ack in auto-ack mode") + ErrNilMsg = errors.New("stan: nil message") + ErrNoServerSupport = errors.New("stan: not supported by server") +) + +// AckHandler is used for Async Publishing to provide status of the ack. +// The func will be passed teh GUID and any error state. No error means the +// message was successfully received by NATS Streaming. +type AckHandler func(string, error) + +// Options can be used to a create a customized connection. +type Options struct { + NatsURL string + NatsConn *nats.Conn + ConnectTimeout time.Duration + AckTimeout time.Duration + DiscoverPrefix string + MaxPubAcksInflight int +} + +// DefaultOptions are the NATS Streaming client's default options +var DefaultOptions = Options{ + NatsURL: DefaultNatsURL, + ConnectTimeout: DefaultConnectWait, + AckTimeout: DefaultAckWait, + DiscoverPrefix: DefaultDiscoverPrefix, + MaxPubAcksInflight: DefaultMaxPubAcksInflight, +} + +// Option is a function on the options for a connection. +type Option func(*Options) error + +// NatsURL is an Option to set the URL the client should connect to. +func NatsURL(u string) Option { + return func(o *Options) error { + o.NatsURL = u + return nil + } +} + +// ConnectWait is an Option to set the timeout for establishing a connection. 
+func ConnectWait(t time.Duration) Option { + return func(o *Options) error { + o.ConnectTimeout = t + return nil + } +} + +// PubAckWait is an Option to set the timeout for waiting for an ACK for a +// published message. +func PubAckWait(t time.Duration) Option { + return func(o *Options) error { + o.AckTimeout = t + return nil + } +} + +// MaxPubAcksInflight is an Option to set the maximum number of published +// messages without outstanding ACKs from the server. +func MaxPubAcksInflight(max int) Option { + return func(o *Options) error { + o.MaxPubAcksInflight = max + return nil + } +} + +// NatsConn is an Option to set the underlying NATS connection to be used +// by a NATS Streaming Conn object. +func NatsConn(nc *nats.Conn) Option { + return func(o *Options) error { + o.NatsConn = nc + return nil + } +} + +// A conn represents a bare connection to a stan cluster. +type conn struct { + sync.RWMutex + clientID string + serverID string + pubPrefix string // Publish prefix set by stan, append our subject. + subRequests string // Subject to send subscription requests. + unsubRequests string // Subject to send unsubscribe requests. + subCloseRequests string // Subject to send subscription close requests. + closeRequests string // Subject to send close requests. + ackSubject string // publish acks + ackSubscription *nats.Subscription + hbSubscription *nats.Subscription + subMap map[string]*subscription + pubAckMap map[string]*ack + pubAckChan chan (struct{}) + opts Options + nc *nats.Conn + ncOwned bool // NATS Streaming created the connection, so needs to close it. +} + +// Closure for ack contexts. +type ack struct { + t *time.Timer + ah AckHandler + ch chan error +} + +// Connect will form a connection to the NATS Streaming subsystem. 
+func Connect(stanClusterID, clientID string, options ...Option) (Conn, error) { + // Process Options + c := conn{clientID: clientID, opts: DefaultOptions} + for _, opt := range options { + if err := opt(&c.opts); err != nil { + return nil, err + } + } + // Check if the user has provided a connection as an option + c.nc = c.opts.NatsConn + // Create a NATS connection if it doesn't exist. + if c.nc == nil { + nc, err := nats.Connect(c.opts.NatsURL, nats.Name(clientID)) + if err != nil { + return nil, err + } + c.nc = nc + c.ncOwned = true + } else if !c.nc.IsConnected() { + // Bail if the custom NATS connection is disconnected + return nil, ErrBadConnection + } + + // Create a heartbeat inbox + hbInbox := nats.NewInbox() + var err error + if c.hbSubscription, err = c.nc.Subscribe(hbInbox, c.processHeartBeat); err != nil { + c.Close() + return nil, err + } + + // Send Request to discover the cluster + discoverSubject := c.opts.DiscoverPrefix + "." + stanClusterID + req := &pb.ConnectRequest{ClientID: clientID, HeartbeatInbox: hbInbox} + b, _ := req.Marshal() + reply, err := c.nc.Request(discoverSubject, b, c.opts.ConnectTimeout) + if err != nil { + c.Close() + if err == nats.ErrTimeout { + return nil, ErrConnectReqTimeout + } + return nil, err + } + // Process the response, grab server pubPrefix + cr := &pb.ConnectResponse{} + err = cr.Unmarshal(reply.Data) + if err != nil { + c.Close() + return nil, err + } + if cr.Error != "" { + c.Close() + return nil, errors.New(cr.Error) + } + + // Capture cluster configuration endpoints to publish and subscribe/unsubscribe. + c.pubPrefix = cr.PubPrefix + c.subRequests = cr.SubRequests + c.unsubRequests = cr.UnsubRequests + c.subCloseRequests = cr.SubCloseRequests + c.closeRequests = cr.CloseRequests + + // Setup the ACK subscription + c.ackSubject = DefaultACKPrefix + "." 
+ nuid.Next() + if c.ackSubscription, err = c.nc.Subscribe(c.ackSubject, c.processAck); err != nil { + c.Close() + return nil, err + } + c.ackSubscription.SetPendingLimits(1024*1024, 32*1024*1024) + c.pubAckMap = make(map[string]*ack) + + // Create Subscription map + c.subMap = make(map[string]*subscription) + + c.pubAckChan = make(chan struct{}, c.opts.MaxPubAcksInflight) + + // Attach a finalizer + runtime.SetFinalizer(&c, func(sc *conn) { sc.Close() }) + + return &c, nil +} + +// Close a connection to the stan system. +func (sc *conn) Close() error { + if sc == nil { + return ErrBadConnection + } + + sc.Lock() + defer sc.Unlock() + + if sc.nc == nil { + // We are already closed. + return nil + } + + // Capture for NATS calls below. + nc := sc.nc + if sc.ncOwned { + defer nc.Close() + } + + // Signals we are closed. + sc.nc = nil + + // Now close ourselves. + if sc.ackSubscription != nil { + sc.ackSubscription.Unsubscribe() + } + + req := &pb.CloseRequest{ClientID: sc.clientID} + b, _ := req.Marshal() + reply, err := nc.Request(sc.closeRequests, b, sc.opts.ConnectTimeout) + if err != nil { + if err == nats.ErrTimeout { + return ErrCloseReqTimeout + } + return err + } + cr := &pb.CloseResponse{} + err = cr.Unmarshal(reply.Data) + if err != nil { + return err + } + if cr.Error != "" { + return errors.New(cr.Error) + } + return nil +} + +// NatsConn returns the underlying NATS conn. Use this with care. For example, +// closing the wrapped NATS conn will put the NATS Streaming Conn in an invalid +// state. +func (sc *conn) NatsConn() *nats.Conn { + return sc.nc +} + +// Process a heartbeat from the NATS Streaming cluster +func (sc *conn) processHeartBeat(m *nats.Msg) { + // No payload assumed, just reply. 
+ sc.RLock() + nc := sc.nc + sc.RUnlock() + if nc != nil { + nc.Publish(m.Reply, nil) + } +} + +// Process an ack from the NATS Streaming cluster +func (sc *conn) processAck(m *nats.Msg) { + pa := &pb.PubAck{} + err := pa.Unmarshal(m.Data) + if err != nil { + // FIXME, make closure to have context? + fmt.Printf("Error processing unmarshal\n") + return + } + + // Remove + a := sc.removeAck(pa.Guid) + if a != nil { + // Capture error if it exists. + if pa.Error != "" { + err = errors.New(pa.Error) + } + if a.ah != nil { + // Perform the ackHandler callback + a.ah(pa.Guid, err) + } else if a.ch != nil { + // Send to channel directly + a.ch <- err + } + } +} + +// Publish will publish to the cluster and wait for an ACK. +func (sc *conn) Publish(subject string, data []byte) error { + ch := make(chan error) + _, err := sc.publishAsync(subject, data, nil, ch) + if err == nil { + err = <-ch + } + return err +} + +// PublishAsync will publish to the cluster on pubPrefix+subject and asynchronously +// process the ACK or error state. It will return the GUID for the message being sent. +func (sc *conn) PublishAsync(subject string, data []byte, ah AckHandler) (string, error) { + return sc.publishAsync(subject, data, ah, nil) +} + +func (sc *conn) publishAsync(subject string, data []byte, ah AckHandler, ch chan error) (string, error) { + a := &ack{ah: ah, ch: ch} + sc.Lock() + if sc.nc == nil { + sc.Unlock() + return "", ErrConnectionClosed + } + + subj := sc.pubPrefix + "." + subject + // This is only what we need from PubMsg in the timer below, + // so do this so that pe doesn't escape (and we same on new object) + peGUID := nuid.Next() + pe := &pb.PubMsg{ClientID: sc.clientID, Guid: peGUID, Subject: subject, Data: data} + b, _ := pe.Marshal() + + // Map ack to guid. 
+ sc.pubAckMap[peGUID] = a + // snapshot + ackSubject := sc.ackSubject + ackTimeout := sc.opts.AckTimeout + pac := sc.pubAckChan + sc.Unlock() + + // Use the buffered channel to control the number of outstanding acks. + pac <- struct{}{} + + err := sc.nc.PublishRequest(subj, ackSubject, b) + if err != nil { + sc.removeAck(peGUID) + return "", err + } + + // Setup the timer for expiration. + sc.Lock() + a.t = time.AfterFunc(ackTimeout, func() { + sc.removeAck(peGUID) + if a.ah != nil { + ah(peGUID, ErrTimeout) + } else if a.ch != nil { + a.ch <- ErrTimeout + } + }) + sc.Unlock() + + return peGUID, nil +} + +// removeAck removes the ack from the pubAckMap and cancels any state, e.g. timers +func (sc *conn) removeAck(guid string) *ack { + var t *time.Timer + sc.Lock() + a := sc.pubAckMap[guid] + if a != nil { + t = a.t + delete(sc.pubAckMap, guid) + } + pac := sc.pubAckChan + sc.Unlock() + + // Cancel timer if needed. + if t != nil { + t.Stop() + } + + // Remove from channel to unblock PublishAsync + if a != nil && len(pac) > 0 { + <-pac + } + return a +} + +// Process an msg from the NATS Streaming cluster +func (sc *conn) processMsg(raw *nats.Msg) { + msg := &Msg{} + err := msg.Unmarshal(raw.Data) + if err != nil { + panic("Error processing unmarshal for msg") + } + // Lookup the subscription + sc.RLock() + nc := sc.nc + isClosed := nc == nil + sub := sc.subMap[raw.Subject] + sc.RUnlock() + + // Check if sub is no longer valid or connection has been closed. + if sub == nil || isClosed { + return + } + + // Store in msg for backlink + msg.Sub = sub + + sub.RLock() + cb := sub.cb + ackSubject := sub.ackInbox + isManualAck := sub.opts.ManualAcks + subsc := sub.sc // Can be nil if sub has been unsubscribed. 
+ sub.RUnlock() + + // Perform the callback + if cb != nil && subsc != nil { + cb(msg) + } + + // Proces auto-ack + if !isManualAck && nc != nil { + ack := &pb.Ack{Subject: msg.Subject, Sequence: msg.Sequence} + b, _ := ack.Marshal() + if err := nc.Publish(ackSubject, b); err != nil { + // FIXME(dlc) - Async error handler? Retry? + } + } +} diff --git a/vendor/github.com/nats-io/go-nats-streaming/sub.go b/vendor/github.com/nats-io/go-nats-streaming/sub.go new file mode 100644 index 000000000..164ab81de --- /dev/null +++ b/vendor/github.com/nats-io/go-nats-streaming/sub.go @@ -0,0 +1,472 @@ +// Copyright 2016 Apcera Inc. All rights reserved. + +// Package stan is a Go client for the NATS Streaming messaging system (https://nats.io). +package stan + +import ( + "errors" + "sync" + "time" + + "github.com/nats-io/go-nats" + "github.com/nats-io/go-nats-streaming/pb" +) + +const ( + // DefaultAckWait indicates how long the server should wait for an ACK before resending a message + DefaultAckWait = 30 * time.Second + // DefaultMaxInflight indicates how many messages with outstanding ACKs the server can send + DefaultMaxInflight = 1024 +) + +// Msg is the client defined message, which includes proto, then back link to subscription. +type Msg struct { + pb.MsgProto // MsgProto: Seq, Subject, Reply[opt], Data, Timestamp, CRC32[opt] + Sub Subscription +} + +// Subscriptions and Options + +// Subscription represents a subscription within the NATS Streaming cluster. Subscriptions +// will be rate matched and follow at-least delivery semantics. +type Subscription interface { + ClearMaxPending() error + Delivered() (int64, error) + Dropped() (int, error) + IsValid() bool + MaxPending() (int, int, error) + Pending() (int, int, error) + PendingLimits() (int, int, error) + SetPendingLimits(msgLimit, bytesLimit int) error + // Unsubscribe removes interest in the subscription. + // For durables, it means that the durable interest is also removed from + // the server. 
Restarting a durable with the same name will not resume + // the subscription, it will be considered a new one. + Unsubscribe() error + + // Close removes this subscriber from the server, but unlike Unsubscribe(), + // the durable interest is not removed. If the client has connected to a server + // for which this feature is not available, Close() will return a ErrNoServerSupport + // error. + Close() error +} + +// A subscription represents a subscription to a stan cluster. +type subscription struct { + sync.RWMutex + sc *conn + subject string + qgroup string + inbox string + ackInbox string + inboxSub *nats.Subscription + opts SubscriptionOptions + cb MsgHandler +} + +// SubscriptionOption is a function on the options for a subscription. +type SubscriptionOption func(*SubscriptionOptions) error + +// MsgHandler is a callback function that processes messages delivered to +// asynchronous subscribers. +type MsgHandler func(msg *Msg) + +// SubscriptionOptions are used to control the Subscription's behavior. +type SubscriptionOptions struct { + // DurableName, if set will survive client restarts. + DurableName string + // Controls the number of messages the cluster will have inflight without an ACK. + MaxInflight int + // Controls the time the cluster will wait for an ACK for a given message. + AckWait time.Duration + // StartPosition enum from proto. + StartAt pb.StartPosition + // Optional start sequence number. + StartSequence uint64 + // Optional start time. + StartTime time.Time + // Option to do Manual Acks + ManualAcks bool +} + +// DefaultSubscriptionOptions are the default subscriptions' options +var DefaultSubscriptionOptions = SubscriptionOptions{ + MaxInflight: DefaultMaxInflight, + AckWait: DefaultAckWait, +} + +// MaxInflight is an Option to set the maximum number of messages the cluster will send +// without an ACK. 
+func MaxInflight(m int) SubscriptionOption { + return func(o *SubscriptionOptions) error { + o.MaxInflight = m + return nil + } +} + +// AckWait is an Option to set the timeout for waiting for an ACK from the cluster's +// point of view for delivered messages. +func AckWait(t time.Duration) SubscriptionOption { + return func(o *SubscriptionOptions) error { + o.AckWait = t + return nil + } +} + +// StartAt sets the desired start position for the message stream. +func StartAt(sp pb.StartPosition) SubscriptionOption { + return func(o *SubscriptionOptions) error { + o.StartAt = sp + return nil + } +} + +// StartAtSequence sets the desired start sequence position and state. +func StartAtSequence(seq uint64) SubscriptionOption { + return func(o *SubscriptionOptions) error { + o.StartAt = pb.StartPosition_SequenceStart + o.StartSequence = seq + return nil + } +} + +// StartAtTime sets the desired start time position and state. +func StartAtTime(start time.Time) SubscriptionOption { + return func(o *SubscriptionOptions) error { + o.StartAt = pb.StartPosition_TimeDeltaStart + o.StartTime = start + return nil + } +} + +// StartAtTimeDelta sets the desired start time position and state using the delta. +func StartAtTimeDelta(ago time.Duration) SubscriptionOption { + return func(o *SubscriptionOptions) error { + o.StartAt = pb.StartPosition_TimeDeltaStart + o.StartTime = time.Now().Add(-ago) + return nil + } +} + +// StartWithLastReceived is a helper function to set start position to last received. +func StartWithLastReceived() SubscriptionOption { + return func(o *SubscriptionOptions) error { + o.StartAt = pb.StartPosition_LastReceived + return nil + } +} + +// DeliverAllAvailable will deliver all messages available. +func DeliverAllAvailable() SubscriptionOption { + return func(o *SubscriptionOptions) error { + o.StartAt = pb.StartPosition_First + return nil + } +} + +// SetManualAckMode will allow clients to control their own acks to delivered messages. 
+func SetManualAckMode() SubscriptionOption { + return func(o *SubscriptionOptions) error { + o.ManualAcks = true + return nil + } +} + +// DurableName sets the DurableName for the subcriber. +func DurableName(name string) SubscriptionOption { + return func(o *SubscriptionOptions) error { + o.DurableName = name + return nil + } +} + +// Subscribe will perform a subscription with the given options to the NATS Streaming cluster. +func (sc *conn) Subscribe(subject string, cb MsgHandler, options ...SubscriptionOption) (Subscription, error) { + return sc.subscribe(subject, "", cb, options...) +} + +// QueueSubscribe will perform a queue subscription with the given options to the NATS Streaming cluster. +func (sc *conn) QueueSubscribe(subject, qgroup string, cb MsgHandler, options ...SubscriptionOption) (Subscription, error) { + return sc.subscribe(subject, qgroup, cb, options...) +} + +// subscribe will perform a subscription with the given options to the NATS Streaming cluster. +func (sc *conn) subscribe(subject, qgroup string, cb MsgHandler, options ...SubscriptionOption) (Subscription, error) { + sub := &subscription{subject: subject, qgroup: qgroup, inbox: nats.NewInbox(), cb: cb, sc: sc, opts: DefaultSubscriptionOptions} + for _, opt := range options { + if err := opt(&sub.opts); err != nil { + return nil, err + } + } + sc.Lock() + if sc.nc == nil { + sc.Unlock() + return nil, ErrConnectionClosed + } + + // Register subscription. + sc.subMap[sub.inbox] = sub + nc := sc.nc + sc.Unlock() + + // Hold lock throughout. + sub.Lock() + defer sub.Unlock() + + // Listen for actual messages. + nsub, err := nc.Subscribe(sub.inbox, sc.processMsg) + if err != nil { + return nil, err + } + sub.inboxSub = nsub + + // Create a subscription request + // FIXME(dlc) add others. 
+ sr := &pb.SubscriptionRequest{ + ClientID: sc.clientID, + Subject: subject, + QGroup: qgroup, + Inbox: sub.inbox, + MaxInFlight: int32(sub.opts.MaxInflight), + AckWaitInSecs: int32(sub.opts.AckWait / time.Second), + StartPosition: sub.opts.StartAt, + DurableName: sub.opts.DurableName, + } + + // Conditionals + switch sr.StartPosition { + case pb.StartPosition_TimeDeltaStart: + sr.StartTimeDelta = time.Now().UnixNano() - sub.opts.StartTime.UnixNano() + case pb.StartPosition_SequenceStart: + sr.StartSequence = sub.opts.StartSequence + } + + b, _ := sr.Marshal() + reply, err := sc.nc.Request(sc.subRequests, b, sc.opts.ConnectTimeout) + if err != nil { + sub.inboxSub.Unsubscribe() + if err == nats.ErrTimeout { + err = ErrSubReqTimeout + } + return nil, err + } + r := &pb.SubscriptionResponse{} + if err := r.Unmarshal(reply.Data); err != nil { + sub.inboxSub.Unsubscribe() + return nil, err + } + if r.Error != "" { + sub.inboxSub.Unsubscribe() + return nil, errors.New(r.Error) + } + sub.ackInbox = r.AckInbox + + return sub, nil +} + +// ClearMaxPending resets the maximums seen so far. +func (sub *subscription) ClearMaxPending() error { + sub.Lock() + defer sub.Unlock() + if sub.inboxSub == nil { + return ErrBadSubscription + } + return sub.inboxSub.ClearMaxPending() +} + +// Delivered returns the number of delivered messages for this subscription. +func (sub *subscription) Delivered() (int64, error) { + sub.Lock() + defer sub.Unlock() + if sub.inboxSub == nil { + return -1, ErrBadSubscription + } + return sub.inboxSub.Delivered() +} + +// Dropped returns the number of known dropped messages for this subscription. +// This will correspond to messages dropped by violations of PendingLimits. If +// the server declares the connection a SlowConsumer, this number may not be +// valid. 
+func (sub *subscription) Dropped() (int, error) { + sub.Lock() + defer sub.Unlock() + if sub.inboxSub == nil { + return -1, ErrBadSubscription + } + return sub.inboxSub.Dropped() +} + +// IsValid returns a boolean indicating whether the subscription +// is still active. This will return false if the subscription has +// already been closed. +func (sub *subscription) IsValid() bool { + sub.Lock() + defer sub.Unlock() + if sub.inboxSub == nil { + return false + } + return sub.inboxSub.IsValid() +} + +// MaxPending returns the maximum number of queued messages and queued bytes seen so far. +func (sub *subscription) MaxPending() (int, int, error) { + sub.Lock() + defer sub.Unlock() + if sub.inboxSub == nil { + return -1, -1, ErrBadSubscription + } + return sub.inboxSub.MaxPending() +} + +// Pending returns the number of queued messages and queued bytes in the client for this subscription. +func (sub *subscription) Pending() (int, int, error) { + sub.Lock() + defer sub.Unlock() + if sub.inboxSub == nil { + return -1, -1, ErrBadSubscription + } + return sub.inboxSub.Pending() +} + +// PendingLimits returns the current limits for this subscription. +// If no error is returned, a negative value indicates that the +// given metric is not limited. +func (sub *subscription) PendingLimits() (int, int, error) { + sub.Lock() + defer sub.Unlock() + if sub.inboxSub == nil { + return -1, -1, ErrBadSubscription + } + return sub.inboxSub.PendingLimits() +} + +// SetPendingLimits sets the limits for pending msgs and bytes for this subscription. +// Zero is not allowed. Any negative value means that the given metric is not limited. +func (sub *subscription) SetPendingLimits(msgLimit, bytesLimit int) error { + sub.Lock() + defer sub.Unlock() + if sub.inboxSub == nil { + return ErrBadSubscription + } + return sub.inboxSub.SetPendingLimits(msgLimit, bytesLimit) +} + +// closeOrUnsubscribe performs either close or unsubsribe based on +// given boolean. 
+func (sub *subscription) closeOrUnsubscribe(doClose bool) error { + if sub == nil { + return ErrBadSubscription + } + sub.Lock() + sc := sub.sc + if sc == nil { + // Already closed. + sub.Unlock() + return ErrBadSubscription + } + sub.sc = nil + sub.inboxSub.Unsubscribe() + sub.inboxSub = nil + sub.Unlock() + + if sc == nil { + return ErrBadSubscription + } + + sc.Lock() + if sc.nc == nil { + sc.Unlock() + return ErrConnectionClosed + } + + delete(sc.subMap, sub.inbox) + reqSubject := sc.unsubRequests + if doClose { + reqSubject = sc.subCloseRequests + if reqSubject == "" { + sc.Unlock() + return ErrNoServerSupport + } + } + + // Snapshot connection to avoid data race, since the connection may be + // closing while we try to send the request + nc := sc.nc + sc.Unlock() + + usr := &pb.UnsubscribeRequest{ + ClientID: sc.clientID, + Subject: sub.subject, + Inbox: sub.ackInbox, + } + b, _ := usr.Marshal() + reply, err := nc.Request(reqSubject, b, sc.opts.ConnectTimeout) + if err != nil { + if err == nats.ErrTimeout { + if doClose { + return ErrCloseReqTimeout + } + return ErrUnsubReqTimeout + } + return err + } + r := &pb.SubscriptionResponse{} + if err := r.Unmarshal(reply.Data); err != nil { + return err + } + if r.Error != "" { + return errors.New(r.Error) + } + + return nil +} + +// Unsubscribe implements the Subscription interface +func (sub *subscription) Unsubscribe() error { + return sub.closeOrUnsubscribe(false) +} + +// Close implements the Subscription interface +func (sub *subscription) Close() error { + return sub.closeOrUnsubscribe(true) +} + +// Ack manually acknowledges a message. +// The subscriber had to be created with SetManualAckMode() option. 
+func (msg *Msg) Ack() error { + if msg == nil { + return ErrNilMsg + } + // Look up subscription + sub := msg.Sub.(*subscription) + if sub == nil { + return ErrBadSubscription + } + + sub.RLock() + ackSubject := sub.ackInbox + isManualAck := sub.opts.ManualAcks + sc := sub.sc + sub.RUnlock() + + // Check for error conditions. + if sc == nil { + return ErrBadSubscription + } + // Get nc from the connection (needs locking to avoid race) + sc.RLock() + nc := sc.nc + sc.RUnlock() + if nc == nil { + return ErrBadConnection + } + if !isManualAck { + return ErrManualAck + } + + // Ack here. + ack := &pb.Ack{Subject: msg.Subject, Sequence: msg.Sequence} + b, _ := ack.Marshal() + return nc.Publish(ackSubject, b) +} diff --git a/vendor/github.com/nats-io/go-nats/LICENSE b/vendor/github.com/nats-io/go-nats/LICENSE new file mode 100644 index 000000000..4cfd668f2 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2012-2016 Apcera Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/nats-io/go-nats/README.md b/vendor/github.com/nats-io/go-nats/README.md new file mode 100644 index 000000000..ad95e0a7e --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/README.md @@ -0,0 +1,322 @@ +# NATS - Go Client +A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io). + +[![License MIT](https://img.shields.io/npm/l/express.svg)](http://opensource.org/licenses/MIT) +[![Go Report Card](https://goreportcard.com/badge/github.com/nats-io/go-nats)](https://goreportcard.com/report/github.com/nats-io/go-nats) [![Build Status](https://travis-ci.org/nats-io/go-nats.svg?branch=master)](http://travis-ci.org/nats-io/go-nats) [![GoDoc](https://godoc.org/github.com/nats-io/go-nats?status.svg)](http://godoc.org/github.com/nats-io/go-nats) [![Coverage Status](https://coveralls.io/repos/nats-io/go-nats/badge.svg?branch=master)](https://coveralls.io/r/nats-io/go-nats?branch=master) + +## Installation + +```bash +# Go client +go get github.com/nats-io/go-nats + +# Server +go get github.com/nats-io/gnatsd +``` + +## Basic Usage + +```go + +nc, _ := nats.Connect(nats.DefaultURL) + +// Simple Publisher +nc.Publish("foo", []byte("Hello World")) + +// Simple Async Subscriber +nc.Subscribe("foo", func(m *nats.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}) + +// Simple Sync Subscriber +sub, err := nc.SubscribeSync("foo") +m, err := sub.NextMsg(timeout) + +// Channel Subscriber +ch := make(chan *nats.Msg, 64) +sub, err := nc.ChanSubscribe("foo", ch) +msg <- ch + +// Unsubscribe +sub.Unsubscribe() + +// Requests +msg, err := nc.Request("help", []byte("help me"), 10*time.Millisecond) + +// Replies +nc.Subscribe("help", func(m *Msg) { + 
nc.Publish(m.Reply, []byte("I can help!")) +}) + +// Close connection +nc := nats.Connect("nats://localhost:4222") +nc.Close(); +``` + +## Encoded Connections + +```go + +nc, _ := nats.Connect(nats.DefaultURL) +c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) +defer c.Close() + +// Simple Publisher +c.Publish("foo", "Hello World") + +// Simple Async Subscriber +c.Subscribe("foo", func(s string) { + fmt.Printf("Received a message: %s\n", s) +}) + +// EncodedConn can Publish any raw Go type using the registered Encoder +type person struct { + Name string + Address string + Age int +} + +// Go type Subscriber +c.Subscribe("hello", func(p *person) { + fmt.Printf("Received a person: %+v\n", p) +}) + +me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street, San Francisco, CA"} + +// Go type Publisher +c.Publish("hello", me) + +// Unsubscribe +sub, err := c.Subscribe("foo", nil) +... +sub.Unsubscribe() + +// Requests +var response string +err := c.Request("help", "help me", &response, 10*time.Millisecond) +if err != nil { + fmt.Printf("Request failed: %v\n", err) +} + +// Replying +c.Subscribe("help", func(subj, reply string, msg string) { + c.Publish(reply, "I can help!") +}) + +// Close connection +c.Close(); +``` + +## TLS + +```go +// tls as a scheme will enable secure connections by default. This will also verify the server name. +nc, err := nats.Connect("tls://nats.demo.io:4443") + +// If you are using a self-signed certificate, you need to have a tls.Config with RootCAs setup. +// We provide a helper method to make this case easier. 
+nc, err = nats.Connect("tls://localhost:4443", nats.RootCAs("./configs/certs/ca.pem")) + +// If the server requires client certificate, there is an helper function for that too: +cert := nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem") +nc, err = nats.Connect("tls://localhost:4443", cert) + +// You can also supply a complete tls.Config + +certFile := "./configs/certs/client-cert.pem" +keyFile := "./configs/certs/client-key.pem" +cert, err := tls.LoadX509KeyPair(certFile, keyFile) +if err != nil { + t.Fatalf("error parsing X509 certificate/key pair: %v", err) +} + +config := &tls.Config{ + ServerName: opts.Host, + Certificates: []tls.Certificate{cert}, + RootCAs: pool, + MinVersion: tls.VersionTLS12, +} + +nc, err = nats.Connect("nats://localhost:4443", nats.Secure(config)) +if err != nil { + t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err) +} + +``` + +## Using Go Channels (netchan) + +```go +nc, _ := nats.Connect(nats.DefaultURL) +ec, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) +defer ec.Close() + +type person struct { + Name string + Address string + Age int +} + +recvCh := make(chan *person) +ec.BindRecvChan("hello", recvCh) + +sendCh := make(chan *person) +ec.BindSendChan("hello", sendCh) + +me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"} + +// Send via Go channels +sendCh <- me + +// Receive via Go channels +who := <- recvCh +``` + +## Wildcard Subscriptions + +```go + +// "*" matches any token, at any level of the subject. +nc.Subscribe("foo.*.baz", func(m *Msg) { + fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); +}) + +nc.Subscribe("foo.bar.*", func(m *Msg) { + fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); +}) + +// ">" matches any length of the tail of a subject, and can only be the last token +// E.g. 
'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22' +nc.Subscribe("foo.>", func(m *Msg) { + fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); +}) + +// Matches all of the above +nc.Publish("foo.bar.baz", []byte("Hello World")) + +``` + +## Queue Groups + +```go +// All subscriptions with the same queue name will form a queue group. +// Each message will be delivered to only one subscriber per queue group, +// using queuing semantics. You can have as many queue groups as you wish. +// Normal subscribers will continue to work as expected. + +nc.QueueSubscribe("foo", "job_workers", func(_ *Msg) { + received += 1; +}) + +``` + +## Advanced Usage + +```go + +// Flush connection to server, returns when all messages have been processed. +nc.Flush() +fmt.Println("All clear!") + +// FlushTimeout specifies a timeout value as well. +err := nc.FlushTimeout(1*time.Second) +if err != nil { + fmt.Println("All clear!") +} else { + fmt.Println("Flushed timed out!") +} + +// Auto-unsubscribe after MAX_WANTED messages received +const MAX_WANTED = 10 +sub, err := nc.Subscribe("foo") +sub.AutoUnsubscribe(MAX_WANTED) + +// Multiple connections +nc1 := nats.Connect("nats://host1:4222") +nc2 := nats.Connect("nats://host2:4222") + +nc1.Subscribe("foo", func(m *Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}) + +nc2.Publish("foo", []byte("Hello World!")); + +``` + +## Clustered Usage + +```go + +var servers = "nats://localhost:1222, nats://localhost:1223, nats://localhost:1224" + +nc, err := nats.Connect(servers) + +// Optionally set ReconnectWait and MaxReconnect attempts. +// This example means 10 seconds total per backend. +nc, err = nats.Connect(servers, nats.MaxReconnects(5), nats.ReconnectWait(2 * time.Second)) + +// Optionally disable randomization of the server pool +nc, err = nats.Connect(servers, nats.DontRandomize()) + +// Setup callbacks to be notified on disconnects, reconnects and connection closed. 
+nc, err = nats.Connect(servers, + nats.DisconnectHandler(func(nc *nats.Conn) { + fmt.Printf("Got disconnected!\n") + }), + nats.ReconnectHandler(func(_ *nats.Conn) { + fmt.Printf("Got reconnected to %v!\n", nc.ConnectedUrl()) + }), + nats.ClosedHandler(func(nc *nats.Conn) { + fmt.Printf("Connection closed. Reason: %q\n", nc.LastError()) + }) +) + +// When connecting to a mesh of servers with auto-discovery capabilities, +// you may need to provide a username/password or token in order to connect +// to any server in that mesh when authentication is required. +// Instead of providing the credentials in the initial URL, you will use +// new option setters: +nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar")) + +// For token based authentication: +nc, err = nats.Connect("nats://localhost:4222", nats.Token("S3cretT0ken")) + +// You can even pass the two at the same time in case one of the server +// in the mesh requires token instead of user name and password. +nc, err = nats.Connect("nats://localhost:4222", + nats.UserInfo("foo", "bar"), + nats.Token("S3cretT0ken")) + +// Note that if credentials are specified in the initial URLs, they take +// precedence on the credentials specfied through the options. +// For instance, in the connect call below, the client library will use +// the user "my" and password "pwd" to connect to locahost:4222, however, +// it will use username "foo" and password "bar" when (re)connecting to +// a different server URL that it got as part of the auto-discovery. +nc, err = nats.Connect("nats://my:pwd@localhost:4222", nats.UserInfo("foo", "bar")) + +``` + +## License + +(The MIT License) + +Copyright (c) 2012-2016 Apcera Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/vendor/github.com/nats-io/go-nats/TODO.md b/vendor/github.com/nats-io/go-nats/TODO.md new file mode 100644 index 000000000..213aaeca8 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/TODO.md @@ -0,0 +1,26 @@ + +- [ ] Better constructors, options handling +- [ ] Functions for callback settings after connection created. +- [ ] Better options for subscriptions. Slow Consumer state settable, Go routines vs Inline. +- [ ] Move off of channels for subscribers, use syncPool linkedLists, etc with highwater. +- [ ] Test for valid subjects on publish and subscribe? +- [ ] SyncSubscriber and Next for EncodedConn +- [ ] Fast Publisher? +- [ ] pooling for structs used? leaky bucket? 
+- [ ] Timeout 0 should work as no timeout +- [x] Ping timer +- [x] Name in Connect for gnatsd +- [x] Asynchronous error handling +- [x] Parser rewrite +- [x] Reconnect +- [x] Hide Lock +- [x] Easier encoder interface +- [x] QueueSubscribeSync +- [x] Make nats specific errors prefixed with 'nats:' +- [x] API test for closed connection +- [x] TLS/SSL +- [x] Stats collection +- [x] Disconnect detection +- [x] Optimized Publish (coalescing) +- [x] Do Examples via Go style +- [x] Standardized Errors diff --git a/vendor/github.com/nats-io/go-nats/enc.go b/vendor/github.com/nats-io/go-nats/enc.go new file mode 100644 index 000000000..f29b0343a --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/enc.go @@ -0,0 +1,249 @@ +// Copyright 2012-2015 Apcera Inc. All rights reserved. + +package nats + +import ( + "errors" + "fmt" + "reflect" + "sync" + "time" + + // Default Encoders + . "github.com/nats-io/go-nats/encoders/builtin" +) + +// Encoder interface is for all register encoders +type Encoder interface { + Encode(subject string, v interface{}) ([]byte, error) + Decode(subject string, data []byte, vPtr interface{}) error +} + +var encMap map[string]Encoder +var encLock sync.Mutex + +// Indexe names into the Registered Encoders. +const ( + JSON_ENCODER = "json" + GOB_ENCODER = "gob" + DEFAULT_ENCODER = "default" +) + +func init() { + encMap = make(map[string]Encoder) + // Register json, gob and default encoder + RegisterEncoder(JSON_ENCODER, &JsonEncoder{}) + RegisterEncoder(GOB_ENCODER, &GobEncoder{}) + RegisterEncoder(DEFAULT_ENCODER, &DefaultEncoder{}) +} + +// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to +// a nats server and have an extendable encoder system that will encode and decode messages +// from raw Go types. +type EncodedConn struct { + Conn *Conn + Enc Encoder +} + +// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered +// encoder. 
+func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) { + if c == nil { + return nil, errors.New("nats: Nil Connection") + } + if c.IsClosed() { + return nil, ErrConnectionClosed + } + ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)} + if ec.Enc == nil { + return nil, fmt.Errorf("No encoder registered for '%s'", encType) + } + return ec, nil +} + +// RegisterEncoder will register the encType with the given Encoder. Useful for customization. +func RegisterEncoder(encType string, enc Encoder) { + encLock.Lock() + defer encLock.Unlock() + encMap[encType] = enc +} + +// EncoderForType will return the registered Encoder for the encType. +func EncoderForType(encType string) Encoder { + encLock.Lock() + defer encLock.Unlock() + return encMap[encType] +} + +// Publish publishes the data argument to the given subject. The data argument +// will be encoded using the associated encoder. +func (c *EncodedConn) Publish(subject string, v interface{}) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + return c.Conn.publish(subject, _EMPTY_, b) +} + +// PublishRequest will perform a Publish() expecting a response on the +// reply subject. Use Request() for automatically waiting for a response +// inline. +func (c *EncodedConn) PublishRequest(subject, reply string, v interface{}) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + return c.Conn.publish(subject, reply, b) +} + +// Request will create an Inbox and perform a Request() call +// with the Inbox reply for the data v. A response will be +// decoded into the vPtrResponse. 
+func (c *EncodedConn) Request(subject string, v interface{}, vPtr interface{}, timeout time.Duration) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + m, err := c.Conn.Request(subject, b, timeout) + if err != nil { + return err + } + if reflect.TypeOf(vPtr) == emptyMsgType { + mPtr := vPtr.(*Msg) + *mPtr = *m + } else { + err = c.Enc.Decode(m.Subject, m.Data, vPtr) + } + return err +} + +// Handler is a specific callback used for Subscribe. It is generalized to +// an interface{}, but we will discover its format and arguments at runtime +// and perform the correct callback, including de-marshalling JSON strings +// back into the appropriate struct based on the signature of the Handler. +// +// Handlers are expected to have one of four signatures. +// +// type person struct { +// Name string `json:"name,omitempty"` +// Age uint `json:"age,omitempty"` +// } +// +// handler := func(m *Msg) +// handler := func(p *person) +// handler := func(subject string, o *obj) +// handler := func(subject, reply string, o *obj) +// +// These forms allow a callback to request a raw Msg ptr, where the processing +// of the message from the wire is untouched. Process a JSON representation +// and demarshal it into the given struct, e.g. person. +// There are also variants where the callback wants either the subject, or the +// subject and the reply subject. +type Handler interface{} + +// Dissect the cb Handler's signature +func argInfo(cb Handler) (reflect.Type, int) { + cbType := reflect.TypeOf(cb) + if cbType.Kind() != reflect.Func { + panic("nats: Handler needs to be a func") + } + numArgs := cbType.NumIn() + if numArgs == 0 { + return nil, numArgs + } + return cbType.In(numArgs - 1), numArgs +} + +var emptyMsgType = reflect.TypeOf(&Msg{}) + +// Subscribe will create a subscription on the given subject and process incoming +// messages using the specified Handler. 
The Handler should be a func that matches +// a signature from the description of Handler from above. +func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) { + return c.subscribe(subject, _EMPTY_, cb) +} + +// QueueSubscribe will create a queue subscription on the given subject and process +// incoming messages using the specified Handler. The Handler should be a func that +// matches a signature from the description of Handler from above. +func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) { + return c.subscribe(subject, queue, cb) +} + +// Internal implementation that all public functions will use. +func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) { + if cb == nil { + return nil, errors.New("nats: Handler required for EncodedConn Subscription") + } + argType, numArgs := argInfo(cb) + if argType == nil { + return nil, errors.New("nats: Handler requires at least one argument") + } + + cbValue := reflect.ValueOf(cb) + wantsRaw := (argType == emptyMsgType) + + natsCB := func(m *Msg) { + var oV []reflect.Value + if wantsRaw { + oV = []reflect.Value{reflect.ValueOf(m)} + } else { + var oPtr reflect.Value + if argType.Kind() != reflect.Ptr { + oPtr = reflect.New(argType) + } else { + oPtr = reflect.New(argType.Elem()) + } + if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { + if c.Conn.Opts.AsyncErrorCB != nil { + c.Conn.ach <- func() { + c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error())) + } + } + return + } + if argType.Kind() != reflect.Ptr { + oPtr = reflect.Indirect(oPtr) + } + + // Callback Arity + switch numArgs { + case 1: + oV = []reflect.Value{oPtr} + case 2: + subV := reflect.ValueOf(m.Subject) + oV = []reflect.Value{subV, oPtr} + case 3: + subV := reflect.ValueOf(m.Subject) + replyV := reflect.ValueOf(m.Reply) + oV = []reflect.Value{subV, replyV, oPtr} + } + + } + 
cbValue.Call(oV) + } + + return c.Conn.subscribe(subject, queue, natsCB, nil) +} + +// FlushTimeout allows a Flush operation to have an associated timeout. +func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) { + return c.Conn.FlushTimeout(timeout) +} + +// Flush will perform a round trip to the server and return when it +// receives the internal reply. +func (c *EncodedConn) Flush() error { + return c.Conn.Flush() +} + +// Close will close the connection to the server. This call will release +// all blocking calls, such as Flush(), etc. +func (c *EncodedConn) Close() { + c.Conn.Close() +} + +// LastError reports the last error encountered via the Connection. +func (c *EncodedConn) LastError() error { + return c.Conn.err +} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go new file mode 100644 index 000000000..82467ce78 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go @@ -0,0 +1,106 @@ +// Copyright 2012-2015 Apcera Inc. All rights reserved. + +package builtin + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "unsafe" +) + +// DefaultEncoder implementation for EncodedConn. +// This encoder will leave []byte and string untouched, but will attempt to +// turn numbers into appropriate strings that can be decoded. It will also +// propely encoded and decode bools. If will encode a struct, but if you want +// to properly handle structures you should use JsonEncoder. 
+type DefaultEncoder struct { + // Empty +} + +var trueB = []byte("true") +var falseB = []byte("false") +var nilB = []byte("") + +// Encode +func (je *DefaultEncoder) Encode(subject string, v interface{}) ([]byte, error) { + switch arg := v.(type) { + case string: + bytes := *(*[]byte)(unsafe.Pointer(&arg)) + return bytes, nil + case []byte: + return arg, nil + case bool: + if arg { + return trueB, nil + } else { + return falseB, nil + } + case nil: + return nilB, nil + default: + var buf bytes.Buffer + fmt.Fprintf(&buf, "%+v", arg) + return buf.Bytes(), nil + } +} + +// Decode +func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr interface{}) error { + // Figure out what it's pointing to... + sData := *(*string)(unsafe.Pointer(&data)) + switch arg := vPtr.(type) { + case *string: + *arg = sData + return nil + case *[]byte: + *arg = data + return nil + case *int: + n, err := strconv.ParseInt(sData, 10, 64) + if err != nil { + return err + } + *arg = int(n) + return nil + case *int32: + n, err := strconv.ParseInt(sData, 10, 64) + if err != nil { + return err + } + *arg = int32(n) + return nil + case *int64: + n, err := strconv.ParseInt(sData, 10, 64) + if err != nil { + return err + } + *arg = int64(n) + return nil + case *float32: + n, err := strconv.ParseFloat(sData, 32) + if err != nil { + return err + } + *arg = float32(n) + return nil + case *float64: + n, err := strconv.ParseFloat(sData, 64) + if err != nil { + return err + } + *arg = float64(n) + return nil + case *bool: + b, err := strconv.ParseBool(sData) + if err != nil { + return err + } + *arg = b + return nil + default: + vt := reflect.TypeOf(arg).Elem() + return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt) + } +} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go new file mode 100644 index 000000000..988ff42f5 --- /dev/null +++ 
b/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go @@ -0,0 +1,34 @@ +// Copyright 2013-2015 Apcera Inc. All rights reserved. + +package builtin + +import ( + "bytes" + "encoding/gob" +) + +// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn. +// This encoder will use the builtin encoding/gob to Marshal +// and Unmarshal most types, including structs. +type GobEncoder struct { + // Empty +} + +// FIXME(dlc) - This could probably be more efficient. + +// Encode +func (ge *GobEncoder) Encode(subject string, v interface{}) ([]byte, error) { + b := new(bytes.Buffer) + enc := gob.NewEncoder(b) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Decode +func (ge *GobEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) { + dec := gob.NewDecoder(bytes.NewBuffer(data)) + err = dec.Decode(vPtr) + return +} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go new file mode 100644 index 000000000..3b269ef02 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go @@ -0,0 +1,45 @@ +// Copyright 2012-2015 Apcera Inc. All rights reserved. + +package builtin + +import ( + "encoding/json" + "strings" +) + +// JsonEncoder is a JSON Encoder implementation for EncodedConn. +// This encoder will use the builtin encoding/json to Marshal +// and Unmarshal most types, including structs. 
+type JsonEncoder struct { + // Empty +} + +// Encode +func (je *JsonEncoder) Encode(subject string, v interface{}) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + return b, nil +} + +// Decode +func (je *JsonEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) { + switch arg := vPtr.(type) { + case *string: + // If they want a string and it is a JSON string, strip quotes + // This allows someone to send a struct but receive as a plain string + // This cast should be efficient for Go 1.3 and beyond. + str := string(data) + if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) { + *arg = str[1 : len(str)-1] + } else { + *arg = str + } + case *[]byte: + *arg = data + default: + err = json.Unmarshal(data, arg) + } + return +} diff --git a/vendor/github.com/nats-io/go-nats/nats.go b/vendor/github.com/nats-io/go-nats/nats.go new file mode 100644 index 000000000..52a3bb083 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/nats.go @@ -0,0 +1,2630 @@ +// Copyright 2012-2016 Apcera Inc. All rights reserved. + +// A Go client for the NATS messaging system (https://nats.io). +package nats + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/url" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/nats-io/go-nats/util" + "github.com/nats-io/nuid" +) + +// Default Constants +const ( + Version = "1.2.2" + DefaultURL = "nats://localhost:4222" + DefaultPort = 4222 + DefaultMaxReconnect = 60 + DefaultReconnectWait = 2 * time.Second + DefaultTimeout = 2 * time.Second + DefaultPingInterval = 2 * time.Minute + DefaultMaxPingOut = 2 + DefaultMaxChanLen = 8192 // 8k + DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB + RequestChanLen = 8 + LangString = "go" +) + +// STALE_CONNECTION is for detection and proper handling of stale connections. 
+const STALE_CONNECTION = "stale connection" + +// PERMISSIONS_ERR is for when nats server subject authorization has failed. +const PERMISSIONS_ERR = "permissions violation" + +// Errors +var ( + ErrConnectionClosed = errors.New("nats: connection closed") + ErrSecureConnRequired = errors.New("nats: secure connection required") + ErrSecureConnWanted = errors.New("nats: secure connection not available") + ErrBadSubscription = errors.New("nats: invalid subscription") + ErrTypeSubscription = errors.New("nats: invalid subscription type") + ErrBadSubject = errors.New("nats: invalid subject") + ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped") + ErrTimeout = errors.New("nats: timeout") + ErrBadTimeout = errors.New("nats: timeout invalid") + ErrAuthorization = errors.New("nats: authorization violation") + ErrNoServers = errors.New("nats: no servers available for connection") + ErrJsonParse = errors.New("nats: connect message, json parse error") + ErrChanArg = errors.New("nats: argument needs to be a channel type") + ErrMaxPayload = errors.New("nats: maximum payload exceeded") + ErrMaxMessages = errors.New("nats: maximum messages delivered") + ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription") + ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed") + ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received") + ErrReconnectBufExceeded = errors.New("nats: outbound buffer limit exceeded") + ErrInvalidConnection = errors.New("nats: invalid connection") + ErrInvalidMsg = errors.New("nats: invalid message or message nil") + ErrInvalidArg = errors.New("nats: invalid argument") + ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION) +) + +var DefaultOptions = Options{ + AllowReconnect: true, + MaxReconnect: DefaultMaxReconnect, + ReconnectWait: DefaultReconnectWait, + Timeout: DefaultTimeout, + PingInterval: DefaultPingInterval, + MaxPingsOut: DefaultMaxPingOut, + SubChanLen: 
DefaultMaxChanLen, + ReconnectBufSize: DefaultReconnectBufSize, + Dialer: &net.Dialer{ + Timeout: DefaultTimeout, + }, +} + +// Status represents the state of the connection. +type Status int + +const ( + DISCONNECTED = Status(iota) + CONNECTED + CLOSED + RECONNECTING + CONNECTING +) + +// ConnHandler is used for asynchronous events such as +// disconnected and closed connections. +type ConnHandler func(*Conn) + +// ErrHandler is used to process asynchronous errors encountered +// while processing inbound messages. +type ErrHandler func(*Conn, *Subscription, error) + +// asyncCB is used to preserve order for async callbacks. +type asyncCB func() + +// Option is a function on the options for a connection. +type Option func(*Options) error + +// Options can be used to create a customized connection. +type Options struct { + Url string + Servers []string + NoRandomize bool + Name string + Verbose bool + Pedantic bool + Secure bool + TLSConfig *tls.Config + AllowReconnect bool + MaxReconnect int + ReconnectWait time.Duration + Timeout time.Duration + PingInterval time.Duration // disabled if 0 or negative + MaxPingsOut int + ClosedCB ConnHandler + DisconnectedCB ConnHandler + ReconnectedCB ConnHandler + AsyncErrorCB ErrHandler + + // Size of the backing bufio buffer during reconnect. Once this + // has been exhausted publish operations will error. + ReconnectBufSize int + + // The size of the buffered channel used between the socket + // Go routine and the message delivery for SyncSubscriptions. + // NOTE: This does not affect AsyncSubscriptions which are + // dictated by PendingLimits() + SubChanLen int + + User string + Password string + Token string + + // Dialer allows users setting a custom Dialer + Dialer *net.Dialer +} + +const ( + // Scratch storage for assembling protocol headers + scratchSize = 512 + + // The size of the bufio reader/writer on top of the socket. 
+ defaultBufSize = 32768 + + // The buffered size of the flush "kick" channel + flushChanSize = 1024 + + // Default server pool size + srvPoolSize = 4 + + // Channel size for the async callback handler. + asyncCBChanSize = 32 +) + +// A Conn represents a bare connection to a nats-server. +// It can send and receive []byte payloads. +type Conn struct { + // Keep all members for which we use atomic at the beginning of the + // struct and make sure they are all 64bits (or use padding if necessary). + // atomic.* functions crash on 32bit machines if operand is not aligned + // at 64bit. See https://github.com/golang/go/issues/599 + ssid int64 + + Statistics + mu sync.Mutex + Opts Options + wg sync.WaitGroup + url *url.URL + conn net.Conn + srvPool []*srv + urls map[string]struct{} // Keep track of all known URLs (used by processInfo) + bw *bufio.Writer + pending *bytes.Buffer + fch chan bool + info serverInfo + subs map[int64]*Subscription + mch chan *Msg + ach chan asyncCB + pongs []chan bool + scratch [scratchSize]byte + status Status + err error + ps *parseState + ptmr *time.Timer + pout int +} + +// A Subscription represents interest in a given subject. +type Subscription struct { + mu sync.Mutex + sid int64 + + // Subject that represents this subscription. This can be different + // than the received subject inside a Msg if this is a wildcard. + Subject string + + // Optional queue group name. If present, all subscriptions with the + // same name will form a distributed queue, and each message will + // only be processed by one member of the group. + Queue string + + delivered uint64 + max uint64 + conn *Conn + mcb MsgHandler + mch chan *Msg + closed bool + sc bool + connClosed bool + + // Type of Subscription + typ SubscriptionType + + // Async linked list + pHead *Msg + pTail *Msg + pCond *sync.Cond + + // Pending stats, async subscriptions, high-speed etc. 
+ pMsgs int + pBytes int + pMsgsMax int + pBytesMax int + pMsgsLimit int + pBytesLimit int + dropped int +} + +// Msg is a structure used by Subscribers and PublishMsg(). +type Msg struct { + Subject string + Reply string + Data []byte + Sub *Subscription + next *Msg +} + +// Tracks various stats received and sent on this connection, +// including counts for messages and bytes. +type Statistics struct { + InMsgs uint64 + OutMsgs uint64 + InBytes uint64 + OutBytes uint64 + Reconnects uint64 +} + +// Tracks individual backend servers. +type srv struct { + url *url.URL + didConnect bool + reconnects int + lastAttempt time.Time + isImplicit bool +} + +type serverInfo struct { + Id string `json:"server_id"` + Host string `json:"host"` + Port uint `json:"port"` + Version string `json:"version"` + AuthRequired bool `json:"auth_required"` + TLSRequired bool `json:"tls_required"` + MaxPayload int64 `json:"max_payload"` + ConnectURLs []string `json:"connect_urls,omitempty"` +} + +const ( + // clientProtoZero is the original client protocol from 2009. + // http://nats.io/documentation/internals/nats-protocol/ + clientProtoZero = iota + // clientProtoInfo signals a client can receive more then the original INFO block. + // This can be used to update clients on other cluster members, etc. + clientProtoInfo +) + +type connectInfo struct { + Verbose bool `json:"verbose"` + Pedantic bool `json:"pedantic"` + User string `json:"user,omitempty"` + Pass string `json:"pass,omitempty"` + Token string `json:"auth_token,omitempty"` + TLS bool `json:"tls_required"` + Name string `json:"name"` + Lang string `json:"lang"` + Version string `json:"version"` + Protocol int `json:"protocol"` +} + +// MsgHandler is a callback function that processes messages delivered to +// asynchronous subscribers. +type MsgHandler func(msg *Msg) + +// Connect will attempt to connect to the NATS system. +// The url can contain username/password semantics. e.g. 
nats://derek:pass@localhost:4222 +// Comma separated arrays are also supported, e.g. urlA, urlB. +// Options start with the defaults but can be overridden. +func Connect(url string, options ...Option) (*Conn, error) { + opts := DefaultOptions + opts.Servers = processUrlString(url) + for _, opt := range options { + if err := opt(&opts); err != nil { + return nil, err + } + } + return opts.Connect() +} + +// Options that can be passed to Connect. + +// Name is an Option to set the client name. +func Name(name string) Option { + return func(o *Options) error { + o.Name = name + return nil + } +} + +// Secure is an Option to enable TLS secure connections that skip server verification by default. +// Pass a TLS Configuration for proper TLS. +func Secure(tls ...*tls.Config) Option { + return func(o *Options) error { + o.Secure = true + // Use of variadic just simplifies testing scenarios. We only take the first one. + // fixme(DLC) - Could panic if more than one. Could also do TLS option. + if len(tls) > 1 { + return ErrMultipleTLSConfigs + } + if len(tls) == 1 { + o.TLSConfig = tls[0] + } + return nil + } +} + +// RootCAs is a helper option to provide the RootCAs pool from a list of filenames. If Secure is +// not already set this will set it as well. +func RootCAs(file ...string) Option { + return func(o *Options) error { + pool := x509.NewCertPool() + for _, f := range file { + rootPEM, err := ioutil.ReadFile(f) + if err != nil || rootPEM == nil { + return fmt.Errorf("nats: error loading or parsing rootCA file: %v", err) + } + ok := pool.AppendCertsFromPEM([]byte(rootPEM)) + if !ok { + return fmt.Errorf("nats: failed to parse root certificate from %q", f) + } + } + if o.TLSConfig == nil { + o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + o.TLSConfig.RootCAs = pool + o.Secure = true + return nil + } +} + +// ClientCert is a helper option to provide the client certificate from a file. 
If Secure is +// not already set this will set it as well +func ClientCert(certFile, keyFile string) Option { + return func(o *Options) error { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return fmt.Errorf("nats: error loading client certificate: %v", err) + } + cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return fmt.Errorf("nats: error parsing client certificate: %v", err) + } + if o.TLSConfig == nil { + o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + o.TLSConfig.Certificates = []tls.Certificate{cert} + o.Secure = true + return nil + } +} + +// NoReconnect is an Option to turn off reconnect behavior. +func NoReconnect() Option { + return func(o *Options) error { + o.AllowReconnect = false + return nil + } +} + +// DontRandomize is an Option to turn off randomizing the server pool. +func DontRandomize() Option { + return func(o *Options) error { + o.NoRandomize = true + return nil + } +} + +// ReconnectWait is an Option to set the wait time between reconnect attempts. +func ReconnectWait(t time.Duration) Option { + return func(o *Options) error { + o.ReconnectWait = t + return nil + } +} + +// MaxReconnects is an Option to set the maximum number of reconnect attempts. +func MaxReconnects(max int) Option { + return func(o *Options) error { + o.MaxReconnect = max + return nil + } +} + +// Timeout is an Option to set the timeout for Dial on a connection. +func Timeout(t time.Duration) Option { + return func(o *Options) error { + o.Timeout = t + return nil + } +} + +// DisconnectHandler is an Option to set the disconnected handler. +func DisconnectHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.DisconnectedCB = cb + return nil + } +} + +// ReconnectHandler is an Option to set the reconnected handler. 
+func ReconnectHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.ReconnectedCB = cb + return nil + } +} + +// ClosedHandler is an Option to set the closed handler. +func ClosedHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.ClosedCB = cb + return nil + } +} + +// ErrHandler is an Option to set the async error handler. +func ErrorHandler(cb ErrHandler) Option { + return func(o *Options) error { + o.AsyncErrorCB = cb + return nil + } +} + +// UserInfo is an Option to set the username and password to +// use when not included directly in the URLs. +func UserInfo(user, password string) Option { + return func(o *Options) error { + o.User = user + o.Password = password + return nil + } +} + +// Token is an Option to set the token to use when not included +// directly in the URLs. +func Token(token string) Option { + return func(o *Options) error { + o.Token = token + return nil + } +} + +// Dialer is an Option to set the dialer which will be used when +// attempting to establish a connection. +func Dialer(dialer *net.Dialer) Option { + return func(o *Options) error { + o.Dialer = dialer + return nil + } +} + +// Handler processing + +// SetDisconnectHandler will set the disconnect event handler. +func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.DisconnectedCB = dcb +} + +// SetReconnectHandler will set the reconnect event handler. +func (nc *Conn) SetReconnectHandler(rcb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.ReconnectedCB = rcb +} + +// SetClosedHandler will set the reconnect event handler. +func (nc *Conn) SetClosedHandler(cb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.ClosedCB = cb +} + +// SetErrHandler will set the async error handler. 
+func (nc *Conn) SetErrorHandler(cb ErrHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.AsyncErrorCB = cb +} + +// Process the url string argument to Connect. Return an array of +// urls, even if only one. +func processUrlString(url string) []string { + urls := strings.Split(url, ",") + for i, s := range urls { + urls[i] = strings.TrimSpace(s) + } + return urls +} + +// Connect will attempt to connect to a NATS server with multiple options. +func (o Options) Connect() (*Conn, error) { + nc := &Conn{Opts: o} + + // Some default options processing. + if nc.Opts.MaxPingsOut == 0 { + nc.Opts.MaxPingsOut = DefaultMaxPingOut + } + // Allow old default for channel length to work correctly. + if nc.Opts.SubChanLen == 0 { + nc.Opts.SubChanLen = DefaultMaxChanLen + } + // Default ReconnectBufSize + if nc.Opts.ReconnectBufSize == 0 { + nc.Opts.ReconnectBufSize = DefaultReconnectBufSize + } + // Ensure that Timeout is not 0 + if nc.Opts.Timeout == 0 { + nc.Opts.Timeout = DefaultTimeout + } + + // Allow custom Dialer for connecting using DialTimeout by default + if nc.Opts.Dialer == nil { + nc.Opts.Dialer = &net.Dialer{ + Timeout: nc.Opts.Timeout, + } + } + + if err := nc.setupServerPool(); err != nil { + return nil, err + } + + // Create the async callback channel. 
+ nc.ach = make(chan asyncCB, asyncCBChanSize) + + if err := nc.connect(); err != nil { + return nil, err + } + + // Spin up the async cb dispatcher on success + go nc.asyncDispatch() + + return nc, nil +} + +const ( + _CRLF_ = "\r\n" + _EMPTY_ = "" + _SPC_ = " " + _PUB_P_ = "PUB " +) + +const ( + _OK_OP_ = "+OK" + _ERR_OP_ = "-ERR" + _MSG_OP_ = "MSG" + _PING_OP_ = "PING" + _PONG_OP_ = "PONG" + _INFO_OP_ = "INFO" +) + +const ( + conProto = "CONNECT %s" + _CRLF_ + pingProto = "PING" + _CRLF_ + pongProto = "PONG" + _CRLF_ + pubProto = "PUB %s %s %d" + _CRLF_ + subProto = "SUB %s %s %d" + _CRLF_ + unsubProto = "UNSUB %d %s" + _CRLF_ + okProto = _OK_OP_ + _CRLF_ +) + +// Return the currently selected server +func (nc *Conn) currentServer() (int, *srv) { + for i, s := range nc.srvPool { + if s == nil { + continue + } + if s.url == nc.url { + return i, s + } + } + return -1, nil +} + +// Pop the current server and put onto the end of the list. Select head of list as long +// as number of reconnect attempts under MaxReconnect. +func (nc *Conn) selectNextServer() (*srv, error) { + i, s := nc.currentServer() + if i < 0 { + return nil, ErrNoServers + } + sp := nc.srvPool + num := len(sp) + copy(sp[i:num-1], sp[i+1:num]) + maxReconnect := nc.Opts.MaxReconnect + if maxReconnect < 0 || s.reconnects < maxReconnect { + nc.srvPool[num-1] = s + } else { + nc.srvPool = sp[0 : num-1] + } + if len(nc.srvPool) <= 0 { + nc.url = nil + return nil, ErrNoServers + } + nc.url = nc.srvPool[0].url + return nc.srvPool[0], nil +} + +// Will assign the correct server to the nc.Url +func (nc *Conn) pickServer() error { + nc.url = nil + if len(nc.srvPool) <= 0 { + return ErrNoServers + } + for _, s := range nc.srvPool { + if s != nil { + nc.url = s.url + return nil + } + } + return ErrNoServers +} + +const tlsScheme = "tls" + +// Create the server pool using the options given. +// We will place a Url option first, followed by any +// Server Options. 
We will randomize the server pool unlesss +// the NoRandomize flag is set. +func (nc *Conn) setupServerPool() error { + nc.srvPool = make([]*srv, 0, srvPoolSize) + nc.urls = make(map[string]struct{}, srvPoolSize) + + // Create srv objects from each url string in nc.Opts.Servers + // and add them to the pool + for _, urlString := range nc.Opts.Servers { + if err := nc.addURLToPool(urlString, false); err != nil { + return err + } + } + + // Randomize if allowed to + if !nc.Opts.NoRandomize { + nc.shufflePool() + } + + // Normally, if this one is set, Options.Servers should not be, + // but we always allowed that, so continue to do so. + if nc.Opts.Url != _EMPTY_ { + // Add to the end of the array + if err := nc.addURLToPool(nc.Opts.Url, false); err != nil { + return err + } + // Then swap it with first to guarantee that Options.Url is tried first. + last := len(nc.srvPool) - 1 + if last > 0 { + nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0] + } + } else if len(nc.srvPool) <= 0 { + // Place default URL if pool is empty. + if err := nc.addURLToPool(DefaultURL, false); err != nil { + return err + } + } + + // Check for Scheme hint to move to TLS mode. + for _, srv := range nc.srvPool { + if srv.url.Scheme == tlsScheme { + // FIXME(dlc), this is for all in the pool, should be case by case. 
+ nc.Opts.Secure = true + if nc.Opts.TLSConfig == nil { + nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + } + } + + return nc.pickServer() +} + +// addURLToPool adds an entry to the server pool +func (nc *Conn) addURLToPool(sURL string, implicit bool) error { + u, err := url.Parse(sURL) + if err != nil { + return err + } + s := &srv{url: u, isImplicit: implicit} + nc.srvPool = append(nc.srvPool, s) + nc.urls[u.Host] = struct{}{} + return nil +} + +// shufflePool swaps randomly elements in the server pool +func (nc *Conn) shufflePool() { + if len(nc.srvPool) <= 1 { + return + } + source := rand.NewSource(time.Now().UnixNano()) + r := rand.New(source) + for i := range nc.srvPool { + j := r.Intn(i + 1) + nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i] + } +} + +// createConn will connect to the server and wrap the appropriate +// bufio structures. It will do the right thing when an existing +// connection is in place. +func (nc *Conn) createConn() (err error) { + if nc.Opts.Timeout < 0 { + return ErrBadTimeout + } + if _, cur := nc.currentServer(); cur == nil { + return ErrNoServers + } else { + cur.lastAttempt = time.Now() + } + + dialer := nc.Opts.Dialer + nc.conn, err = dialer.Dial("tcp", nc.url.Host) + if err != nil { + return err + } + + // No clue why, but this stalls and kills performance on Mac (Mavericks). + // https://code.google.com/p/go/issues/detail?id=6930 + //if ip, ok := nc.conn.(*net.TCPConn); ok { + // ip.SetReadBuffer(defaultBufSize) + //} + + if nc.pending != nil && nc.bw != nil { + // Move to pending buffer. + nc.bw.Flush() + } + nc.bw = bufio.NewWriterSize(nc.conn, defaultBufSize) + return nil +} + +// makeTLSConn will wrap an existing Conn using TLS +func (nc *Conn) makeTLSConn() { + // Allow the user to configure their own tls.Config structure, otherwise + // default to InsecureSkipVerify. + // TODO(dlc) - We should make the more secure version the default. 
+ if nc.Opts.TLSConfig != nil { + tlsCopy := util.CloneTLSConfig(nc.Opts.TLSConfig) + // If its blank we will override it with the current host + if tlsCopy.ServerName == _EMPTY_ { + h, _, _ := net.SplitHostPort(nc.url.Host) + tlsCopy.ServerName = h + } + nc.conn = tls.Client(nc.conn, tlsCopy) + } else { + nc.conn = tls.Client(nc.conn, &tls.Config{InsecureSkipVerify: true}) + } + conn := nc.conn.(*tls.Conn) + conn.Handshake() + nc.bw = bufio.NewWriterSize(nc.conn, defaultBufSize) +} + +// waitForExits will wait for all socket watcher Go routines to +// be shutdown before proceeding. +func (nc *Conn) waitForExits() { + // Kick old flusher forcefully. + select { + case nc.fch <- true: + default: + } + + // Wait for any previous go routines. + nc.wg.Wait() +} + +// spinUpGoRoutines will launch the Go routines responsible for +// reading and writing to the socket. This will be launched via a +// go routine itself to release any locks that may be held. +// We also use a WaitGroup to make sure we only start them on a +// reconnect when the previous ones have exited. +func (nc *Conn) spinUpGoRoutines() { + // Make sure everything has exited. + nc.waitForExits() + + // We will wait on both. + nc.wg.Add(2) + + // Spin up the readLoop and the socket flusher. 
+ go nc.readLoop() + go nc.flusher() + + nc.mu.Lock() + if nc.Opts.PingInterval > 0 { + if nc.ptmr == nil { + nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer) + } else { + nc.ptmr.Reset(nc.Opts.PingInterval) + } + } + nc.mu.Unlock() +} + +// Report the connected server's Url +func (nc *Conn) ConnectedUrl() string { + if nc == nil { + return _EMPTY_ + } + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.url.String() +} + +// Report the connected server's Id +func (nc *Conn) ConnectedServerId() string { + if nc == nil { + return _EMPTY_ + } + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.info.Id +} + +// Low level setup for structs, etc +func (nc *Conn) setup() { + nc.subs = make(map[int64]*Subscription) + nc.pongs = make([]chan bool, 0, 8) + + nc.fch = make(chan bool, flushChanSize) + + // Setup scratch outbound buffer for PUB + pub := nc.scratch[:len(_PUB_P_)] + copy(pub, _PUB_P_) +} + +// Process a connected connection and initialize properly. +func (nc *Conn) processConnectInit() error { + + // Set out deadline for the whole connect process + nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout)) + defer nc.conn.SetDeadline(time.Time{}) + + // Set our status to connecting. + nc.status = CONNECTING + + // Process the INFO protocol received from the server + err := nc.processExpectedInfo() + if err != nil { + return err + } + + // Send the CONNECT protocol along with the initial PING protocol. + // Wait for the PONG response (or any error that we get from the server). + err = nc.sendConnect() + if err != nil { + return err + } + + // Reset the number of PING sent out + nc.pout = 0 + + go nc.spinUpGoRoutines() + + return nil +} + +// Main connect function. 
Will connect to the nats-server +func (nc *Conn) connect() error { + var returnedErr error + + // Create actual socket connection + // For first connect we walk all servers in the pool and try + // to connect immediately. + nc.mu.Lock() + // The pool may change inside theloop iteration due to INFO protocol. + for i := 0; i < len(nc.srvPool); i++ { + nc.url = nc.srvPool[i].url + + if err := nc.createConn(); err == nil { + // This was moved out of processConnectInit() because + // that function is now invoked from doReconnect() too. + nc.setup() + + err = nc.processConnectInit() + + if err == nil { + nc.srvPool[i].didConnect = true + nc.srvPool[i].reconnects = 0 + returnedErr = nil + break + } else { + returnedErr = err + nc.mu.Unlock() + nc.close(DISCONNECTED, false) + nc.mu.Lock() + nc.url = nil + } + } else { + // Cancel out default connection refused, will trigger the + // No servers error conditional + if matched, _ := regexp.Match(`connection refused`, []byte(err.Error())); matched { + returnedErr = nil + } + } + } + defer nc.mu.Unlock() + + if returnedErr == nil && nc.status != CONNECTED { + returnedErr = ErrNoServers + } + return returnedErr +} + +// This will check to see if the connection should be +// secure. This can be dictated from either end and should +// only be called after the INIT protocol has been received. +func (nc *Conn) checkForSecure() error { + // Check to see if we need to engage TLS + o := nc.Opts + + // Check for mismatch in setups + if o.Secure && !nc.info.TLSRequired { + return ErrSecureConnWanted + } else if nc.info.TLSRequired && !o.Secure { + return ErrSecureConnRequired + } + + // Need to rewrap with bufio + if o.Secure { + nc.makeTLSConn() + } + return nil +} + +// processExpectedInfo will look for the expected first INFO message +// sent when a connection is established. The lock should be held entering. 
+func (nc *Conn) processExpectedInfo() error { + + c := &control{} + + // Read the protocol + err := nc.readOp(c) + if err != nil { + return err + } + + // The nats protocol should send INFO first always. + if c.op != _INFO_OP_ { + return ErrNoInfoReceived + } + + // Parse the protocol + if err := nc.processInfo(c.args); err != nil { + return err + } + + err = nc.checkForSecure() + if err != nil { + return err + } + + return nil +} + +// Sends a protocol control message by queuing into the bufio writer +// and kicking the flush Go routine. These writes are protected. +func (nc *Conn) sendProto(proto string) { + nc.mu.Lock() + nc.bw.WriteString(proto) + nc.kickFlusher() + nc.mu.Unlock() +} + +// Generate a connect protocol message, issuing user/password if +// applicable. The lock is assumed to be held upon entering. +func (nc *Conn) connectProto() (string, error) { + o := nc.Opts + var user, pass, token string + u := nc.url.User + if u != nil { + // if no password, assume username is authToken + if _, ok := u.Password(); !ok { + token = u.Username() + } else { + user = u.Username() + pass, _ = u.Password() + } + } else { + // Take from options (pssibly all empty strings) + user = nc.Opts.User + pass = nc.Opts.Password + token = nc.Opts.Token + } + cinfo := connectInfo{o.Verbose, o.Pedantic, + user, pass, token, + o.Secure, o.Name, LangString, Version, clientProtoInfo} + b, err := json.Marshal(cinfo) + if err != nil { + return _EMPTY_, ErrJsonParse + } + return fmt.Sprintf(conProto, b), nil +} + +// normalizeErr removes the prefix -ERR, trim spaces and remove the quotes. +func normalizeErr(line string) string { + s := strings.ToLower(strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_))) + s = strings.TrimLeft(strings.TrimRight(s, "'"), "'") + return s +} + +// Send a connect protocol message to the server, issue user/password if +// applicable. Will wait for a flush to return from the server for error +// processing. 
+func (nc *Conn) sendConnect() error { + + // Construct the CONNECT protocol string + cProto, err := nc.connectProto() + if err != nil { + return err + } + + // Write the protocol into the buffer + _, err = nc.bw.WriteString(cProto) + if err != nil { + return err + } + + // Add to the buffer the PING protocol + _, err = nc.bw.WriteString(pingProto) + if err != nil { + return err + } + + // Flush the buffer + err = nc.bw.Flush() + if err != nil { + return err + } + + // Now read the response from the server. + br := bufio.NewReaderSize(nc.conn, defaultBufSize) + line, err := br.ReadString('\n') + if err != nil { + return err + } + + // If opts.Verbose is set, handle +OK + if nc.Opts.Verbose && line == okProto { + // Read the rest now... + line, err = br.ReadString('\n') + if err != nil { + return err + } + } + + // We expect a PONG + if line != pongProto { + // But it could be something else, like -ERR + + // Since we no longer use ReadLine(), trim the trailing "\r\n" + line = strings.TrimRight(line, "\r\n") + + // If it's a server error... + if strings.HasPrefix(line, _ERR_OP_) { + // Remove -ERR, trim spaces and quotes, and convert to lower case. + line = normalizeErr(line) + return errors.New("nats: " + line) + } + + // Notify that we got an unexpected protocol. + return errors.New(fmt.Sprintf("nats: expected '%s', got '%s'", _PONG_OP_, line)) + } + + // This is where we are truly connected. + nc.status = CONNECTED + + return nil +} + +// A control protocol line. +type control struct { + op, args string +} + +// Read a control line and process the intended op. +func (nc *Conn) readOp(c *control) error { + br := bufio.NewReaderSize(nc.conn, defaultBufSize) + line, err := br.ReadString('\n') + if err != nil { + return err + } + parseControl(line, c) + return nil +} + +// Parse a control line from the server. 
+func parseControl(line string, c *control) { + toks := strings.SplitN(line, _SPC_, 2) + if len(toks) == 1 { + c.op = strings.TrimSpace(toks[0]) + c.args = _EMPTY_ + } else if len(toks) == 2 { + c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + } else { + c.op = _EMPTY_ + } +} + +// flushReconnectPending will push the pending items that were +// gathered while we were in a RECONNECTING state to the socket. +func (nc *Conn) flushReconnectPendingItems() { + if nc.pending == nil { + return + } + if nc.pending.Len() > 0 { + nc.bw.Write(nc.pending.Bytes()) + } +} + +// Try to reconnect using the option parameters. +// This function assumes we are allowed to reconnect. +func (nc *Conn) doReconnect() { + // We want to make sure we have the other watchers shutdown properly + // here before we proceed past this point. + nc.waitForExits() + + // FIXME(dlc) - We have an issue here if we have + // outstanding flush points (pongs) and they were not + // sent out, but are still in the pipe. + + // Hold the lock manually and release where needed below, + // can't do defer here. + nc.mu.Lock() + + // Clear any queued pongs, e.g. pending flush calls. + nc.clearPendingFlushCalls() + + // Clear any errors. + nc.err = nil + + // Perform appropriate callback if needed for a disconnect. + if nc.Opts.DisconnectedCB != nil { + nc.ach <- func() { nc.Opts.DisconnectedCB(nc) } + } + + for len(nc.srvPool) > 0 { + cur, err := nc.selectNextServer() + if err != nil { + nc.err = err + break + } + + sleepTime := int64(0) + + // Sleep appropriate amount of time before the + // connection attempt if connecting to same server + // we just got disconnected from.. + if time.Since(cur.lastAttempt) < nc.Opts.ReconnectWait { + sleepTime = int64(nc.Opts.ReconnectWait - time.Since(cur.lastAttempt)) + } + + // On Windows, createConn() will take more than a second when no + // server is running at that address. 
So it could be that the + // time elapsed between reconnect attempts is always > than + // the set option. Release the lock to give a chance to a parallel + // nc.Close() to break the loop. + nc.mu.Unlock() + if sleepTime <= 0 { + runtime.Gosched() + } else { + time.Sleep(time.Duration(sleepTime)) + } + nc.mu.Lock() + + // Check if we have been closed first. + if nc.isClosed() { + break + } + + // Mark that we tried a reconnect + cur.reconnects++ + + // Try to create a new connection + err = nc.createConn() + + // Not yet connected, retry... + // Continue to hold the lock + if err != nil { + nc.err = nil + continue + } + + // We are reconnected + nc.Reconnects++ + + // Process connect logic + if nc.err = nc.processConnectInit(); nc.err != nil { + nc.status = RECONNECTING + continue + } + + // Clear out server stats for the server we connected to.. + cur.didConnect = true + cur.reconnects = 0 + + // Send existing subscription state + nc.resendSubscriptions() + + // Now send off and clear pending buffer + nc.flushReconnectPendingItems() + + // Flush the buffer + nc.err = nc.bw.Flush() + if nc.err != nil { + nc.status = RECONNECTING + continue + } + + // Done with the pending buffer + nc.pending = nil + + // This is where we are truly connected. + nc.status = CONNECTED + + // Queue up the reconnect callback. + if nc.Opts.ReconnectedCB != nil { + nc.ach <- func() { nc.Opts.ReconnectedCB(nc) } + } + + // Release lock here, we will return below. + nc.mu.Unlock() + + // Make sure to flush everything + nc.Flush() + + return + } + + // Call into close.. We have no servers left.. + if nc.err == nil { + nc.err = ErrNoServers + } + nc.mu.Unlock() + nc.Close() +} + +// processOpErr handles errors from reading or parsing the protocol. +// The lock should not be held entering this function. 
+func (nc *Conn) processOpErr(err error) { + nc.mu.Lock() + if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() { + nc.mu.Unlock() + return + } + + if nc.Opts.AllowReconnect && nc.status == CONNECTED { + // Set our new status + nc.status = RECONNECTING + if nc.ptmr != nil { + nc.ptmr.Stop() + } + if nc.conn != nil { + nc.bw.Flush() + nc.conn.Close() + nc.conn = nil + } + + // Create a new pending buffer to underpin the bufio Writer while + // we are reconnecting. + nc.pending = &bytes.Buffer{} + nc.bw = bufio.NewWriterSize(nc.pending, nc.Opts.ReconnectBufSize) + + go nc.doReconnect() + nc.mu.Unlock() + return + } + + nc.status = DISCONNECTED + nc.err = err + nc.mu.Unlock() + nc.Close() +} + +// Marker to close the channel to kick out the Go routine. +func (nc *Conn) closeAsyncFunc() asyncCB { + return func() { + nc.mu.Lock() + if nc.ach != nil { + close(nc.ach) + nc.ach = nil + } + nc.mu.Unlock() + } +} + +// asyncDispatch is responsible for calling any async callbacks +func (nc *Conn) asyncDispatch() { + // snapshot since they can change from underneath of us. + nc.mu.Lock() + ach := nc.ach + nc.mu.Unlock() + + // Loop on the channel and process async callbacks. + for { + if f, ok := <-ach; !ok { + return + } else { + f() + } + } +} + +// readLoop() will sit on the socket reading and processing the +// protocol from the server. It will dispatch appropriately based +// on the op type. +func (nc *Conn) readLoop() { + // Release the wait group on exit + defer nc.wg.Done() + + // Create a parseState if needed. + nc.mu.Lock() + if nc.ps == nil { + nc.ps = &parseState{} + } + nc.mu.Unlock() + + // Stack based buffer. + b := make([]byte, defaultBufSize) + + for { + // FIXME(dlc): RWLock here? 
+ nc.mu.Lock() + sb := nc.isClosed() || nc.isReconnecting() + if sb { + nc.ps = &parseState{} + } + conn := nc.conn + nc.mu.Unlock() + + if sb || conn == nil { + break + } + + n, err := conn.Read(b) + if err != nil { + nc.processOpErr(err) + break + } + + if err := nc.parse(b[:n]); err != nil { + nc.processOpErr(err) + break + } + } + // Clear the parseState here.. + nc.mu.Lock() + nc.ps = nil + nc.mu.Unlock() +} + +// waitForMsgs waits on the conditional shared with readLoop and processMsg. +// It is used to deliver messages to asynchronous subscribers. +func (nc *Conn) waitForMsgs(s *Subscription) { + var closed bool + var delivered, max uint64 + + for { + s.mu.Lock() + if s.pHead == nil && !s.closed { + s.pCond.Wait() + } + // Pop the msg off the list + m := s.pHead + if m != nil { + s.pHead = m.next + if s.pHead == nil { + s.pTail = nil + } + s.pMsgs-- + s.pBytes -= len(m.Data) + } + mcb := s.mcb + max = s.max + closed = s.closed + if !s.closed { + s.delivered++ + delivered = s.delivered + } + s.mu.Unlock() + + if closed { + break + } + + // Deliver the message. + if m != nil && (max == 0 || delivered <= max) { + mcb(m) + } + // If we have hit the max for delivered msgs, remove sub. + if max > 0 && delivered >= max { + nc.mu.Lock() + nc.removeSub(s) + nc.mu.Unlock() + break + } + } +} + +// processMsg is called by parse and will place the msg on the +// appropriate channel/pending queue for processing. If the channel is full, +// or the pending queue is over the pending limits, the connection is +// considered a slow consumer. +func (nc *Conn) processMsg(data []byte) { + // Lock from here on out. + nc.mu.Lock() + + // Stats + nc.InMsgs++ + nc.InBytes += uint64(len(data)) + + sub := nc.subs[nc.ps.ma.sid] + if sub == nil { + nc.mu.Unlock() + return + } + + // Copy them into string + subj := string(nc.ps.ma.subject) + reply := string(nc.ps.ma.reply) + + // Doing message create outside of the sub's lock to reduce contention. 
+ // It's possible that we end-up not using the message, but that's ok. + + // FIXME(dlc): Need to copy, should/can do COW? + msgPayload := make([]byte, len(data)) + copy(msgPayload, data) + + // FIXME(dlc): Should we recycle these containers? + m := &Msg{Data: msgPayload, Subject: subj, Reply: reply, Sub: sub} + + sub.mu.Lock() + + // Subscription internal stats (applicable only for non ChanSubscription's) + if sub.typ != ChanSubscription { + sub.pMsgs++ + if sub.pMsgs > sub.pMsgsMax { + sub.pMsgsMax = sub.pMsgs + } + sub.pBytes += len(m.Data) + if sub.pBytes > sub.pBytesMax { + sub.pBytesMax = sub.pBytes + } + + // Check for a Slow Consumer + if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) || + (sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) { + goto slowConsumer + } + } + + // We have two modes of delivery. One is the channel, used by channel + // subscribers and syncSubscribers, the other is a linked list for async. + if sub.mch != nil { + select { + case sub.mch <- m: + default: + goto slowConsumer + } + } else { + // Push onto the async pList + if sub.pHead == nil { + sub.pHead = m + sub.pTail = m + sub.pCond.Signal() + } else { + sub.pTail.next = m + sub.pTail = m + } + } + + // Clear SlowConsumer status. + sub.sc = false + + sub.mu.Unlock() + nc.mu.Unlock() + return + +slowConsumer: + sub.dropped++ + nc.processSlowConsumer(sub) + // Undo stats from above + if sub.typ != ChanSubscription { + sub.pMsgs-- + sub.pBytes -= len(m.Data) + } + sub.mu.Unlock() + nc.mu.Unlock() + return +} + +// processSlowConsumer will set SlowConsumer state and fire the +// async error handler if registered. +func (nc *Conn) processSlowConsumer(s *Subscription) { + nc.err = ErrSlowConsumer + if nc.Opts.AsyncErrorCB != nil && !s.sc { + nc.ach <- func() { nc.Opts.AsyncErrorCB(nc, s, ErrSlowConsumer) } + } + s.sc = true +} + +// processPermissionsViolation is called when the server signals a subject +// permissions violation on either publish or subscribe. 
+func (nc *Conn) processPermissionsViolation(err string) { + nc.err = errors.New("nats: " + err) + if nc.Opts.AsyncErrorCB != nil { + nc.ach <- func() { nc.Opts.AsyncErrorCB(nc, nil, nc.err) } + } +} + +// flusher is a separate Go routine that will process flush requests for the write +// bufio. This allows coalescing of writes to the underlying socket. +func (nc *Conn) flusher() { + // Release the wait group + defer nc.wg.Done() + + // snapshot the bw and conn since they can change from underneath of us. + nc.mu.Lock() + bw := nc.bw + conn := nc.conn + fch := nc.fch + nc.mu.Unlock() + + if conn == nil || bw == nil { + return + } + + for { + if _, ok := <-fch; !ok { + return + } + nc.mu.Lock() + + // Check to see if we should bail out. + if !nc.isConnected() || nc.isConnecting() || bw != nc.bw || conn != nc.conn { + nc.mu.Unlock() + return + } + if bw.Buffered() > 0 { + if err := bw.Flush(); err != nil { + if nc.err == nil { + nc.err = err + } + } + } + nc.mu.Unlock() + } +} + +// processPing will send an immediate pong protocol response to the +// server. The server uses this mechanism to detect dead clients. +func (nc *Conn) processPing() { + nc.sendProto(pongProto) +} + +// processPong is used to process responses to the client's ping +// messages. We use pings for the flush mechanism as well. +func (nc *Conn) processPong() { + var ch chan bool + + nc.mu.Lock() + if len(nc.pongs) > 0 { + ch = nc.pongs[0] + nc.pongs = nc.pongs[1:] + } + nc.pout = 0 + nc.mu.Unlock() + if ch != nil { + ch <- true + } +} + +// processOK is a placeholder for processing OK messages. +func (nc *Conn) processOK() { + // do nothing +} + +// processInfo is used to parse the info messages sent +// from the server. +// This function may update the server pool. 
+func (nc *Conn) processInfo(info string) error { + if info == _EMPTY_ { + return nil + } + if err := json.Unmarshal([]byte(info), &nc.info); err != nil { + return err + } + updated := false + urls := nc.info.ConnectURLs + for _, curl := range urls { + if _, present := nc.urls[curl]; !present { + if err := nc.addURLToPool(fmt.Sprintf("nats://%s", curl), true); err != nil { + continue + } + updated = true + } + } + if updated && !nc.Opts.NoRandomize { + nc.shufflePool() + } + return nil +} + +// processAsyncInfo does the same than processInfo, but is called +// from the parser. Calls processInfo under connection's lock +// protection. +func (nc *Conn) processAsyncInfo(info []byte) { + nc.mu.Lock() + // Ignore errors, we will simply not update the server pool... + nc.processInfo(string(info)) + nc.mu.Unlock() +} + +// LastError reports the last error encountered via the connection. +// It can be used reliably within ClosedCB in order to find out reason +// why connection was closed for example. +func (nc *Conn) LastError() error { + if nc == nil { + return ErrInvalidConnection + } + nc.mu.Lock() + err := nc.err + nc.mu.Unlock() + return err +} + +// processErr processes any error messages from the server and +// sets the connection's lastError. +func (nc *Conn) processErr(e string) { + // Trim, remove quotes, convert to lower case. + e = normalizeErr(e) + + // FIXME(dlc) - process Slow Consumer signals special. + if e == STALE_CONNECTION { + nc.processOpErr(ErrStaleConnection) + } else if strings.HasPrefix(e, PERMISSIONS_ERR) { + nc.processPermissionsViolation(e) + } else { + nc.mu.Lock() + nc.err = errors.New("nats: " + e) + nc.mu.Unlock() + nc.Close() + } +} + +// kickFlusher will send a bool on a channel to kick the +// flush Go routine to flush data to the server. +func (nc *Conn) kickFlusher() { + if nc.bw != nil { + select { + case nc.fch <- true: + default: + } + } +} + +// Publish publishes the data argument to the given subject. 
The data +// argument is left untouched and needs to be correctly interpreted on +// the receiver. +func (nc *Conn) Publish(subj string, data []byte) error { + return nc.publish(subj, _EMPTY_, data) +} + +// PublishMsg publishes the Msg structure, which includes the +// Subject, an optional Reply and an optional Data field. +func (nc *Conn) PublishMsg(m *Msg) error { + if m == nil { + return ErrInvalidMsg + } + return nc.publish(m.Subject, m.Reply, m.Data) +} + +// PublishRequest will perform a Publish() excpecting a response on the +// reply subject. Use Request() for automatically waiting for a response +// inline. +func (nc *Conn) PublishRequest(subj, reply string, data []byte) error { + return nc.publish(subj, reply, data) +} + +// Used for handrolled itoa +const digits = "0123456789" + +// publish is the internal function to publish messages to a nats-server. +// Sends a protocol data message by queuing into the bufio writer +// and kicking the flush go routine. These writes should be protected. +func (nc *Conn) publish(subj, reply string, data []byte) error { + if nc == nil { + return ErrInvalidConnection + } + if subj == "" { + return ErrBadSubject + } + nc.mu.Lock() + + // Proactively reject payloads over the threshold set by server. + var msgSize int64 + msgSize = int64(len(data)) + if msgSize > nc.info.MaxPayload { + nc.mu.Unlock() + return ErrMaxPayload + } + + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + + // Check if we are reconnecting, and if so check if + // we have exceeded our reconnect outbound buffer limits. + if nc.isReconnecting() { + // Flush to underlying buffer. + nc.bw.Flush() + // Check if we are over + if nc.pending.Len() >= nc.Opts.ReconnectBufSize { + nc.mu.Unlock() + return ErrReconnectBufExceeded + } + } + + msgh := nc.scratch[:len(_PUB_P_)] + msgh = append(msgh, subj...) + msgh = append(msgh, ' ') + if reply != "" { + msgh = append(msgh, reply...) 
+ msgh = append(msgh, ' ') + } + + // We could be smarter here, but simple loop is ok, + // just avoid strconv in fast path + // FIXME(dlc) - Find a better way here. + // msgh = strconv.AppendInt(msgh, int64(len(data)), 10) + + var b [12]byte + var i = len(b) + if len(data) > 0 { + for l := len(data); l > 0; l /= 10 { + i -= 1 + b[i] = digits[l%10] + } + } else { + i -= 1 + b[i] = digits[0] + } + + msgh = append(msgh, b[i:]...) + msgh = append(msgh, _CRLF_...) + + // FIXME, do deadlines here + _, err := nc.bw.Write(msgh) + if err == nil { + _, err = nc.bw.Write(data) + } + if err == nil { + _, err = nc.bw.WriteString(_CRLF_) + } + if err != nil { + nc.mu.Unlock() + return err + } + + nc.OutMsgs++ + nc.OutBytes += uint64(len(data)) + + if len(nc.fch) == 0 { + nc.kickFlusher() + } + nc.mu.Unlock() + return nil +} + +// Request will create an Inbox and perform a Request() call +// with the Inbox reply and return the first reply received. +// This is optimized for the case of multiple responses. +func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) { + inbox := NewInbox() + ch := make(chan *Msg, RequestChanLen) + + s, err := nc.subscribe(inbox, _EMPTY_, nil, ch) + if err != nil { + return nil, err + } + s.AutoUnsubscribe(1) + defer s.Unsubscribe() + + err = nc.PublishRequest(subj, inbox, data) + if err != nil { + return nil, err + } + return s.NextMsg(timeout) +} + +// InboxPrefix is the prefix for all inbox subjects. +const InboxPrefix = "_INBOX." +const inboxPrefixLen = len(InboxPrefix) + +// NewInbox will return an inbox string which can be used for directed replies from +// subscribers. These are guaranteed to be unique, but can be shared and subscribed +// to by others. +func NewInbox() string { + var b [inboxPrefixLen + 22]byte + pres := b[:inboxPrefixLen] + copy(pres, InboxPrefix) + ns := b[inboxPrefixLen:] + copy(ns, nuid.Next()) + return string(b[:]) +} + +// Subscribe will express interest in the given subject. 
The subject +// can have wildcards (partial:*, full:>). Messages will be delivered +// to the associated MsgHandler. If no MsgHandler is given, the +// subscription is a synchronous subscription and can be polled via +// Subscription.NextMsg(). +func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) { + return nc.subscribe(subj, _EMPTY_, cb, nil) +} + +// ChanSubscribe will place all messages received on the channel. +// You should not close the channel until sub.Unsubscribe() has been called. +func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) { + return nc.subscribe(subj, _EMPTY_, nil, ch) +} + +// ChanQueueSubscribe will place all messages received on the channel. +// You should not close the channel until sub.Unsubscribe() has been called. +func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) { + return nc.subscribe(subj, group, nil, ch) +} + +// SubscribeSync is syntactic sugar for Subscribe(subject, nil). +func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) { + if nc == nil { + return nil, ErrInvalidConnection + } + mch := make(chan *Msg, nc.Opts.SubChanLen) + s, e := nc.subscribe(subj, _EMPTY_, nil, mch) + if s != nil { + s.typ = SyncSubscription + } + return s, e +} + +// QueueSubscribe creates an asynchronous queue subscriber on the given subject. +// All subscribers with the same queue name will form the queue group and +// only one member of the group will be selected to receive any given +// message asynchronously. +func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) { + return nc.subscribe(subj, queue, cb, nil) +} + +// QueueSubscribeSync creates a synchronous queue subscriber on the given +// subject. All subscribers with the same queue name will form the queue +// group and only one member of the group will be selected to receive any +// given message synchronously. 
+func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) { + mch := make(chan *Msg, nc.Opts.SubChanLen) + s, e := nc.subscribe(subj, queue, nil, mch) + if s != nil { + s.typ = SyncSubscription + } + return s, e +} + +// QueueSubscribeSyncWithChan is syntactic sugar for ChanQueueSubscribe(subject, group, ch). +func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) { + return nc.subscribe(subj, queue, nil, ch) +} + +// subscribe is the internal subscribe function that indicates interest in a subject. +func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg) (*Subscription, error) { + if nc == nil { + return nil, ErrInvalidConnection + } + nc.mu.Lock() + // ok here, but defer is generally expensive + defer nc.mu.Unlock() + defer nc.kickFlusher() + + // Check for some error conditions. + if nc.isClosed() { + return nil, ErrConnectionClosed + } + + if cb == nil && ch == nil { + return nil, ErrBadSubscription + } + + sub := &Subscription{Subject: subj, Queue: queue, mcb: cb, conn: nc} + // Set pending limits. + sub.pMsgsLimit = DefaultSubPendingMsgsLimit + sub.pBytesLimit = DefaultSubPendingBytesLimit + + // If we have an async callback, start up a sub specific + // Go routine to deliver the messages. + if cb != nil { + sub.typ = AsyncSubscription + sub.pCond = sync.NewCond(&sub.mu) + go nc.waitForMsgs(sub) + } else { + sub.typ = ChanSubscription + sub.mch = ch + } + + sub.sid = atomic.AddInt64(&nc.ssid, 1) + nc.subs[sub.sid] = sub + + // We will send these for all subs when we reconnect + // so that we can suppress here. 
+ if !nc.isReconnecting() { + nc.bw.WriteString(fmt.Sprintf(subProto, subj, queue, sub.sid)) + } + return sub, nil +} + +// Lock for nc should be held here upon entry +func (nc *Conn) removeSub(s *Subscription) { + delete(nc.subs, s.sid) + s.mu.Lock() + defer s.mu.Unlock() + // Release callers on NextMsg for SyncSubscription only + if s.mch != nil && s.typ == SyncSubscription { + close(s.mch) + } + s.mch = nil + + // Mark as invalid + s.conn = nil + s.closed = true + if s.pCond != nil { + s.pCond.Broadcast() + } +} + +// SubscriptionType is the type of the Subscription. +type SubscriptionType int + +// The different types of subscription types. +const ( + AsyncSubscription = SubscriptionType(iota) + SyncSubscription + ChanSubscription + NilSubscription +) + +// Type returns the type of Subscription. +func (s *Subscription) Type() SubscriptionType { + if s == nil { + return NilSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + return s.typ +} + +// IsValid returns a boolean indicating whether the subscription +// is still active. This will return false if the subscription has +// already been closed. +func (s *Subscription) IsValid() bool { + if s == nil { + return false + } + s.mu.Lock() + defer s.mu.Unlock() + return s.conn != nil +} + +// Unsubscribe will remove interest in the given subject. +func (s *Subscription) Unsubscribe() error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + s.mu.Unlock() + if conn == nil { + return ErrBadSubscription + } + return conn.unsubscribe(s, 0) +} + +// AutoUnsubscribe will issue an automatic Unsubscribe that is +// processed by the server when max messages have been received. +// This can be useful when sending a request to an unknown number +// of subscribers. Request() uses this functionality. 
+func (s *Subscription) AutoUnsubscribe(max int) error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + s.mu.Unlock() + if conn == nil { + return ErrBadSubscription + } + return conn.unsubscribe(s, max) +} + +// unsubscribe performs the low level unsubscribe to the server. +// Use Subscription.Unsubscribe() +func (nc *Conn) unsubscribe(sub *Subscription, max int) error { + nc.mu.Lock() + // ok here, but defer is expensive + defer nc.mu.Unlock() + defer nc.kickFlusher() + + if nc.isClosed() { + return ErrConnectionClosed + } + + s := nc.subs[sub.sid] + // Already unsubscribed + if s == nil { + return nil + } + + maxStr := _EMPTY_ + if max > 0 { + s.max = uint64(max) + maxStr = strconv.Itoa(max) + } else { + nc.removeSub(s) + } + // We will send these for all subs when we reconnect + // so that we can suppress here. + if !nc.isReconnecting() { + nc.bw.WriteString(fmt.Sprintf(unsubProto, s.sid, maxStr)) + } + return nil +} + +// NextMsg() will return the next message available to a synchronous subscriber +// or block until one is available. A timeout can be used to return when no +// message has been delivered. +func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) { + if s == nil { + return nil, ErrBadSubscription + } + s.mu.Lock() + if s.connClosed { + s.mu.Unlock() + return nil, ErrConnectionClosed + } + if s.mch == nil { + if s.max > 0 && s.delivered >= s.max { + s.mu.Unlock() + return nil, ErrMaxMessages + } else if s.closed { + s.mu.Unlock() + return nil, ErrBadSubscription + } + } + if s.mcb != nil { + s.mu.Unlock() + return nil, ErrSyncSubRequired + } + if s.sc { + s.sc = false + s.mu.Unlock() + return nil, ErrSlowConsumer + } + + // snapshot + nc := s.conn + mch := s.mch + max := s.max + s.mu.Unlock() + + var ok bool + var msg *Msg + + t := time.NewTimer(timeout) + defer t.Stop() + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + // Update some stats. 
+ s.mu.Lock() + s.delivered++ + delivered := s.delivered + if s.typ == SyncSubscription { + s.pMsgs-- + s.pBytes -= len(msg.Data) + } + s.mu.Unlock() + + if max > 0 { + if delivered > max { + return nil, ErrMaxMessages + } + // Remove subscription if we have reached max. + if delivered == max { + nc.mu.Lock() + nc.removeSub(s) + nc.mu.Unlock() + } + } + + case <-t.C: + return nil, ErrTimeout + } + + return msg, nil +} + +// Queued returns the number of queued messages in the client for this subscription. +// DEPRECATED: Use Pending() +func (s *Subscription) QueuedMsgs() (int, error) { + m, _, err := s.Pending() + return int(m), err +} + +// Pending returns the number of queued messages and queued bytes in the client for this subscription. +func (s *Subscription) Pending() (int, int, error) { + if s == nil { + return -1, -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, -1, ErrBadSubscription + } + if s.typ == ChanSubscription { + return -1, -1, ErrTypeSubscription + } + return s.pMsgs, s.pBytes, nil +} + +// MaxPending returns the maximum number of queued messages and queued bytes seen so far. +func (s *Subscription) MaxPending() (int, int, error) { + if s == nil { + return -1, -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, -1, ErrBadSubscription + } + if s.typ == ChanSubscription { + return -1, -1, ErrTypeSubscription + } + return s.pMsgsMax, s.pBytesMax, nil +} + +// ClearMaxPending resets the maximums seen so far. 
+func (s *Subscription) ClearMaxPending() error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return ErrBadSubscription + } + if s.typ == ChanSubscription { + return ErrTypeSubscription + } + s.pMsgsMax, s.pBytesMax = 0, 0 + return nil +} + +// Pending Limits +const ( + DefaultSubPendingMsgsLimit = 65536 + DefaultSubPendingBytesLimit = 65536 * 1024 +) + +// PendingLimits returns the current limits for this subscription. +// If no error is returned, a negative value indicates that the +// given metric is not limited. +func (s *Subscription) PendingLimits() (int, int, error) { + if s == nil { + return -1, -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, -1, ErrBadSubscription + } + if s.typ == ChanSubscription { + return -1, -1, ErrTypeSubscription + } + return s.pMsgsLimit, s.pBytesLimit, nil +} + +// SetPendingLimits sets the limits for pending msgs and bytes for this subscription. +// Zero is not allowed. Any negative value means that the given metric is not limited. +func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return ErrBadSubscription + } + if s.typ == ChanSubscription { + return ErrTypeSubscription + } + if msgLimit == 0 || bytesLimit == 0 { + return ErrInvalidArg + } + s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit + return nil +} + +// Delivered returns the number of delivered messages for this subscription. +func (s *Subscription) Delivered() (int64, error) { + if s == nil { + return -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, ErrBadSubscription + } + return int64(s.delivered), nil +} + +// Dropped returns the number of known dropped messages for this subscription. +// This will correspond to messages dropped by violations of PendingLimits. 
If +// the server declares the connection a SlowConsumer, this number may not be +// valid. +func (s *Subscription) Dropped() (int, error) { + if s == nil { + return -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, ErrBadSubscription + } + return s.dropped, nil +} + +// FIXME: This is a hack +// removeFlushEntry is needed when we need to discard queued up responses +// for our pings as part of a flush call. This happens when we have a flush +// call outstanding and we call close. +func (nc *Conn) removeFlushEntry(ch chan bool) bool { + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.pongs == nil { + return false + } + for i, c := range nc.pongs { + if c == ch { + nc.pongs[i] = nil + return true + } + } + return false +} + +// The lock must be held entering this function. +func (nc *Conn) sendPing(ch chan bool) { + nc.pongs = append(nc.pongs, ch) + nc.bw.WriteString(pingProto) + // Flush in place. + nc.bw.Flush() +} + +// This will fire periodically and send a client origin +// ping to the server. Will also check that we have received +// responses from the server. +func (nc *Conn) processPingTimer() { + nc.mu.Lock() + + if nc.status != CONNECTED { + nc.mu.Unlock() + return + } + + // Check for violation + nc.pout++ + if nc.pout > nc.Opts.MaxPingsOut { + nc.mu.Unlock() + nc.processOpErr(ErrStaleConnection) + return + } + + nc.sendPing(nil) + nc.ptmr.Reset(nc.Opts.PingInterval) + nc.mu.Unlock() +} + +// FlushTimeout allows a Flush operation to have an associated timeout. +func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) { + if nc == nil { + return ErrInvalidConnection + } + if timeout <= 0 { + return ErrBadTimeout + } + + nc.mu.Lock() + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + t := time.NewTimer(timeout) + defer t.Stop() + + ch := make(chan bool) // FIXME: Inefficient? 
+ nc.sendPing(ch) + nc.mu.Unlock() + + select { + case _, ok := <-ch: + if !ok { + err = ErrConnectionClosed + } else { + close(ch) + } + case <-t.C: + err = ErrTimeout + } + + if err != nil { + nc.removeFlushEntry(ch) + } + return +} + +// Flush will perform a round trip to the server and return when it +// receives the internal reply. +func (nc *Conn) Flush() error { + return nc.FlushTimeout(60 * time.Second) +} + +// Buffered will return the number of bytes buffered to be sent to the server. +// FIXME(dlc) take into account disconnected state. +func (nc *Conn) Buffered() (int, error) { + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.isClosed() || nc.bw == nil { + return -1, ErrConnectionClosed + } + return nc.bw.Buffered(), nil +} + +// resendSubscriptions will send our subscription state back to the +// server. Used in reconnects +func (nc *Conn) resendSubscriptions() { + for _, s := range nc.subs { + adjustedMax := uint64(0) + s.mu.Lock() + if s.max > 0 { + if s.delivered < s.max { + adjustedMax = s.max - s.delivered + } + + // adjustedMax could be 0 here if the number of delivered msgs + // reached the max, if so unsubscribe. + if adjustedMax == 0 { + s.mu.Unlock() + nc.bw.WriteString(fmt.Sprintf(unsubProto, s.sid, _EMPTY_)) + continue + } + } + s.mu.Unlock() + + nc.bw.WriteString(fmt.Sprintf(subProto, s.Subject, s.Queue, s.sid)) + if adjustedMax > 0 { + maxStr := strconv.Itoa(int(adjustedMax)) + nc.bw.WriteString(fmt.Sprintf(unsubProto, s.sid, maxStr)) + } + } +} + +// This will clear any pending flush calls and release pending calls. +// Lock is assumed to be held by the caller. +func (nc *Conn) clearPendingFlushCalls() { + // Clear any queued pongs, e.g. pending flush calls. + for _, ch := range nc.pongs { + if ch != nil { + close(ch) + } + } + nc.pongs = nil +} + +// Low level close call that will do correct cleanup and set +// desired status. Also controls whether user defined callbacks +// will be triggered. 
The lock should not be held entering this +// function. This function will handle the locking manually. +func (nc *Conn) close(status Status, doCBs bool) { + nc.mu.Lock() + if nc.isClosed() { + nc.status = status + nc.mu.Unlock() + return + } + nc.status = CLOSED + + // Kick the Go routines so they fall out. + nc.kickFlusher() + nc.mu.Unlock() + + nc.mu.Lock() + + // Clear any queued pongs, e.g. pending flush calls. + nc.clearPendingFlushCalls() + + if nc.ptmr != nil { + nc.ptmr.Stop() + } + + // Go ahead and make sure we have flushed the outbound + if nc.conn != nil { + nc.bw.Flush() + defer nc.conn.Close() + } + + // Close sync subscriber channels and release any + // pending NextMsg() calls. + for _, s := range nc.subs { + s.mu.Lock() + + // Release callers on NextMsg for SyncSubscription only + if s.mch != nil && s.typ == SyncSubscription { + close(s.mch) + } + s.mch = nil + // Mark as invalid, for signalling to deliverMsgs + s.closed = true + // Mark connection closed in subscription + s.connClosed = true + // If we have an async subscription, signals it to exit + if s.typ == AsyncSubscription && s.pCond != nil { + s.pCond.Signal() + } + + s.mu.Unlock() + } + nc.subs = nil + + // Perform appropriate callback if needed for a disconnect. + if doCBs { + if nc.Opts.DisconnectedCB != nil && nc.conn != nil { + nc.ach <- func() { nc.Opts.DisconnectedCB(nc) } + } + if nc.Opts.ClosedCB != nil { + nc.ach <- func() { nc.Opts.ClosedCB(nc) } + } + nc.ach <- nc.closeAsyncFunc() + } + nc.status = status + nc.mu.Unlock() +} + +// Close will close the connection to the server. This call will release +// all blocking calls, such as Flush() and NextMsg() +func (nc *Conn) Close() { + nc.close(CLOSED, true) +} + +// IsClosed tests if a Conn has been closed. +func (nc *Conn) IsClosed() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.isClosed() +} + +// IsReconnecting tests if a Conn is reconnecting. 
+func (nc *Conn) IsReconnecting() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.isReconnecting() +} + +// IsConnected tests if a Conn is connected. +func (nc *Conn) IsConnected() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.isConnected() +} + +// caller must lock +func (nc *Conn) getServers(implicitOnly bool) []string { + poolSize := len(nc.srvPool) + var servers = make([]string, 0) + for i := 0; i < poolSize; i++ { + if implicitOnly && !nc.srvPool[i].isImplicit { + continue + } + url := nc.srvPool[i].url + servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host)) + } + return servers +} + +// Servers returns the list of known server urls, including additional +// servers discovered after a connection has been established. If +// authentication is enabled, use UserInfo or Token when connecting with +// these urls. +func (nc *Conn) Servers() []string { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.getServers(false) +} + +// DiscoveredServers returns only the server urls that have been discovered +// after a connection has been established. If authentication is enabled, +// use UserInfo or Token when connecting with these urls. +func (nc *Conn) DiscoveredServers() []string { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.getServers(true) +} + +// Status returns the current state of the connection. +func (nc *Conn) Status() Status { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.status +} + +// Test if Conn has been closed Lock is assumed held. +func (nc *Conn) isClosed() bool { + return nc.status == CLOSED +} + +// Test if Conn is in the process of connecting +func (nc *Conn) isConnecting() bool { + return nc.status == CONNECTING +} + +// Test if Conn is being reconnected. +func (nc *Conn) isReconnecting() bool { + return nc.status == RECONNECTING +} + +// Test if Conn is connected or connecting. 
+func (nc *Conn) isConnected() bool { + return nc.status == CONNECTED +} + +// Stats will return a race safe copy of the Statistics section for the connection. +func (nc *Conn) Stats() Statistics { + nc.mu.Lock() + defer nc.mu.Unlock() + stats := nc.Statistics + return stats +} + +// MaxPayload returns the size limit that a message payload can have. +// This is set by the server configuration and delivered to the client +// upon connect. +func (nc *Conn) MaxPayload() int64 { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.info.MaxPayload +} + +// AuthRequired will return if the connected server requires authorization. +func (nc *Conn) AuthRequired() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.info.AuthRequired +} + +// TLSRequired will return if the connected server requires TLS connections. +func (nc *Conn) TLSRequired() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.info.TLSRequired +} diff --git a/vendor/github.com/nats-io/go-nats/netchan.go b/vendor/github.com/nats-io/go-nats/netchan.go new file mode 100644 index 000000000..337674e04 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/netchan.go @@ -0,0 +1,100 @@ +// Copyright 2013-2014 Apcera Inc. All rights reserved. + +package nats + +import ( + "errors" + "reflect" +) + +// This allows the functionality for network channels by binding send and receive Go chans +// to subjects and optionally queue groups. +// Data will be encoded and decoded via the EncodedConn and its associated encoders. + +// BindSendChan binds a channel for send operations to NATS. +func (c *EncodedConn) BindSendChan(subject string, channel interface{}) error { + chVal := reflect.ValueOf(channel) + if chVal.Kind() != reflect.Chan { + return ErrChanArg + } + go chPublish(c, chVal, subject) + return nil +} + +// Publish all values that arrive on the channel until it is closed or we +// encounter an error. 
+func chPublish(c *EncodedConn, chVal reflect.Value, subject string) { + for { + val, ok := chVal.Recv() + if !ok { + // Channel has most likely been closed. + return + } + if e := c.Publish(subject, val.Interface()); e != nil { + // Do this under lock. + c.Conn.mu.Lock() + defer c.Conn.mu.Unlock() + + if c.Conn.Opts.AsyncErrorCB != nil { + // FIXME(dlc) - Not sure this is the right thing to do. + // FIXME(ivan) - If the connection is not yet closed, try to schedule the callback + if c.Conn.isClosed() { + go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) + } else { + c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) } + } + } + return + } + } +} + +// BindRecvChan binds a channel for receive operations from NATS. +func (c *EncodedConn) BindRecvChan(subject string, channel interface{}) (*Subscription, error) { + return c.bindRecvChan(subject, _EMPTY_, channel) +} + +// BindRecvQueueChan binds a channel for queue-based receive operations from NATS. +func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel interface{}) (*Subscription, error) { + return c.bindRecvChan(subject, queue, channel) +} + +// Internal function to bind receive operations for a channel. 
+func (c *EncodedConn) bindRecvChan(subject, queue string, channel interface{}) (*Subscription, error) { + chVal := reflect.ValueOf(channel) + if chVal.Kind() != reflect.Chan { + return nil, ErrChanArg + } + argType := chVal.Type().Elem() + + cb := func(m *Msg) { + var oPtr reflect.Value + if argType.Kind() != reflect.Ptr { + oPtr = reflect.New(argType) + } else { + oPtr = reflect.New(argType.Elem()) + } + if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { + c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error()) + if c.Conn.Opts.AsyncErrorCB != nil { + c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) } + } + return + } + if argType.Kind() != reflect.Ptr { + oPtr = reflect.Indirect(oPtr) + } + // This is a bit hacky, but in this instance we may be trying to send to a closed channel. + // and the user does not know when it is safe to close the channel. + defer func() { + // If we have panicked, recover and close the subscription. + if r := recover(); r != nil { + m.Sub.Unsubscribe() + } + }() + // Actually do the send to the channel. + chVal.Send(oPtr) + } + + return c.Conn.subscribe(subject, queue, cb, nil) +} diff --git a/vendor/github.com/nats-io/go-nats/parser.go b/vendor/github.com/nats-io/go-nats/parser.go new file mode 100644 index 000000000..0911954a1 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/parser.go @@ -0,0 +1,470 @@ +// Copyright 2012-2014 Apcera Inc. All rights reserved. 
+ +package nats + +import ( + "fmt" +) + +type msgArg struct { + subject []byte + reply []byte + sid int64 + size int +} + +const MAX_CONTROL_LINE_SIZE = 1024 + +type parseState struct { + state int + as int + drop int + ma msgArg + argBuf []byte + msgBuf []byte + scratch [MAX_CONTROL_LINE_SIZE]byte +} + +const ( + OP_START = iota + OP_PLUS + OP_PLUS_O + OP_PLUS_OK + OP_MINUS + OP_MINUS_E + OP_MINUS_ER + OP_MINUS_ERR + OP_MINUS_ERR_SPC + MINUS_ERR_ARG + OP_M + OP_MS + OP_MSG + OP_MSG_SPC + MSG_ARG + MSG_PAYLOAD + MSG_END + OP_P + OP_PI + OP_PIN + OP_PING + OP_PO + OP_PON + OP_PONG + OP_I + OP_IN + OP_INF + OP_INFO + OP_INFO_SPC + INFO_ARG +) + +// parse is the fast protocol parser engine. +func (nc *Conn) parse(buf []byte) error { + var i int + var b byte + + // Move to loop instead of range syntax to allow jumping of i + for i = 0; i < len(buf); i++ { + b = buf[i] + + switch nc.ps.state { + case OP_START: + switch b { + case 'M', 'm': + nc.ps.state = OP_M + case 'P', 'p': + nc.ps.state = OP_P + case '+': + nc.ps.state = OP_PLUS + case '-': + nc.ps.state = OP_MINUS + case 'I', 'i': + nc.ps.state = OP_I + default: + goto parseErr + } + case OP_M: + switch b { + case 'S', 's': + nc.ps.state = OP_MS + default: + goto parseErr + } + case OP_MS: + switch b { + case 'G', 'g': + nc.ps.state = OP_MSG + default: + goto parseErr + } + case OP_MSG: + switch b { + case ' ', '\t': + nc.ps.state = OP_MSG_SPC + default: + goto parseErr + } + case OP_MSG_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = MSG_ARG + nc.ps.as = i + } + case MSG_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + if err := nc.processMsgArgs(arg); err != nil { + return err + } + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD + + // jump ahead with the index. If this overruns + // what is left we fall out and process split + // buffer. 
+ i = nc.ps.as + nc.ps.ma.size - 1 + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + case MSG_PAYLOAD: + if nc.ps.msgBuf != nil { + if len(nc.ps.msgBuf) >= nc.ps.ma.size { + nc.processMsg(nc.ps.msgBuf) + nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END + } else { + // copy as much as we can to the buffer and skip ahead. + toCopy := nc.ps.ma.size - len(nc.ps.msgBuf) + avail := len(buf) - i + + if avail < toCopy { + toCopy = avail + } + + if toCopy > 0 { + start := len(nc.ps.msgBuf) + // This is needed for copy to work. + nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy] + copy(nc.ps.msgBuf[start:], buf[i:i+toCopy]) + // Update our index + i = (i + toCopy) - 1 + } else { + nc.ps.msgBuf = append(nc.ps.msgBuf, b) + } + } + } else if i-nc.ps.as >= nc.ps.ma.size { + nc.processMsg(buf[nc.ps.as:i]) + nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END + } + case MSG_END: + switch b { + case '\n': + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + continue + } + case OP_PLUS: + switch b { + case 'O', 'o': + nc.ps.state = OP_PLUS_O + default: + goto parseErr + } + case OP_PLUS_O: + switch b { + case 'K', 'k': + nc.ps.state = OP_PLUS_OK + default: + goto parseErr + } + case OP_PLUS_OK: + switch b { + case '\n': + nc.processOK() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_MINUS: + switch b { + case 'E', 'e': + nc.ps.state = OP_MINUS_E + default: + goto parseErr + } + case OP_MINUS_E: + switch b { + case 'R', 'r': + nc.ps.state = OP_MINUS_ER + default: + goto parseErr + } + case OP_MINUS_ER: + switch b { + case 'R', 'r': + nc.ps.state = OP_MINUS_ERR + default: + goto parseErr + } + case OP_MINUS_ERR: + switch b { + case ' ', '\t': + nc.ps.state = OP_MINUS_ERR_SPC + default: + goto parseErr + } + case OP_MINUS_ERR_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = MINUS_ERR_ARG + nc.ps.as = i + } + case MINUS_ERR_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var 
arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + nc.ps.argBuf = nil + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + nc.processErr(string(arg)) + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + case OP_P: + switch b { + case 'I', 'i': + nc.ps.state = OP_PI + case 'O', 'o': + nc.ps.state = OP_PO + default: + goto parseErr + } + case OP_PO: + switch b { + case 'N', 'n': + nc.ps.state = OP_PON + default: + goto parseErr + } + case OP_PON: + switch b { + case 'G', 'g': + nc.ps.state = OP_PONG + default: + goto parseErr + } + case OP_PONG: + switch b { + case '\n': + nc.processPong() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_PI: + switch b { + case 'N', 'n': + nc.ps.state = OP_PIN + default: + goto parseErr + } + case OP_PIN: + switch b { + case 'G', 'g': + nc.ps.state = OP_PING + default: + goto parseErr + } + case OP_PING: + switch b { + case '\n': + nc.processPing() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_I: + switch b { + case 'N', 'n': + nc.ps.state = OP_IN + default: + goto parseErr + } + case OP_IN: + switch b { + case 'F', 'f': + nc.ps.state = OP_INF + default: + goto parseErr + } + case OP_INF: + switch b { + case 'O', 'o': + nc.ps.state = OP_INFO + default: + goto parseErr + } + case OP_INFO: + switch b { + case ' ', '\t': + nc.ps.state = OP_INFO_SPC + default: + goto parseErr + } + case OP_INFO_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = INFO_ARG + nc.ps.as = i + } + case INFO_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + nc.ps.argBuf = nil + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + nc.processAsyncInfo(arg) + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + default: + goto parseErr + } + } + // Check for split 
buffer scenarios + if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && nc.ps.argBuf == nil { + nc.ps.argBuf = nc.ps.scratch[:0] + nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...) + // FIXME, check max len + } + // Check for split msg + if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil { + // We need to clone the msgArg if it is still referencing the + // read buffer and we are not able to process the msg. + if nc.ps.argBuf == nil { + nc.cloneMsgArg() + } + + // If we will overflow the scratch buffer, just create a + // new buffer to hold the split message. + if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) { + lrem := len(buf[nc.ps.as:]) + + nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size) + copy(nc.ps.msgBuf, buf[nc.ps.as:]) + } else { + nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)] + nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...) + } + } + + return nil + +parseErr: + return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:]) +} + +// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but +// we need to hold onto it into the next read. +func (nc *Conn) cloneMsgArg() { + nc.ps.argBuf = nc.ps.scratch[:0] + nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...) + nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...) 
+ nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)] + if nc.ps.ma.reply != nil { + nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):] + } +} + +const argsLenMax = 4 + +func (nc *Conn) processMsgArgs(arg []byte) error { + // Unroll splitArgs to avoid runtime/heap issues + a := [argsLenMax][]byte{} + args := a[:0] + start := -1 + for i, b := range arg { + switch b { + case ' ', '\t', '\r', '\n': + if start >= 0 { + args = append(args, arg[start:i]) + start = -1 + } + default: + if start < 0 { + start = i + } + } + } + if start >= 0 { + args = append(args, arg[start:]) + } + + switch len(args) { + case 3: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = nil + nc.ps.ma.size = int(parseInt64(args[2])) + case 4: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = args[2] + nc.ps.ma.size = int(parseInt64(args[3])) + default: + return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg) + } + if nc.ps.ma.sid < 0 { + return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg) + } + if nc.ps.ma.size < 0 { + return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg) + } + return nil +} + +// Ascii numbers 0-9 +const ( + ascii_0 = 48 + ascii_9 = 57 +) + +// parseInt64 expects decimal positive numbers. We +// return -1 to signal error +func parseInt64(d []byte) (n int64) { + if len(d) == 0 { + return -1 + } + for _, dec := range d { + if dec < ascii_0 || dec > ascii_9 { + return -1 + } + n = n*10 + (int64(dec) - ascii_0) + } + return n +} diff --git a/vendor/github.com/nats-io/go-nats/util/tls.go b/vendor/github.com/nats-io/go-nats/util/tls.go new file mode 100644 index 000000000..51da0b88c --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/util/tls.go @@ -0,0 +1,37 @@ +// Copyright 2016 Apcera Inc. All rights reserved. +// +build go1.7 + +package util + +import ( + "crypto/tls" +) + +// CloneTLSConfig returns a copy of c. Only the exported fields are copied. 
+// This is temporary, until this is provided by the language. +// https://go-review.googlesource.com/#/c/28075/ +func CloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/nats-io/go-nats/util/tls_pre17.go b/vendor/github.com/nats-io/go-nats/util/tls_pre17.go new file mode 100644 index 000000000..db198ae31 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/util/tls_pre17.go @@ -0,0 +1,35 @@ +// Copyright 2016 Apcera Inc. All rights reserved. +// +build go1.5,!go1.7 + +package util + +import ( + "crypto/tls" +) + +// CloneTLSConfig returns a copy of c. Only the exported fields are copied. +// This is temporary, until this is provided by the language. 
+// https://go-review.googlesource.com/#/c/28075/ +func CloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 549f66740..0ec019075 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -61,6 +61,24 @@ "revision": "87d4004f2ab62d0d255e0a38f1680aa534549fe3", "revisionTime": "2016-06-10T14:06:02+03:00" }, + { + "checksumSHA1": "3yco0089CSJ4qbyUccpbDC2+dPg=", + "path": "github.com/gogo/protobuf/gogoproto", + "revision": "84af2615df1ba1d35cc975ba94b64ee67d6c196e", + "revisionTime": "2016-12-20T17:02:12Z" + }, + { + "checksumSHA1": "6ZxSmrIx3Jd15aou16oG0HPylP4=", + "path": "github.com/gogo/protobuf/proto", + "revision": "84af2615df1ba1d35cc975ba94b64ee67d6c196e", + "revisionTime": "2016-12-20T17:02:12Z" + }, + { + "checksumSHA1": "EaY86bsi1nucvO0/UKvp/A72aC8=", + "path": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", + "revision": "84af2615df1ba1d35cc975ba94b64ee67d6c196e", + "revisionTime": "2016-12-20T17:02:12Z" + }, { "checksumSHA1": "W+E/2xXcE1GmJ0Qb784ald0Fn6I=", "path": "github.com/golang/snappy", @@ -199,6 +217,36 @@ "revision": "e82e73b775766b9011503e80e6772fc32b9afc5b", "revisionTime": "2016-12-19T23:17:30Z" }, + { + "checksumSHA1": "zvQr4zOz1/g/Fui6co0sctxrJ28=", + "path": "github.com/nats-io/go-nats", + "revision": 
"6b6bf392d34d01f57cc563ae123f00c13778bd57", + "revisionTime": "2016-11-20T20:21:26Z" + }, + { + "checksumSHA1": "8Z7LvciW35L7fbH0IaSoBMae+3o=", + "path": "github.com/nats-io/go-nats-streaming", + "revision": "077898146bfbb849a620202e7e5eaaf707492206", + "revisionTime": "2016-12-16T19:10:29Z" + }, + { + "checksumSHA1": "16sV1MZ45lYmv2tXfKHCz+ZLvyY=", + "path": "github.com/nats-io/go-nats-streaming/pb", + "revision": "077898146bfbb849a620202e7e5eaaf707492206", + "revisionTime": "2016-12-16T19:10:29Z" + }, + { + "checksumSHA1": "Q2c9uTEIGhSIddv5pntYdFNtUdk=", + "path": "github.com/nats-io/go-nats/encoders/builtin", + "revision": "6b6bf392d34d01f57cc563ae123f00c13778bd57", + "revisionTime": "2016-11-20T20:21:26Z" + }, + { + "checksumSHA1": "LhbIA/oMWpscEPPK9Wrx5GLcz44=", + "path": "github.com/nats-io/go-nats/util", + "revision": "6b6bf392d34d01f57cc563ae123f00c13778bd57", + "revisionTime": "2016-11-20T20:21:26Z" + }, { "checksumSHA1": "Nj7vQ2GlvJiPP7sqJX5AurrDSD4=", "path": "github.com/nats-io/nats", From d6a327fbc54e6536ec8552c72faf169b740e0e7c Mon Sep 17 00:00:00 2001 From: Alex Date: Mon, 9 Jan 2017 22:22:10 +0000 Subject: [PATCH 062/100] Add notifications by webhook. Add a new config entry moving to version 13. 
``` "webhook": { "1": { "enable": true, "address": "http://requestb.in/1i9al7m1" } } ``` --- .travis.yml | 2 +- cmd/api-headers.go | 3 +- cmd/bucket-notification-utils.go | 7 + cmd/bucket-notification-utils_test.go | 11 ++ cmd/config-migrate.go | 98 +++++++++++++ cmd/config-migrate_test.go | 9 +- cmd/config-old.go | 50 ++++++- cmd/{config-v12.go => config-v13.go} | 93 +++++++----- ...{config-v12_test.go => config-v13_test.go} | 7 + cmd/event-notifier.go | 23 +++ cmd/event-notifier_test.go | 93 ++++++++++++ cmd/globals.go | 6 +- cmd/notifiers.go | 15 ++ cmd/notify-amqp.go | 2 +- cmd/notify-webhook.go | 136 ++++++++++++++++++ cmd/notify-webhook_test.go | 79 ++++++++++ cmd/server_test.go | 32 ++++- 17 files changed, 623 insertions(+), 43 deletions(-) rename cmd/{config-v12.go => config-v13.go} (72%) rename cmd/{config-v12_test.go => config-v13_test.go} (92%) create mode 100644 cmd/notify-webhook.go create mode 100644 cmd/notify-webhook_test.go diff --git a/.travis.yml b/.travis.yml index 814e766c0..fdf9bd708 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,4 +21,4 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- 1.7.3 +- 1.7.4 diff --git a/cmd/api-headers.go b/cmd/api-headers.go index 62d32a0ec..5f6cc730c 100644 --- a/cmd/api-headers.go +++ b/cmd/api-headers.go @@ -21,7 +21,6 @@ import ( "encoding/xml" "fmt" "net/http" - "runtime" "strconv" "time" ) @@ -36,7 +35,7 @@ func mustGetRequestID(t time.Time) string { func setCommonHeaders(w http.ResponseWriter) { // Set unique request ID for each reply. 
w.Header().Set(responseRequestIDKey, mustGetRequestID(time.Now().UTC())) - w.Header().Set("Server", ("Minio/" + ReleaseTag + " (" + runtime.GOOS + "; " + runtime.GOARCH + ")")) + w.Header().Set("Server", globalServerUserAgent) w.Header().Set("Accept-Ranges", "bytes") } diff --git a/cmd/bucket-notification-utils.go b/cmd/bucket-notification-utils.go index ff25e9d51..2c0e735f5 100644 --- a/cmd/bucket-notification-utils.go +++ b/cmd/bucket-notification-utils.go @@ -131,6 +131,7 @@ func isValidQueueID(queueARN string) bool { // Unmarshals QueueARN into structured object. sqsARN := unmarshalSqsARN(queueARN) // Is Queue identifier valid?. + if isAMQPQueue(sqsARN) { // AMQP eueue. amqpN := serverConfig.GetAMQPNotifyByID(sqsARN.AccountID) return amqpN.Enable && amqpN.URL != "" @@ -151,6 +152,9 @@ func isValidQueueID(queueARN string) bool { kafkaN := serverConfig.GetKafkaNotifyByID(sqsARN.AccountID) return (kafkaN.Enable && len(kafkaN.Brokers) > 0 && kafkaN.Topic != "") + } else if isWebhookQueue(sqsARN) { + webhookN := serverConfig.GetWebhookNotifyByID(sqsARN.AccountID) + return webhookN.Enable && webhookN.Endpoint != "" } return false } @@ -241,6 +245,7 @@ func validateNotificationConfig(nConfig notificationConfig) APIErrorCode { // - redis // - postgresql // - kafka +// - webhook func unmarshalSqsARN(queueARN string) (mSqs arnSQS) { mSqs = arnSQS{} if !strings.HasPrefix(queueARN, minioSqs+serverConfig.GetRegion()+":") { @@ -260,6 +265,8 @@ func unmarshalSqsARN(queueARN string) (mSqs arnSQS) { mSqs.Type = queueTypePostgreSQL case strings.HasSuffix(sqsType, queueTypeKafka): mSqs.Type = queueTypeKafka + case strings.HasSuffix(sqsType, queueTypeWebhook): + mSqs.Type = queueTypeWebhook } // Add more queues here. 
mSqs.AccountID = strings.TrimSuffix(sqsType, ":"+mSqs.Type) return mSqs diff --git a/cmd/bucket-notification-utils_test.go b/cmd/bucket-notification-utils_test.go index 64130e781..0a59f30f2 100644 --- a/cmd/bucket-notification-utils_test.go +++ b/cmd/bucket-notification-utils_test.go @@ -228,6 +228,12 @@ func TestQueueARN(t *testing.T) { queueARN string errCode APIErrorCode }{ + + // Valid webhook queue arn. + { + queueARN: "arn:minio:sqs:us-east-1:1:webhook", + errCode: ErrNone, + }, // Valid redis queue arn. { queueARN: "arn:minio:sqs:us-east-1:1:redis", @@ -306,6 +312,11 @@ func TestUnmarshalSQSARN(t *testing.T) { queueARN string Type string }{ + // Valid webhook queue arn. + { + queueARN: "arn:minio:sqs:us-east-1:1:webhook", + Type: "webhook", + }, // Valid redis queue arn. { queueARN: "arn:minio:sqs:us-east-1:1:redis", diff --git a/cmd/config-migrate.go b/cmd/config-migrate.go index 8f6bccf4e..acf72f017 100644 --- a/cmd/config-migrate.go +++ b/cmd/config-migrate.go @@ -70,6 +70,10 @@ func migrateConfig() error { if err := migrateV11ToV12(); err != nil { return err } + // Migration version '12' to '13'. + if err := migrateV12ToV13(); err != nil { + return err + } return nil } @@ -836,3 +840,97 @@ func migrateV11ToV12() error { ) return nil } + +// Version '12' to '13' migration. Add support for custom webhook endpoint. +func migrateV12ToV13() error { + cv12, err := loadConfigV12() + if err != nil { + if os.IsNotExist(err) { + return nil + } + return fmt.Errorf("Unable to load config version ‘12’. %v", err) + } + if cv12.Version != "12" { + return nil + } + + // Copy over fields from V12 into V13 config struct + srvConfig := &serverConfigV13{} + srvConfig.Version = "13" + srvConfig.Credential = cv12.Credential + srvConfig.Region = cv12.Region + if srvConfig.Region == "" { + // Region needs to be set for AWS Signature Version 4. 
+ srvConfig.Region = "us-east-1" + } + srvConfig.Logger.Console = cv12.Logger.Console + srvConfig.Logger.File = cv12.Logger.File + + // check and set notifiers config + if len(cv12.Notify.AMQP) == 0 { + srvConfig.Notify.AMQP = make(map[string]amqpNotify) + srvConfig.Notify.AMQP["1"] = amqpNotify{} + } else { + srvConfig.Notify.AMQP = cv12.Notify.AMQP + } + if len(cv12.Notify.ElasticSearch) == 0 { + srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) + srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + } else { + srvConfig.Notify.ElasticSearch = cv12.Notify.ElasticSearch + } + if len(cv12.Notify.Redis) == 0 { + srvConfig.Notify.Redis = make(map[string]redisNotify) + srvConfig.Notify.Redis["1"] = redisNotify{} + } else { + srvConfig.Notify.Redis = cv12.Notify.Redis + } + if len(cv12.Notify.PostgreSQL) == 0 { + srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) + srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + } else { + srvConfig.Notify.PostgreSQL = cv12.Notify.PostgreSQL + } + if len(cv12.Notify.Kafka) == 0 { + srvConfig.Notify.Kafka = make(map[string]kafkaNotify) + srvConfig.Notify.Kafka["1"] = kafkaNotify{} + } else { + srvConfig.Notify.Kafka = cv12.Notify.Kafka + } + if len(cv12.Notify.NATS) == 0 { + srvConfig.Notify.NATS = make(map[string]natsNotify) + srvConfig.Notify.NATS["1"] = natsNotify{} + } else { + srvConfig.Notify.NATS = cv12.Notify.NATS + } + + // V12 will not have a webhook config. So we initialize one here. + srvConfig.Notify.Webhook = make(map[string]webhookNotify) + srvConfig.Notify.Webhook["1"] = webhookNotify{} + + qc, err := quick.New(srvConfig) + if err != nil { + return fmt.Errorf("Unable to initialize the quick config. %v", + err) + } + configFile, err := getConfigFile() + if err != nil { + return fmt.Errorf("Unable to get config file. 
%v", err) + } + + err = qc.Save(configFile) + if err != nil { + return fmt.Errorf( + "Failed to migrate config from ‘"+ + cv12.Version+"’ to ‘"+srvConfig.Version+ + "’ failed. %v", err, + ) + } + + console.Println( + "Migration from version ‘" + + cv12.Version + "’ to ‘" + srvConfig.Version + + "’ completed successfully.", + ) + return nil +} diff --git a/cmd/config-migrate_test.go b/cmd/config-migrate_test.go index d4897e049..e5478cd00 100644 --- a/cmd/config-migrate_test.go +++ b/cmd/config-migrate_test.go @@ -101,7 +101,10 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) { t.Fatal("migrate v10 to v11 should succeed when no config file is found") } if err := migrateV11ToV12(); err != nil { - t.Fatal("migrate v10 to v11 should succeed when no config file is found") + t.Fatal("migrate v11 to v12 should succeed when no config file is found") + } + if err := migrateV12ToV13(); err != nil { + t.Fatal("migrate v12 to v13 should succeed when no config file is found") } } @@ -212,5 +215,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) { if err := migrateV11ToV12(); err == nil { t.Fatal("migrateConfigV11ToV12() should fail with a corrupted json") } - + if err := migrateV12ToV13(); err == nil { + t.Fatal("migrateConfigV12ToV13() should fail with a corrupted json") + } } diff --git a/cmd/config-old.go b/cmd/config-old.go index 30b7fd460..2914dd597 100644 --- a/cmd/config-old.go +++ b/cmd/config-old.go @@ -325,7 +325,8 @@ func loadConfigV6() (*configV6, error) { return c, nil } -// Notifier represents collection of supported notification queues. +// Notifier represents collection of supported notification queues in version +// 1 without NATS streaming. 
type notifierV1 struct { AMQP map[string]amqpNotify `json:"amqp"` NATS map[string]natsNotifyV1 `json:"nats"` @@ -335,6 +336,17 @@ type notifierV1 struct { Kafka map[string]kafkaNotify `json:"kafka"` } +// Notifier represents collection of supported notification queues in version 2 +// with NATS streaming but without webhook. +type notifierV2 struct { + AMQP map[string]amqpNotify `json:"amqp"` + NATS map[string]natsNotify `json:"nats"` + ElasticSearch map[string]elasticSearchNotify `json:"elasticsearch"` + Redis map[string]redisNotify `json:"redis"` + PostgreSQL map[string]postgreSQLNotify `json:"postgresql"` + Kafka map[string]kafkaNotify `json:"kafka"` +} + // configV7 server configuration version '7'. type serverConfigV7 struct { Version string `json:"version"` @@ -538,3 +550,39 @@ func loadConfigV11() (*serverConfigV11, error) { } return srvCfg, nil } + +// serverConfigV12 server configuration version '12' which is like +// version '11' except it adds support for NATS streaming notifications. +type serverConfigV12 struct { + Version string `json:"version"` + + // S3 API configuration. + Credential credential `json:"credential"` + Region string `json:"region"` + + // Additional error logging configuration. + Logger logger `json:"logger"` + + // Notification queue configuration. 
+ Notify notifierV2 `json:"notify"` +} + +func loadConfigV12() (*serverConfigV12, error) { + configFile, err := getConfigFile() + if err != nil { + return nil, err + } + if _, err = os.Stat(configFile); err != nil { + return nil, err + } + srvCfg := &serverConfigV12{} + srvCfg.Version = "12" + qc, err := quick.New(srvCfg) + if err != nil { + return nil, err + } + if err := qc.Load(configFile); err != nil { + return nil, err + } + return srvCfg, nil +} diff --git a/cmd/config-v12.go b/cmd/config-v13.go similarity index 72% rename from cmd/config-v12.go rename to cmd/config-v13.go index 017529303..b29419699 100644 --- a/cmd/config-v12.go +++ b/cmd/config-v13.go @@ -26,9 +26,9 @@ import ( // Read Write mutex for safe access to ServerConfig. var serverConfigMu sync.RWMutex -// serverConfigV12 server configuration version '12' which is like -// version '11' except it adds support for NATS streaming notifications. -type serverConfigV12 struct { +// serverConfigV13 server configuration version '13' which is like +// version '12' except it adds support for webhook notification. +type serverConfigV13 struct { Version string `json:"version"` // S3 API configuration. @@ -47,7 +47,7 @@ type serverConfigV12 struct { func initConfig() (bool, error) { if !isConfigFileExists() { // Initialize server config. - srvCfg := &serverConfigV12{} + srvCfg := &serverConfigV13{} srvCfg.Version = globalMinioConfigVersion srvCfg.Region = "us-east-1" srvCfg.Credential = newCredential() @@ -71,12 +71,15 @@ func initConfig() (bool, error) { srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{} srvCfg.Notify.Kafka = make(map[string]kafkaNotify) srvCfg.Notify.Kafka["1"] = kafkaNotify{} + srvCfg.Notify.Webhook = make(map[string]webhookNotify) + srvCfg.Notify.Webhook["1"] = webhookNotify{} // Create config path. err := createConfigPath() if err != nil { return false, err } + // hold the mutex lock before a new config is assigned. // Save the new config globally. // unlock the mutex. 
@@ -94,7 +97,7 @@ func initConfig() (bool, error) { if _, err = os.Stat(configFile); err != nil { return false, err } - srvCfg := &serverConfigV12{} + srvCfg := &serverConfigV13{} srvCfg.Version = globalMinioConfigVersion qc, err := quick.New(srvCfg) if err != nil { @@ -116,10 +119,10 @@ func initConfig() (bool, error) { } // serverConfig server config. -var serverConfig *serverConfigV12 +var serverConfig *serverConfigV13 // GetVersion get current config version. -func (s serverConfigV12) GetVersion() string { +func (s serverConfigV13) GetVersion() string { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -128,14 +131,14 @@ func (s serverConfigV12) GetVersion() string { /// Logger related. -func (s *serverConfigV12) SetAMQPNotifyByID(accountID string, amqpn amqpNotify) { +func (s *serverConfigV13) SetAMQPNotifyByID(accountID string, amqpn amqpNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.AMQP[accountID] = amqpn } -func (s serverConfigV12) GetAMQP() map[string]amqpNotify { +func (s serverConfigV13) GetAMQP() map[string]amqpNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -143,7 +146,7 @@ func (s serverConfigV12) GetAMQP() map[string]amqpNotify { } // GetAMQPNotify get current AMQP logger. 
-func (s serverConfigV12) GetAMQPNotifyByID(accountID string) amqpNotify { +func (s serverConfigV13) GetAMQPNotifyByID(accountID string) amqpNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -151,35 +154,35 @@ func (s serverConfigV12) GetAMQPNotifyByID(accountID string) amqpNotify { } // -func (s *serverConfigV12) SetNATSNotifyByID(accountID string, natsn natsNotify) { +func (s *serverConfigV13) SetNATSNotifyByID(accountID string, natsn natsNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.NATS[accountID] = natsn } -func (s serverConfigV12) GetNATS() map[string]natsNotify { +func (s serverConfigV13) GetNATS() map[string]natsNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.NATS } // GetNATSNotify get current NATS logger. -func (s serverConfigV12) GetNATSNotifyByID(accountID string) natsNotify { +func (s serverConfigV13) GetNATSNotifyByID(accountID string) natsNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.NATS[accountID] } -func (s *serverConfigV12) SetElasticSearchNotifyByID(accountID string, esNotify elasticSearchNotify) { +func (s *serverConfigV13) SetElasticSearchNotifyByID(accountID string, esNotify elasticSearchNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.ElasticSearch[accountID] = esNotify } -func (s serverConfigV12) GetElasticSearch() map[string]elasticSearchNotify { +func (s serverConfigV13) GetElasticSearch() map[string]elasticSearchNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -187,50 +190,72 @@ func (s serverConfigV12) GetElasticSearch() map[string]elasticSearchNotify { } // GetElasticSearchNotify get current ElasicSearch logger. 
-func (s serverConfigV12) GetElasticSearchNotifyByID(accountID string) elasticSearchNotify { +func (s serverConfigV13) GetElasticSearchNotifyByID(accountID string) elasticSearchNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.ElasticSearch[accountID] } -func (s *serverConfigV12) SetRedisNotifyByID(accountID string, rNotify redisNotify) { +func (s *serverConfigV13) SetRedisNotifyByID(accountID string, rNotify redisNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.Redis[accountID] = rNotify } -func (s serverConfigV12) GetRedis() map[string]redisNotify { +func (s serverConfigV13) GetRedis() map[string]redisNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.Redis } +func (s serverConfigV13) GetWebhook() map[string]webhookNotify { + serverConfigMu.RLock() + defer serverConfigMu.RUnlock() + + return s.Notify.Webhook +} + +// GetWebhookNotifyByID get current Webhook logger. +func (s serverConfigV13) GetWebhookNotifyByID(accountID string) webhookNotify { + serverConfigMu.RLock() + defer serverConfigMu.RUnlock() + + return s.Notify.Webhook[accountID] +} + +func (s *serverConfigV13) SetWebhookNotifyByID(accountID string, pgn webhookNotify) { + serverConfigMu.Lock() + defer serverConfigMu.Unlock() + + s.Notify.Webhook[accountID] = pgn +} + // GetRedisNotify get current Redis logger. 
-func (s serverConfigV12) GetRedisNotifyByID(accountID string) redisNotify { +func (s serverConfigV13) GetRedisNotifyByID(accountID string) redisNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.Redis[accountID] } -func (s *serverConfigV12) SetPostgreSQLNotifyByID(accountID string, pgn postgreSQLNotify) { +func (s *serverConfigV13) SetPostgreSQLNotifyByID(accountID string, pgn postgreSQLNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.PostgreSQL[accountID] = pgn } -func (s serverConfigV12) GetPostgreSQL() map[string]postgreSQLNotify { +func (s serverConfigV13) GetPostgreSQL() map[string]postgreSQLNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.PostgreSQL } -func (s serverConfigV12) GetPostgreSQLNotifyByID(accountID string) postgreSQLNotify { +func (s serverConfigV13) GetPostgreSQLNotifyByID(accountID string) postgreSQLNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -238,21 +263,21 @@ func (s serverConfigV12) GetPostgreSQLNotifyByID(accountID string) postgreSQLNot } // Kafka related functions -func (s *serverConfigV12) SetKafkaNotifyByID(accountID string, kn kafkaNotify) { +func (s *serverConfigV13) SetKafkaNotifyByID(accountID string, kn kafkaNotify) { serverConfigMu.Lock() defer serverConfigMu.Unlock() s.Notify.Kafka[accountID] = kn } -func (s serverConfigV12) GetKafka() map[string]kafkaNotify { +func (s serverConfigV13) GetKafka() map[string]kafkaNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() return s.Notify.Kafka } -func (s serverConfigV12) GetKafkaNotifyByID(accountID string) kafkaNotify { +func (s serverConfigV13) GetKafkaNotifyByID(accountID string) kafkaNotify { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -260,7 +285,7 @@ func (s serverConfigV12) GetKafkaNotifyByID(accountID string) kafkaNotify { } // SetFileLogger set new file logger. 
-func (s *serverConfigV12) SetFileLogger(flogger fileLogger) { +func (s *serverConfigV13) SetFileLogger(flogger fileLogger) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -268,7 +293,7 @@ func (s *serverConfigV12) SetFileLogger(flogger fileLogger) { } // GetFileLogger get current file logger. -func (s serverConfigV12) GetFileLogger() fileLogger { +func (s serverConfigV13) GetFileLogger() fileLogger { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -276,7 +301,7 @@ func (s serverConfigV12) GetFileLogger() fileLogger { } // SetConsoleLogger set new console logger. -func (s *serverConfigV12) SetConsoleLogger(clogger consoleLogger) { +func (s *serverConfigV13) SetConsoleLogger(clogger consoleLogger) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -284,7 +309,7 @@ func (s *serverConfigV12) SetConsoleLogger(clogger consoleLogger) { } // GetConsoleLogger get current console logger. -func (s serverConfigV12) GetConsoleLogger() consoleLogger { +func (s serverConfigV13) GetConsoleLogger() consoleLogger { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -292,7 +317,7 @@ func (s serverConfigV12) GetConsoleLogger() consoleLogger { } // SetRegion set new region. -func (s *serverConfigV12) SetRegion(region string) { +func (s *serverConfigV13) SetRegion(region string) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -300,7 +325,7 @@ func (s *serverConfigV12) SetRegion(region string) { } // GetRegion get current region. -func (s serverConfigV12) GetRegion() string { +func (s serverConfigV13) GetRegion() string { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -308,7 +333,7 @@ func (s serverConfigV12) GetRegion() string { } // SetCredentials set new credentials. 
-func (s *serverConfigV12) SetCredential(creds credential) { +func (s *serverConfigV13) SetCredential(creds credential) { serverConfigMu.Lock() defer serverConfigMu.Unlock() @@ -316,7 +341,7 @@ func (s *serverConfigV12) SetCredential(creds credential) { } // GetCredentials get current credentials. -func (s serverConfigV12) GetCredential() credential { +func (s serverConfigV13) GetCredential() credential { serverConfigMu.RLock() defer serverConfigMu.RUnlock() @@ -324,7 +349,7 @@ func (s serverConfigV12) GetCredential() credential { } // Save config. -func (s serverConfigV12) Save() error { +func (s serverConfigV13) Save() error { serverConfigMu.RLock() defer serverConfigMu.RUnlock() diff --git a/cmd/config-v12_test.go b/cmd/config-v13_test.go similarity index 92% rename from cmd/config-v12_test.go rename to cmd/config-v13_test.go index 755976d43..5a80e43aa 100644 --- a/cmd/config-v12_test.go +++ b/cmd/config-v13_test.go @@ -67,6 +67,13 @@ func TestServerConfig(t *testing.T) { t.Errorf("Expecting Kafka config %#v found %#v", kafkaNotify{}, savedNotifyCfg4) } + // Set new Webhook notification id. + serverConfig.SetWebhookNotifyByID("2", webhookNotify{}) + savedNotifyCfg5 := serverConfig.GetWebhookNotifyByID("2") + if !reflect.DeepEqual(savedNotifyCfg5, webhookNotify{}) { + t.Errorf("Expecting Webhook config %#v found %#v", webhookNotify{}, savedNotifyCfg3) + } + // Set new console logger. serverConfig.SetConsoleLogger(consoleLogger{ Enable: true, diff --git a/cmd/event-notifier.go b/cmd/event-notifier.go index 85c600242..fa3eb5b7b 100644 --- a/cmd/event-notifier.go +++ b/cmd/event-notifier.go @@ -612,6 +612,28 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) { } queueTargets[queueARN] = redisLog } + + // Load Webhook targets, initialize their respective loggers. + for accountID, webhookN := range serverConfig.GetWebhook() { + if !webhookN.Enable { + continue + } + // Construct the queue ARN for Webhook. 
+ queueARN := minioSqs + serverConfig.GetRegion() + ":" + accountID + ":" + queueTypeWebhook + _, ok := queueTargets[queueARN] + if ok { + continue + } + + // Using accountID we can now initialize a new Webhook logrus instance. + webhookLog, err := newWebhookNotify(accountID) + if err != nil { + + return nil, err + } + queueTargets[queueARN] = webhookLog + } + // Load elastic targets, initialize their respective loggers. for accountID, elasticN := range serverConfig.GetElasticSearch() { if !elasticN.Enable { @@ -637,6 +659,7 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) { } queueTargets[queueARN] = elasticLog } + // Load PostgreSQL targets, initialize their respective loggers. for accountID, pgN := range serverConfig.GetPostgreSQL() { if !pgN.Enable { diff --git a/cmd/event-notifier_test.go b/cmd/event-notifier_test.go index ee842676a..45e8a0b02 100644 --- a/cmd/event-notifier_test.go +++ b/cmd/event-notifier_test.go @@ -77,6 +77,99 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) { } } +// InitEventNotifierWithPostgreSQL - tests InitEventNotifier when PostgreSQL is not prepared +func TestInitEventNotifierWithPostgreSQL(t *testing.T) { + // initialize the server and obtain the credentials and root. + // credentials are necessary to sign the HTTP request. + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Init Test config failed") + } + // remove the root directory after the test ends. + defer removeAll(rootPath) + + disks, err := getRandomDisks(1) + defer removeAll(disks[0]) + if err != nil { + t.Fatal("Unable to create directories for FS backend. 
", err) + } + endpoints, err := parseStorageEndpoints(disks) + if err != nil { + t.Fatal(err) + } + fs, _, err := initObjectLayer(endpoints) + if err != nil { + t.Fatal("Unable to initialize FS backend.", err) + } + + serverConfig.SetPostgreSQLNotifyByID("1", postgreSQLNotify{Enable: true}) + if err := initEventNotifier(fs); err == nil { + t.Fatal("PostgreSQL config didn't fail.") + } +} + +// InitEventNotifierWithNATS - tests InitEventNotifier when NATS is not prepared +func TestInitEventNotifierWithNATS(t *testing.T) { + // initialize the server and obtain the credentials and root. + // credentials are necessary to sign the HTTP request. + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Init Test config failed") + } + // remove the root directory after the test ends. + defer removeAll(rootPath) + + disks, err := getRandomDisks(1) + defer removeAll(disks[0]) + if err != nil { + t.Fatal("Unable to create directories for FS backend. ", err) + } + endpoints, err := parseStorageEndpoints(disks) + if err != nil { + t.Fatal(err) + } + fs, _, err := initObjectLayer(endpoints) + if err != nil { + t.Fatal("Unable to initialize FS backend.", err) + } + + serverConfig.SetNATSNotifyByID("1", natsNotify{Enable: true}) + if err := initEventNotifier(fs); err == nil { + t.Fatal("NATS config didn't fail.") + } +} + +// InitEventNotifierWithWebHook - tests InitEventNotifier when WebHook is not prepared +func TestInitEventNotifierWithWebHook(t *testing.T) { + // initialize the server and obtain the credentials and root. + // credentials are necessary to sign the HTTP request. + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Init Test config failed") + } + // remove the root directory after the test ends. + defer removeAll(rootPath) + + disks, err := getRandomDisks(1) + defer removeAll(disks[0]) + if err != nil { + t.Fatal("Unable to create directories for FS backend. 
", err) + } + endpoints, err := parseStorageEndpoints(disks) + if err != nil { + t.Fatal(err) + } + fs, _, err := initObjectLayer(endpoints) + if err != nil { + t.Fatal("Unable to initialize FS backend.", err) + } + + serverConfig.SetWebhookNotifyByID("1", webhookNotify{Enable: true}) + if err := initEventNotifier(fs); err == nil { + t.Fatal("WebHook config didn't fail.") + } +} + // InitEventNotifierWithAMQP - tests InitEventNotifier when AMQP is not prepared func TestInitEventNotifierWithAMQP(t *testing.T) { // initialize the server and obtain the credentials and root. diff --git a/cmd/globals.go b/cmd/globals.go index 0f0eb7476..419886e00 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -19,6 +19,7 @@ package cmd import ( "crypto/x509" "os" + "runtime" "strings" "time" @@ -36,7 +37,7 @@ const ( // minio configuration related constants. const ( - globalMinioConfigVersion = "12" + globalMinioConfigVersion = "13" globalMinioConfigDir = ".minio" globalMinioCertsDir = "certs" globalMinioCertsCADir = "CAs" @@ -96,6 +97,9 @@ var ( // List of admin peers. globalAdminPeers = adminPeers{} + // Minio server user agent string. + globalServerUserAgent = "Minio/" + ReleaseTag + " (" + runtime.GOOS + "; " + runtime.GOARCH + ")" + // Add new variable global values here. ) diff --git a/cmd/notifiers.go b/cmd/notifiers.go index 8e65b9d9a..b7b5ec496 100644 --- a/cmd/notifiers.go +++ b/cmd/notifiers.go @@ -40,6 +40,8 @@ const ( queueTypePostgreSQL = "postgresql" // Static string indicating queue type 'kafka'. queueTypeKafka = "kafka" + // Static string for Webhooks + queueTypeWebhook = "webhook" ) // Topic type. @@ -61,6 +63,7 @@ type notifier struct { Redis map[string]redisNotify `json:"redis"` PostgreSQL map[string]postgreSQLNotify `json:"postgresql"` Kafka map[string]kafkaNotify `json:"kafka"` + Webhook map[string]webhookNotify `json:"webhook"` // Add new notification queues. 
} @@ -102,6 +105,18 @@ func isNATSQueue(sqsArn arnSQS) bool { return true } +// Returns true if queueArn is for an Webhook queue +func isWebhookQueue(sqsArn arnSQS) bool { + if sqsArn.Type != queueTypeWebhook { + return false + } + rNotify := serverConfig.GetWebhookNotifyByID(sqsArn.AccountID) + if !rNotify.Enable { + return false + } + return true +} + // Returns true if queueArn is for an Redis queue. func isRedisQueue(sqsArn arnSQS) bool { if sqsArn.Type != queueTypeRedis { diff --git a/cmd/notify-amqp.go b/cmd/notify-amqp.go index ac5698d62..a5b87f72d 100644 --- a/cmd/notify-amqp.go +++ b/cmd/notify-amqp.go @@ -82,7 +82,7 @@ func newAMQPNotify(accountID string) (*logrus.Logger, error) { return amqpLog, nil } -// Fire is called when an event should be sent to the message broker.k +// Fire is called when an event should be sent to the message broker. func (q amqpConn) Fire(entry *logrus.Entry) error { ch, err := q.Connection.Channel() if err != nil { diff --git a/cmd/notify-webhook.go b/cmd/notify-webhook.go new file mode 100644 index 000000000..10904672e --- /dev/null +++ b/cmd/notify-webhook.go @@ -0,0 +1,136 @@ +/* + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cmd + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "time" + + "github.com/Sirupsen/logrus" +) + +type webhookNotify struct { + Enable bool `json:"enable"` + Endpoint string `json:"endpoint"` +} + +type httpConn struct { + *http.Client + Endpoint string +} + +// Lookup host address by dialing. +func lookupHost(addr string) error { + dialer := &net.Dialer{ + Timeout: 300 * time.Millisecond, + KeepAlive: 300 * time.Millisecond, + } + nconn, err := dialer.Dial("tcp", addr) + if err != nil { + return err + } + return nconn.Close() +} + +// Initializes new webhook logrus notifier. +func newWebhookNotify(accountID string) (*logrus.Logger, error) { + rNotify := serverConfig.GetWebhookNotifyByID(accountID) + + if rNotify.Endpoint == "" { + return nil, errInvalidArgument + } + + u, err := url.Parse(rNotify.Endpoint) + if err != nil { + return nil, err + } + + if err = lookupHost(u.Host); err != nil { + return nil, err + } + + conn := httpConn{ + // Configure aggressive timeouts for client posts. + Client: &http.Client{ + Transport: &http.Transport{ + DialContext: (&net.Dialer{ + Timeout: 5 * time.Second, + KeepAlive: 5 * time.Second, + }).DialContext, + TLSHandshakeTimeout: 3 * time.Second, + ResponseHeaderTimeout: 3 * time.Second, + ExpectContinueTimeout: 2 * time.Second, + }, + }, + Endpoint: rNotify.Endpoint, + } + + notifyLog := logrus.New() + notifyLog.Out = ioutil.Discard + + // Set default JSON formatter. + notifyLog.Formatter = new(logrus.JSONFormatter) + + notifyLog.Hooks.Add(conn) + + // Success + return notifyLog, nil +} + +// Fire is called when an event should be sent to the message broker. +func (n httpConn) Fire(entry *logrus.Entry) error { + body, err := entry.Reader() + if err != nil { + return err + } + + req, err := http.NewRequest("POST", n.Endpoint, body) + if err != nil { + return err + } + + // Set content-type. + req.Header.Set("Content-Type", "application/json") + + // Set proper server user-agent. 
+ req.Header.Set("User-Agent", globalServerUserAgent) + + // Initiate the http request. + resp, err := n.Do(req) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK && + resp.StatusCode != http.StatusAccepted && + resp.StatusCode != http.StatusContinue { + return fmt.Errorf("Unable to send event %s", resp.Status) + } + + return nil +} + +// Levels are Required for logrus hook implementation +func (httpConn) Levels() []logrus.Level { + return []logrus.Level{ + logrus.InfoLevel, + } +} diff --git a/cmd/notify-webhook_test.go b/cmd/notify-webhook_test.go new file mode 100644 index 000000000..3f298694f --- /dev/null +++ b/cmd/notify-webhook_test.go @@ -0,0 +1,79 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "path" + "testing" + + "github.com/Sirupsen/logrus" +) + +// Custom post handler to handle POST requests. +type postHandler struct{} + +func (p postHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + http.Error(w, fmt.Sprintf("Unexpected method %s", r.Method), http.StatusBadRequest) + return + } + io.Copy(w, r.Body) +} + +// Tests web hook initialization. 
+func TestNewWebHookNotify(t *testing.T) { + root, err := newTestConfig("us-east-1") + if err != nil { + t.Fatal(err) + } + defer removeAll(root) + + _, err = newWebhookNotify("1") + if err == nil { + t.Fatal("Unexpected should fail") + } + + serverConfig.SetWebhookNotifyByID("10", webhookNotify{Enable: true, Endpoint: "http://www."}) + _, err = newWebhookNotify("10") + if err == nil { + t.Fatal("Unexpected should fail with lookupHost") + } + + serverConfig.SetWebhookNotifyByID("15", webhookNotify{Enable: true, Endpoint: "http://%"}) + _, err = newWebhookNotify("15") + if err == nil { + t.Fatal("Unexpected should fail with invalid URL escape") + } + + server := httptest.NewServer(postHandler{}) + defer server.Close() + + serverConfig.SetWebhookNotifyByID("20", webhookNotify{Enable: true, Endpoint: server.URL}) + webhook, err := newWebhookNotify("20") + if err != nil { + t.Fatal("Unexpected shouldn't fail", err) + } + + webhook.WithFields(logrus.Fields{ + "Key": path.Join("bucket", "object"), + "EventType": "s3:ObjectCreated:Put", + }).Info() +} diff --git a/cmd/server_test.go b/cmd/server_test.go index 5f6fa9b83..04af7e129 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -100,7 +100,37 @@ func (s *TestSuiteCommon) TestAuth(c *C) { c.Assert(len(cred.SecretKey), Equals, secretKeyMaxLen) } -func (s *TestSuiteCommon) TestBucketSQSNotification(c *C) { +func (s *TestSuiteCommon) TestBucketSQSNotificationWebHook(c *C) { + // Sample bucket notification. + bucketNotificationBuf := `s3:ObjectCreated:Putprefiximages/1arn:minio:sqs:us-east-1:444455556666:webhook` + // generate a random bucket Name. + bucketName := getRandomBucketName() + // HTTP request to create the bucket. + request, err := newTestSignedRequest("PUT", getMakeBucketURL(s.endPoint, bucketName), + 0, nil, s.accessKey, s.secretKey, s.signer) + c.Assert(err, IsNil) + + client := http.Client{Transport: s.transport} + // execute the request. 
+ response, err := client.Do(request) + c.Assert(err, IsNil) + + // assert the http response status code. + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = newTestSignedRequest("PUT", getPutNotificationURL(s.endPoint, bucketName), + int64(len(bucketNotificationBuf)), bytes.NewReader([]byte(bucketNotificationBuf)), s.accessKey, s.secretKey, s.signer) + c.Assert(err, IsNil) + + client = http.Client{Transport: s.transport} + // execute the HTTP request. + response, err = client.Do(request) + + c.Assert(err, IsNil) + verifyError(c, response, "InvalidArgument", "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.", http.StatusBadRequest) +} + +func (s *TestSuiteCommon) TestBucketSQSNotificationAMQP(c *C) { // Sample bucket notification. bucketNotificationBuf := `s3:ObjectCreated:Putprefiximages/1arn:minio:sqs:us-east-1:444455556666:amqp` // generate a random bucket Name. From 7b85756c6426ab187c7c8072137676e429bf9685 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 12 Jan 2017 20:08:00 -0800 Subject: [PATCH 063/100] notify/webhook: Handle webendpoints without port (#3568) Fixes and issue initializing webhook notification ``` FATA[0000] Initializing object layer failed cause=Unable to initialize event \ notification. dial tcp: missing port in address requestb.in source=[server-main.go:448:serverMain()] ``` --- cmd/notify-webhook.go | 8 ++++---- cmd/utils.go | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/cmd/notify-webhook.go b/cmd/notify-webhook.go index 10904672e..95af0fcb4 100644 --- a/cmd/notify-webhook.go +++ b/cmd/notify-webhook.go @@ -37,13 +37,13 @@ type httpConn struct { Endpoint string } -// Lookup host address by dialing. -func lookupHost(addr string) error { +// Lookup endpoint address by successfully dialing. 
+func lookupEndpoint(u *url.URL) error { dialer := &net.Dialer{ Timeout: 300 * time.Millisecond, KeepAlive: 300 * time.Millisecond, } - nconn, err := dialer.Dial("tcp", addr) + nconn, err := dialer.Dial("tcp", canonicalAddr(u)) if err != nil { return err } @@ -63,7 +63,7 @@ func newWebhookNotify(accountID string) (*logrus.Logger, error) { return nil, err } - if err = lookupHost(u.Host); err != nil { + if err = lookupEndpoint(u); err != nil { return nil, err } diff --git a/cmd/utils.go b/cmd/utils.go index 8723e62ad..c62aed21b 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -102,6 +102,24 @@ func urlPath2BucketObjectName(u *url.URL) (bucketName, objectName string) { return bucketName, objectName } +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// canonicalAddr returns url.Host but always with a ":port" suffix +func canonicalAddr(u *url.URL) string { + addr := u.Host + if !hasPort(addr) { + return addr + ":" + portMap[u.Scheme] + } + return addr +} + // checkDuplicates - function to validate if there are duplicates in a slice of endPoints. 
func checkDuplicateEndpoints(endpoints []*url.URL) error { var strs []string From f64f8b03cbf86b2605e780cfe6f990e01d7d35e0 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Fri, 13 Jan 2017 23:25:34 +0100 Subject: [PATCH 064/100] admin: Enhance locks list json response (#3573) --- cmd/lockinfo-handlers.go | 37 +++++++++++++++++++++++++++---------- pkg/madmin/lock-commands.go | 26 ++++++++++++++++---------- 2 files changed, 43 insertions(+), 20 deletions(-) diff --git a/cmd/lockinfo-handlers.go b/cmd/lockinfo-handlers.go index 70dd4e76f..16a915aa1 100644 --- a/cmd/lockinfo-handlers.go +++ b/cmd/lockinfo-handlers.go @@ -38,28 +38,34 @@ type SystemLockState struct { type VolumeLockInfo struct { Bucket string `json:"bucket"` Object string `json:"object"` + // All locks blocked + running for given pair. - LocksOnObject int64 `json:"locksOnObject"` + LocksOnObject int64 `json:"-"` // Count of operations which has successfully acquired the lock // but hasn't unlocked yet( operation in progress). - LocksAcquiredOnObject int64 `json:"locksAcquiredOnObject"` + LocksAcquiredOnObject int64 `json:"-"` // Count of operations which are blocked waiting for the lock // to be released. - TotalBlockedLocks int64 `json:"locksBlockedOnObject"` + TotalBlockedLocks int64 `json:"-"` + + // Count of all read locks + TotalReadLocks int64 `json:"readLocks"` + // Count of all write locks + TotalWriteLocks int64 `json:"writeLocks"` // State information containing state of the locks for all operations // on given pair. - LockDetailsOnObject []OpsLockState `json:"lockDetailsOnObject"` + LockDetailsOnObject []OpsLockState `json:"lockOwners"` } // OpsLockState - structure to fill in state information of the lock. // structure to fill in status information for each operation with given operation ID. type OpsLockState struct { - OperationID string `json:"opsID"` // String containing operation ID. - LockSource string `json:"lockSource"` // Operation type (GetObject, PutObject...) 
- LockType lockType `json:"lockType"` // Lock type (RLock, WLock) - Status statusType `json:"status"` // Status can be Running/Ready/Blocked. - Since time.Time `json:"statusSince"` // Time when the lock was initially held. - Duration time.Duration `json:"statusDuration"` // Duration since the lock was held. + OperationID string `json:"id"` // String containing operation ID. + LockSource string `json:"source"` // Operation type (GetObject, PutObject...) + LockType lockType `json:"type"` // Lock type (RLock, WLock) + Status statusType `json:"status"` // Status can be Running/Ready/Blocked. + Since time.Time `json:"since"` // Time when the lock was initially held. + Duration time.Duration `json:"duration"` // Duration since the lock was held. } // Read entire state of the locks in the system and return. @@ -75,6 +81,8 @@ func getSystemLockState() (SystemLockState, error) { TotalBlockedLocks: globalNSMutex.counters.blocked, } + var totalReadLocks, totalWriteLocks int64 + for param, debugLock := range globalNSMutex.debugLockMap { volLockInfo := VolumeLockInfo{} volLockInfo.Bucket = param.volume @@ -91,7 +99,16 @@ func getSystemLockState() (SystemLockState, error) { Since: lockInfo.since, Duration: timeNow.Sub(lockInfo.since), }) + switch lockInfo.lType { + case debugRLockStr: + totalReadLocks++ + case debugWLockStr: + totalWriteLocks++ + } } + volLockInfo.TotalReadLocks = totalReadLocks + volLockInfo.TotalWriteLocks = totalWriteLocks + lockState.LocksInfoPerObject = append(lockState.LocksInfoPerObject, volLockInfo) } return lockState, nil diff --git a/pkg/madmin/lock-commands.go b/pkg/madmin/lock-commands.go index 1bae3a8df..5b7e15cb9 100644 --- a/pkg/madmin/lock-commands.go +++ b/pkg/madmin/lock-commands.go @@ -43,12 +43,12 @@ const ( // OpsLockState - represents lock specific details. type OpsLockState struct { - OperationID string `json:"opsID"` // String containing operation ID. - LockSource string `json:"lockSource"` // Operation type (GetObject, PutObject...) 
- LockType lockType `json:"lockType"` // Lock type (RLock, WLock) - Status statusType `json:"status"` // Status can be Running/Ready/Blocked. - Since time.Time `json:"statusSince"` // Time when the lock was initially held. - Duration time.Duration `json:"statusDuration"` // Duration since the lock was held. + OperationID string `json:"id"` // String containing operation ID. + LockSource string `json:"source"` // Operation type (GetObject, PutObject...) + LockType lockType `json:"type"` // Lock type (RLock, WLock) + Status statusType `json:"status"` // Status can be Running/Ready/Blocked. + Since time.Time `json:"since"` // Time when the lock was initially held. + Duration time.Duration `json:"duration"` // Duration since the lock was held. } // VolumeLockInfo - represents summary and individual lock details of all @@ -56,17 +56,23 @@ type OpsLockState struct { type VolumeLockInfo struct { Bucket string `json:"bucket"` Object string `json:"object"` + // All locks blocked + running for given pair. - LocksOnObject int64 `json:"locksOnObject"` + LocksOnObject int64 `json:"-"` // Count of operations which has successfully acquired the lock // but hasn't unlocked yet( operation in progress). - LocksAcquiredOnObject int64 `json:"locksAcquiredOnObject"` + LocksAcquiredOnObject int64 `json:"-"` // Count of operations which are blocked waiting for the lock // to be released. - TotalBlockedLocks int64 `json:"locksBlockedOnObject"` + TotalBlockedLocks int64 `json:"-"` + + // Count of all read locks + TotalReadLocks int64 `json:"readLocks"` + // Count of all write locks + TotalWriteLocks int64 `json:"writeLocks"` // State information containing state of the locks for all operations // on given pair. - LockDetailsOnObject []OpsLockState `json:"lockDetailsOnObject"` + LockDetailsOnObject []OpsLockState `json:"lockOwners"` } // getLockInfos - unmarshal []VolumeLockInfo from a reader. 
From 50796e481d625d506522295a8b0bb2483cf5af70 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Fri, 13 Jan 2017 18:51:17 -0800 Subject: [PATCH 065/100] build: Add ARM binary builds for ARMv6 and Aarch64. (#3577) --- buildscripts/build.sh | 56 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 46 insertions(+), 10 deletions(-) diff --git a/buildscripts/build.sh b/buildscripts/build.sh index 5a7af2352..79a7778c8 100755 --- a/buildscripts/build.sh +++ b/buildscripts/build.sh @@ -23,7 +23,7 @@ _init() { fi # List of supported architectures - SUPPORTED_OSARCH='linux/386 linux/amd64 linux/arm windows/386 windows/amd64 darwin/amd64 freebsd/amd64' + SUPPORTED_OSARCH='linux/386 linux/amd64 linux/arm linux/arm64 windows/386 windows/amd64 darwin/amd64 freebsd/amd64' ## System binaries CP=`which cp` @@ -47,18 +47,54 @@ go_build() { release_shasum="$release_str/$os-$arch/$(basename $package).shasum" # Go build to build the binary. - GOOS=$os GOARCH=$arch go build --ldflags "${LDFLAGS}" -o $release_bin + if [ "${arch}" == "arm" ]; then + # Release binary downloadable name + release_real_bin_6="$release_str/$os-${arch}6vl/$(basename $package)" - # Create copy - if [ $os == "windows" ]; then - $CP -p $release_bin ${release_real_bin}.exe + release_bin_6="$release_str/$os-${arch}6vl/$(basename $package).$release_tag" + ## Support building for ARM6vl + GOARM=6 GOOS=$os GOARCH=$arch go build --ldflags "${LDFLAGS}" -o $release_bin_6 + + ## Copy + $CP -p $release_bin_6 $release_real_bin_6 + + # Release shasum name + release_shasum_6="$release_str/$os-${arch}6vl/$(basename $package).shasum" + + # Calculate shasum + shasum_str=$(${SHASUM} ${release_bin_6}) + echo ${shasum_str} | $SED "s/$release_str\/$os-${arch}6vl\///g" > $release_shasum_6 + + # Release binary downloadable name + release_real_bin_7="$release_str/$os-$arch/$(basename $package)" + + release_bin_7="$release_str/$os-$arch/$(basename $package).$release_tag" + ## Support building for ARM7vl + GOARM=7 GOOS=$os 
GOARCH=$arch go build --ldflags "${LDFLAGS}" -o $release_bin_7 + + ## Copy + $CP -p $release_bin_7 $release_real_bin_7 + + # Release shasum name + release_shasum_7="$release_str/$os-$arch/$(basename $package).shasum" + + # Calculate shasum + shasum_str=$(${SHASUM} ${release_bin_7}) + echo ${shasum_str} | $SED "s/$release_str\/$os-$arch\///g" > $release_shasum_7 else - $CP -p $release_bin $release_real_bin - fi + GOOS=$os GOARCH=$arch go build --ldflags "${LDFLAGS}" -o $release_bin - # Calculate shasum - shasum_str=$(${SHASUM} ${release_bin}) - echo ${shasum_str} | $SED "s/$release_str\/$os-$arch\///g" > $release_shasum + # Create copy + if [ $os == "windows" ]; then + $CP -p $release_bin ${release_real_bin}.exe + else + $CP -p $release_bin $release_real_bin + fi + + # Calculate shasum + shasum_str=$(${SHASUM} ${release_bin}) + echo ${shasum_str} | $SED "s/$release_str\/$os-$arch\///g" > $release_shasum + fi } main() { From 2959c104b32385c502b6125eaedfb1e65217ae31 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Sat, 14 Jan 2017 23:06:23 +0100 Subject: [PATCH 066/100] peer rpc: Fix typo in cluster credentials update (#3579) Update credentials in cluster wasn't working due to a typo --- cmd/browser-peer-rpc.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/browser-peer-rpc.go b/cmd/browser-peer-rpc.go index 834a9c75d..79f9e7b53 100644 --- a/cmd/browser-peer-rpc.go +++ b/cmd/browser-peer-rpc.go @@ -108,7 +108,7 @@ func updateCredsOnPeers(creds credential) map[string]error { serverAddr: peers[ix], secureConn: globalIsSSL, serviceEndpoint: path.Join(reservedBucket, browserPeerPath), - serviceName: "Browser", + serviceName: "BrowserPeer", }) // Construct RPC call arguments. @@ -116,14 +116,14 @@ func updateCredsOnPeers(creds credential) map[string]error { // Make RPC call - we only care about error // response and not the reply. 
- err := client.Call("Browser.SetAuthPeer", &args, &AuthRPCReply{}) + err := client.Call("BrowserPeer.SetAuthPeer", &args, &AuthRPCReply{}) // We try a bit hard (3 attempts with 1 second delay) // to set creds on peers in case of failure. if err != nil { for i := 0; i < 2; i++ { time.Sleep(1 * time.Second) // 1 second delay. - err = client.Call("Browser.SetAuthPeer", &args, &AuthRPCReply{}) + err = client.Call("BrowserPeer.SetAuthPeer", &args, &AuthRPCReply{}) if err == nil { break } From caecd75a2a1b955163a09e909fbba6fae550c230 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Sat, 14 Jan 2017 14:48:52 -0800 Subject: [PATCH 067/100] Deprecate and remove service stop API. (#3578) Fixes #3570 --- cmd/admin-handlers.go | 18 ------------ cmd/admin-handlers_test.go | 9 ------ cmd/admin-router.go | 3 +- cmd/admin-rpc-client.go | 23 ++------------- cmd/admin-rpc-server.go | 10 ------- cmd/admin-rpc-server_test.go | 10 +------ pkg/madmin/API.md | 18 ------------ pkg/madmin/README.md | 2 -- pkg/madmin/examples/service-stop.go | 44 ----------------------------- pkg/madmin/service.go | 24 ---------------- 10 files changed, 5 insertions(+), 156 deletions(-) delete mode 100644 pkg/madmin/examples/service-stop.go diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index f3e34b8dc..6e5497b7e 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -50,24 +50,6 @@ func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r * writeSuccessResponseJSON(w, jsonBytes) } -// ServiceStopHandler - POST /?service -// HTTP header x-minio-operation: stop -// ---------- -// Stops minio server gracefully. In a distributed setup, stops all the -// servers in the cluster. -func (adminAPI adminAPIHandlers) ServiceStopHandler(w http.ResponseWriter, r *http.Request) { - adminAPIErr := checkRequestAuthType(r, "", "", "") - if adminAPIErr != ErrNone { - writeErrorResponse(w, adminAPIErr, r.URL) - return - } - - // Reply to the client before stopping minio server. 
- w.WriteHeader(http.StatusOK) - - sendServiceCmd(globalAdminPeers, serviceStop) -} - // ServiceRestartHandler - POST /?service // HTTP header x-minio-operation: restart // ---------- diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 01f3f899f..689ac4c9c 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -56,8 +56,6 @@ func (c cmdType) apiMethod() string { switch c { case statusCmd: return "GET" - case stopCmd: - return "POST" case restartCmd: return "POST" } @@ -70,8 +68,6 @@ func (c cmdType) toServiceSignal() serviceSignal { switch c { case statusCmd: return serviceStatus - case stopCmd: - return serviceStop case restartCmd: return serviceRestart } @@ -187,11 +183,6 @@ func TestServiceStatusHandler(t *testing.T) { testServicesCmdHandler(statusCmd, t) } -// Test for service stop management REST API. -func TestServiceStopHandler(t *testing.T) { - testServicesCmdHandler(stopCmd, t) -} - // Test for service restart management REST API. func TestServiceRestartHandler(t *testing.T) { testServicesCmdHandler(restartCmd, t) diff --git a/cmd/admin-router.go b/cmd/admin-router.go index e74d2edc0..053f01164 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -33,8 +33,7 @@ func registerAdminRouter(mux *router.Router) { // Service status adminRouter.Methods("GET").Queries("service", "").Headers(minioAdminOpHeader, "status").HandlerFunc(adminAPI.ServiceStatusHandler) - // Service stop - adminRouter.Methods("POST").Queries("service", "").Headers(minioAdminOpHeader, "stop").HandlerFunc(adminAPI.ServiceStopHandler) + // Service restart adminRouter.Methods("POST").Queries("service", "").Headers(minioAdminOpHeader, "restart").HandlerFunc(adminAPI.ServiceRestartHandler) diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go index b80ce882f..c117eac9c 100644 --- a/cmd/admin-rpc-client.go +++ b/cmd/admin-rpc-client.go @@ -36,18 +36,10 @@ type remoteAdminClient struct { // adminCmdRunner - abstracts local and remote 
execution of admin // commands like service stop and service restart. type adminCmdRunner interface { - Stop() error Restart() error ListLocks(bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error) } -// Stop - Sends a message over channel to the go-routine responsible -// for stopping the process. -func (lc localAdminClient) Stop() error { - globalServiceSignalCh <- serviceStop - return nil -} - // Restart - Sends a message over channel to the go-routine // responsible for restarting the process. func (lc localAdminClient) Restart() error { @@ -60,13 +52,6 @@ func (lc localAdminClient) ListLocks(bucket, prefix string, relTime time.Duratio return listLocksInfo(bucket, prefix, relTime), nil } -// Stop - Sends stop command to remote server via RPC. -func (rc remoteAdminClient) Stop() error { - args := AuthRPCArgs{} - reply := AuthRPCReply{} - return rc.Call("Admin.Shutdown", &args, &reply) -} - // Restart - Sends restart command to remote server via RPC. func (rc remoteAdminClient) Restart() error { args := AuthRPCArgs{} @@ -88,7 +73,7 @@ func (rc remoteAdminClient) ListLocks(bucket, prefix string, relTime time.Durati return reply.volLocks, nil } -// adminPeer - represents an entity that implements Stop and Restart methods. +// adminPeer - represents an entity that implements Restart methods. type adminPeer struct { addr string cmdRunner adminCmdRunner @@ -146,18 +131,16 @@ func initGlobalAdminPeers(eps []*url.URL) { globalAdminPeers = makeAdminPeers(eps) } -// invokeServiceCmd - Invoke Stop/Restart command. +// invokeServiceCmd - Invoke Restart command. func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) { switch cmd { - case serviceStop: - err = cp.cmdRunner.Stop() case serviceRestart: err = cp.cmdRunner.Restart() } return err } -// sendServiceCmd - Invoke Stop/Restart command on remote peers +// sendServiceCmd - Invoke Restart command on remote peers // adminPeer followed by on the local peer. 
func sendServiceCmd(cps adminPeers, cmd serviceSignal) { // Send service command like stop or restart to all remote nodes and finally run on local node. diff --git a/cmd/admin-rpc-server.go b/cmd/admin-rpc-server.go index 0980cef11..4bd05549a 100644 --- a/cmd/admin-rpc-server.go +++ b/cmd/admin-rpc-server.go @@ -45,16 +45,6 @@ type ListLocksReply struct { volLocks []VolumeLockInfo } -// Shutdown - Shutdown this instance of minio server. -func (s *adminCmd) Shutdown(args *AuthRPCArgs, reply *AuthRPCReply) error { - if err := args.IsAuthenticated(); err != nil { - return err - } - - globalServiceSignalCh <- serviceStop - return nil -} - // Restart - Restart this instance of minio server. func (s *adminCmd) Restart(args *AuthRPCArgs, reply *AuthRPCReply) error { if err := args.IsAuthenticated(); err != nil { diff --git a/cmd/admin-rpc-server_test.go b/cmd/admin-rpc-server_test.go index 26365d4ab..5894514c7 100644 --- a/cmd/admin-rpc-server_test.go +++ b/cmd/admin-rpc-server_test.go @@ -47,17 +47,13 @@ func testAdminCmd(cmd cmdType, t *testing.T) { } go func() { - // mocking signal receiver + // A test signal receiver <-globalServiceSignalCh }() ga := AuthRPCArgs{AuthToken: reply.AuthToken, RequestTime: time.Now().UTC()} genReply := AuthRPCReply{} switch cmd { - case stopCmd: - if err = adminServer.Shutdown(&ga, &genReply); err != nil { - t.Errorf("stopCmd: Expected: , got: %v", err) - } case restartCmd: if err = adminServer.Restart(&ga, &genReply); err != nil { t.Errorf("restartCmd: Expected: , got: %v", err) @@ -65,10 +61,6 @@ func testAdminCmd(cmd cmdType, t *testing.T) { } } -func TestAdminShutdown(t *testing.T) { - testAdminCmd(stopCmd, t) -} - func TestAdminRestart(t *testing.T) { testAdminCmd(restartCmd, t) } diff --git a/pkg/madmin/API.md b/pkg/madmin/API.md index 2b93991a4..0d60d755b 100644 --- a/pkg/madmin/API.md +++ b/pkg/madmin/API.md @@ -39,7 +39,6 @@ func main() { | Service operations|LockInfo operations|Healing operations| |:---|:---|:---| 
|[`ServiceStatus`](#ServiceStatus)| | | -|[`ServiceStop`](#ServiceStop)| | | |[`ServiceRestart`](#ServiceRestart)| | | ## 1. Constructor @@ -98,23 +97,6 @@ Fetch service status, replies disk space used, backend type and total disks offl ``` - -### ServiceStop() (error) -If successful shuts down the running minio service, for distributed setup stops all remote minio servers. - - __Example__ - - - ```go - - st, err := madmClnt.ServiceStop() - if err != nil { - log.Fatalln(err) - } - log.Printf("Succes") - - ``` - ### ServiceRestart() (error) If successful restarts the running minio service, for distributed setup restarts all remote minio servers. diff --git a/pkg/madmin/README.md b/pkg/madmin/README.md index 51ba7ae31..4fdb88833 100644 --- a/pkg/madmin/README.md +++ b/pkg/madmin/README.md @@ -105,7 +105,6 @@ go run service-status.go ### API Reference : Service Operations * [`ServiceStatus`](./API.md#ServiceStatus) -* [`ServiceStop`](./API.md#ServiceStop) * [`ServiceRestart`](./API.md#ServiceRestart) ## Full Examples @@ -113,7 +112,6 @@ go run service-status.go #### Full Examples : Service Operations * [service-status.go](https://github.com/minio/minio/blob/master/pkg/madmin/examples/service-status.go) -* [service-stop.go](https://github.com/minio/minio/blob/master/pkg/madmin/examples/service-stop.go) * [service-restart.go](https://github.com/minio/minio/blob/master/pkg/madmin/examples/service-restart.go) ## Contribute diff --git a/pkg/madmin/examples/service-stop.go b/pkg/madmin/examples/service-stop.go deleted file mode 100644 index 05b446eb2..000000000 --- a/pkg/madmin/examples/service-stop.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build ignore - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTPS) otherwise. - // New returns an Minio Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - err = madmClnt.ServiceStop() - if err != nil { - log.Fatalln(err) - } - log.Println("Success") -} diff --git a/pkg/madmin/service.go b/pkg/madmin/service.go index 6b25a018b..d49213037 100644 --- a/pkg/madmin/service.go +++ b/pkg/madmin/service.go @@ -95,30 +95,6 @@ func (adm *AdminClient) ServiceStatus() (ServiceStatusMetadata, error) { return storageInfo, nil } -// ServiceStop - Call Service Stop Management API to stop a specified Minio server -func (adm *AdminClient) ServiceStop() error { - // - reqData := requestData{} - reqData.queryValues = make(url.Values) - reqData.queryValues.Set("service", "") - reqData.customHeaders = make(http.Header) - reqData.customHeaders.Set(minioAdminOpHeader, "stop") - - // Execute GET on bucket to list objects. 
- resp, err := adm.executeMethod("POST", reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return errors.New("Got HTTP Status: " + resp.Status) - } - - return nil -} - // ServiceRestart - Call Service Restart API to restart a specified Minio server func (adm *AdminClient) ServiceRestart() error { // From bf2b8879b7227efd998f1722b524e8d90b4c78fc Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Sun, 15 Jan 2017 16:53:01 -0800 Subject: [PATCH 068/100] config: Allow non-standard config dir to be configured with SSL. (#3583) --- cmd/globals.go | 4 +--- cmd/main.go | 7 +++++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/cmd/globals.go b/cmd/globals.go index 419886e00..1257b68f6 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -129,9 +129,7 @@ func setGlobalsFromContext(c *cli.Context) { if globalConfigDir == "" { console.Fatalf("Unable to get config file. Config directory is empty.") } + // Set global quiet flag. globalQuiet = c.Bool("quiet") || c.GlobalBool("quiet") - - // Is TLS configured?. - globalIsSSL = isSSL() } diff --git a/cmd/main.go b/cmd/main.go index 02f672613..4864b7c7b 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -165,11 +165,14 @@ func checkUpdate() { // Generic Minio initialization to create/load config, prepare loggers, etc.. func minioInit(ctx *cli.Context) { + // Sets new config directory. + setGlobalConfigPath(globalConfigDir) + // Set global variables after parsing passed arguments setGlobalsFromContext(ctx) - // Sets new config directory. - setGlobalConfigPath(globalConfigDir) + // Is TLS configured?. + globalIsSSL = isSSL() // Migrate any old version of config / state files to newer format. migrate() From f8a3b1e16494bec2d26ede6561e29fdd6fb62b3d Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 16 Jan 2017 01:48:34 -0800 Subject: [PATCH 069/100] Fix a bug in previous patch. 
--- cmd/main.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index 4864b7c7b..bb2a47a71 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -165,12 +165,12 @@ func checkUpdate() { // Generic Minio initialization to create/load config, prepare loggers, etc.. func minioInit(ctx *cli.Context) { - // Sets new config directory. - setGlobalConfigPath(globalConfigDir) - // Set global variables after parsing passed arguments setGlobalsFromContext(ctx) + // Sets new config directory. + setGlobalConfigPath(globalConfigDir) + // Is TLS configured?. globalIsSSL = isSSL() From b580ad24e21ec6a5c04ce6ef9f262b5a1eed4430 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 16 Jan 2017 03:38:06 -0800 Subject: [PATCH 070/100] server/http: Add missing keep alive for incoming tcp connections. (#3585) This was seen reproducing a bug with @gowithplanb. Windows Cloud Berry clients do not close their respective connections. --- cmd/server-mux.go | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/cmd/server-mux.go b/cmd/server-mux.go index 6719b0733..e879e0c61 100644 --- a/cmd/server-mux.go +++ b/cmd/server-mux.go @@ -151,6 +151,15 @@ type ListenerMuxAcceptRes struct { err error } +// Default keep alive interval timeout, on your Linux system to figure out +// maximum probes sent +// +// $ cat /proc/sys/net/ipv4/tcp_keepalive_probes +// 9 +// +// Effective value of total keep alive comes upto 9 x 3 * time.Minute = 27 Minutes. +var defaultKeepAliveTimeout = 3 * time.Minute // 3 minutes. + // newListenerMux listens and wraps accepted connections with tls after protocol peeking func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux { l := ListenerMux{ @@ -163,11 +172,24 @@ func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux { go func() { // Loop for accepting new connections for { - conn, err := l.Listener.Accept() + // Extract tcp listener. 
+ tcpListener, ok := l.Listener.(*net.TCPListener) + if !ok { + l.acceptResCh <- ListenerMuxAcceptRes{err: errInvalidArgument} + return + } + + // Use accept TCP method to receive the connection. + conn, err := tcpListener.AcceptTCP() if err != nil { l.acceptResCh <- ListenerMuxAcceptRes{err: err} return } + + // Enable keep alive for each connection. + conn.SetKeepAlive(true) + conn.SetKeepAlivePeriod(defaultKeepAliveTimeout) + // Wrap the connection with ConnMux to be able to peek the data in the incoming connection // and decide if we need to wrap the connection itself with a TLS or not go func(conn net.Conn) { From a054c73e224015449c27e4f78060f050b8b1734b Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 16 Jan 2017 15:26:26 -0800 Subject: [PATCH 071/100] Add slack replace gitter (#3584) --- README.md | 2 +- README_ZH.md | 2 +- docs/FreeBSD.md | 2 +- docs/configure-minio-with-tls.md | 2 +- docs/distributed/README.md | 2 +- docs/docker/README.md | 2 +- docs/erasure/README.md | 2 +- pkg/madmin/API.md | 2 +- pkg/madmin/README.md | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 4f9c0c172..82b508ecb 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Minio Quickstart Guide [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![codecov](https://codecov.io/gh/minio/minio/branch/master/graph/badge.svg)](https://codecov.io/gh/minio/minio) +# Minio Quickstart Guide [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker 
Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![codecov](https://codecov.io/gh/minio/minio/branch/master/graph/badge.svg)](https://codecov.io/gh/minio/minio) Minio is an object storage server released under Apache License v2.0. It is compatible with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB. diff --git a/README_ZH.md b/README_ZH.md index 64aacb1b9..8e911a4ae 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -1,4 +1,4 @@ -# Minio 快速入门 [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![codecov](https://codecov.io/gh/minio/minio/branch/master/graph/badge.svg)](https://codecov.io/gh/minio/minio) +# Minio 快速入门 [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![codecov](https://codecov.io/gh/minio/minio/branch/master/graph/badge.svg)](https://codecov.io/gh/minio/minio) Minio是一个对象存储服务,基于Apache License v2.0协议. 
它完全兼容亚马逊的S3云储存服务,非常适合于存储很多非结构化的数据,例如图片、视频、日志文件、备份数据和容器/虚拟机镜像等,而一个对象文件可以是任意大小,从几kb到最大5T不等。 diff --git a/docs/FreeBSD.md b/docs/FreeBSD.md index bd11c1eea..931643c40 100644 --- a/docs/FreeBSD.md +++ b/docs/FreeBSD.md @@ -1,4 +1,4 @@ -# Minio FreeBSD Quickstart Guide [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +# Minio FreeBSD Quickstart Guide [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) ### Minio with ZFS backend - FreeBSD diff --git a/docs/configure-minio-with-tls.md b/docs/configure-minio-with-tls.md index 2ba9d872b..840f5628c 100644 --- a/docs/configure-minio-with-tls.md +++ b/docs/configure-minio-with-tls.md @@ -1,4 +1,4 @@ -# How to secure access to your Minio server with TLS [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +# How to secure access to your Minio server with TLS [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) In this document, we will show how to configure your Minio servers with TLS certificates. Steps for Windows could be found [here](https://github.com/minio/minio/blob/master/docs/configure-minio-with-gnutls-windows.md). 
diff --git a/docs/distributed/README.md b/docs/distributed/README.md index 749ca1693..04e4ab302 100644 --- a/docs/distributed/README.md +++ b/docs/distributed/README.md @@ -1,4 +1,4 @@ -# Distributed Minio Quickstart Guide [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![codecov](https://codecov.io/gh/minio/minio/branch/master/graph/badge.svg)](https://codecov.io/gh/minio/minio) +# Distributed Minio Quickstart Guide [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![codecov](https://codecov.io/gh/minio/minio/branch/master/graph/badge.svg)](https://codecov.io/gh/minio/minio) Minio in distributed mode lets you pool multiple drives (even on different machines) into a single object storage server. As drives are distributed across several nodes, distributed Minio can withstand multiple node failures and yet ensure full data protection. diff --git a/docs/docker/README.md b/docs/docker/README.md index 44676730c..a6a307695 100644 --- a/docs/docker/README.md +++ b/docs/docker/README.md @@ -1,4 +1,4 @@ -# Minio Docker Quickstart Guide [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +# Minio Docker Quickstart Guide [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) ## 1. 
Prerequisites diff --git a/docs/erasure/README.md b/docs/erasure/README.md index 80de9ca82..9016026c7 100644 --- a/docs/erasure/README.md +++ b/docs/erasure/README.md @@ -1,4 +1,4 @@ -# Minio Erasure Code Quickstart Guide [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +# Minio Erasure Code Quickstart Guide [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) Minio protects data against hardware failures and silent data corruption using erasure code and checksums. You may lose half the number (N/2) of drives and still be able to recover the data. diff --git a/pkg/madmin/API.md b/pkg/madmin/API.md index 0d60d755b..7eaf02833 100644 --- a/pkg/madmin/API.md +++ b/pkg/madmin/API.md @@ -1,4 +1,4 @@ -# Golang Admin Client API Reference [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +# Golang Admin Client API Reference [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) ## Initialize Minio Admin Client object. diff --git a/pkg/madmin/README.md b/pkg/madmin/README.md index 4fdb88833..73aba84a3 100644 --- a/pkg/madmin/README.md +++ b/pkg/madmin/README.md @@ -1,4 +1,4 @@ -# Minio Admin Library. [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +# Minio Admin Library. [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) The Minio Admin Golang Client SDK provides APIs to manage Minio services. This quickstart guide will show you how to install the Minio Admin client SDK, connect to Minio admin service, and provide a walkthrough of a simple file uploader. 
From 1c699d8d3fea0e2893b80e109e51933d42f13caf Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 16 Jan 2017 17:05:00 -0800 Subject: [PATCH 072/100] fs: Re-implement object layer to remember the fd (#3509) This patch re-writes FS backend to support shared backend sharing locks for safe concurrent access across multiple servers. --- cmd/admin-handlers_test.go | 4 +- cmd/benchmark-utils_test.go | 27 +- cmd/bucket-policy-migrate.go | 85 --- cmd/bucket-policy.go | 19 +- cmd/commands_test.go | 41 ++ cmd/crossdomain-xml-handler.go | 5 +- cmd/crossdomain-xml-handler_test.go | 41 ++ cmd/event-notifier_test.go | 20 +- cmd/fallocate_linux.go | 12 +- ...s-v1-errors.go => fallocate_linux_test.go} | 13 +- cmd/format-config-v1.go | 26 - cmd/format-config-v1_test.go | 22 - cmd/fs-helpers.go | 91 --- cmd/fs-v1-background-append.go | 124 ++-- cmd/fs-v1-helpers.go | 373 ++++++++++ cmd/fs-v1-helpers_test.go | 405 +++++++++++ cmd/fs-v1-metadata.go | 142 +++- cmd/fs-v1-metadata_test.go | 92 +-- cmd/fs-v1-multipart-common.go | 140 ++-- cmd/fs-v1-multipart-common_test.go | 65 +- cmd/fs-v1-multipart.go | 594 ++++++++++------ cmd/fs-v1-multipart_test.go | 104 +-- cmd/fs-v1-rwpool.go | 193 +++++ cmd/fs-v1-rwpool_test.go | 112 +++ cmd/fs-v1.go | 664 ++++++++++++------ cmd/fs-v1_test.go | 192 +++-- cmd/globals.go | 3 +- cmd/lock-instrument.go | 2 +- cmd/lock-instrument_test.go | 33 + cmd/lock-rpc-client_test.go | 68 ++ cmd/lockinfo-handlers.go | 46 -- cmd/namespace-lock.go | 30 +- cmd/object-api-common.go | 10 +- cmd/object-api-errors.go | 10 - cmd/object-api-listobjects_test.go | 9 +- cmd/object-api-multipart-common.go | 45 +- cmd/object-api-multipart_test.go | 4 +- cmd/object-handlers.go | 2 +- cmd/object_api_suite_test.go | 10 + cmd/posix-errors.go | 11 + cmd/posix.go | 1 - cmd/posix_test.go | 312 ++++---- cmd/prepare-storage.go | 31 +- cmd/routers.go | 47 -- cmd/server-main.go | 124 ++-- cmd/server-main_test.go | 58 ++ cmd/server-mux_test.go | 6 +- cmd/server_test.go | 4 +- 
cmd/signature-v2_test.go | 2 +- cmd/signature-v4-utils.go | 7 - cmd/signature-v4-utils_test.go | 19 - cmd/storage-rpc-client_test.go | 9 + cmd/test-utils_test.go | 46 +- cmd/utils.go | 23 - cmd/utils_test.go | 42 -- cmd/web-handlers_test.go | 4 +- cmd/xl-v1-healing.go | 2 +- cmd/xl-v1-object.go | 6 +- cmd/xl-v1.go | 18 + cmd/xl-v1_test.go | 8 +- docs/minio-limitations.md | 8 +- docs/shared-backend/DESIGN.md | 137 ++++ docs/shared-backend/README.md | 92 +++ pkg/lock/lock.go | 102 +++ pkg/lock/lock_nix.go | 75 ++ pkg/lock/lock_test.go | 192 +++++ pkg/lock/lock_windows.go | 172 +++++ pkg/quick/quick.go | 4 +- 68 files changed, 3860 insertions(+), 1580 deletions(-) delete mode 100644 cmd/bucket-policy-migrate.go create mode 100644 cmd/commands_test.go create mode 100644 cmd/crossdomain-xml-handler_test.go rename cmd/{fs-v1-errors.go => fallocate_linux_test.go} (75%) delete mode 100644 cmd/fs-helpers.go create mode 100644 cmd/fs-v1-helpers.go create mode 100644 cmd/fs-v1-helpers_test.go create mode 100644 cmd/fs-v1-rwpool.go create mode 100644 cmd/fs-v1-rwpool_test.go create mode 100644 cmd/lock-rpc-client_test.go create mode 100644 docs/shared-backend/DESIGN.md create mode 100644 docs/shared-backend/README.md create mode 100644 pkg/lock/lock.go create mode 100644 pkg/lock/lock_nix.go create mode 100644 pkg/lock/lock_test.go create mode 100644 pkg/lock/lock_windows.go diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 689ac4c9c..0a9e370d5 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -134,11 +134,11 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) { if cmd == statusCmd { // Initializing objectLayer and corresponding // []StorageAPI since DiskInfo() method requires it. 
- objLayer, fsDir, fsErr := prepareFS() + objLayer, fsDirs, fsErr := prepareXL() if fsErr != nil { t.Fatalf("failed to initialize XL based object layer - %v.", fsErr) } - defer removeRoots([]string{fsDir}) + defer removeRoots(fsDirs) globalObjLayerMutex.Lock() globalObjectAPI = objLayer globalObjLayerMutex.Unlock() diff --git a/cmd/benchmark-utils_test.go b/cmd/benchmark-utils_test.go index 88501b915..fd80e45bb 100644 --- a/cmd/benchmark-utils_test.go +++ b/cmd/benchmark-utils_test.go @@ -30,32 +30,23 @@ import ( // Prepare benchmark backend func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) { - var nDisks int switch instanceType { // Total number of disks for FS backend is set to 1. case FSTestStr: - nDisks = 1 - // Total number of disks for FS backend is set to 16. + obj, disk, err := prepareFS() + if err != nil { + return nil, nil, err + } + return obj, []string{disk}, nil + // Total number of disks for XL backend is set to 16. case XLTestStr: - nDisks = 16 - default: - nDisks = 1 + return prepareXL() } - // get `nDisks` random disks. - disks, err := getRandomDisks(nDisks) + obj, disk, err := prepareFS() if err != nil { return nil, nil, err } - endpoints, err := parseStorageEndpoints(disks) - if err != nil { - return nil, nil, err - } - // initialize object layer. - obj, _, err := initObjectLayer(endpoints) - if err != nil { - return nil, nil, err - } - return obj, disks, nil + return obj, []string{disk}, nil } // Benchmark utility functions for ObjectLayer.PutObject(). diff --git a/cmd/bucket-policy-migrate.go b/cmd/bucket-policy-migrate.go deleted file mode 100644 index b54b04697..000000000 --- a/cmd/bucket-policy-migrate.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "strings" -) - -const policyJSON = "policy.json" - -func getBucketFromPolicyPath(oldPolicyPath string) string { - bucketPrefix, _ := filepath.Split(oldPolicyPath) - _, bucketName := filepath.Split(strings.TrimSuffix(bucketPrefix, slashSeparator)) - return bucketName -} - -func cleanupOldBucketPolicyConfigs() error { - // Get old bucket policy config directory. - oldBucketsConfigDir, err := getOldBucketsConfigPath() - fatalIf(err, "Unable to fetch buckets config path to migrate bucket policy") - - // Recursively remove configDir/buckets/ - old bucket policy config location. - // N B This is called only if all bucket policies were successfully migrated. - return os.RemoveAll(oldBucketsConfigDir) -} - -func migrateBucketPolicyConfig(objAPI ObjectLayer) error { - // Get old bucket policy config directory. - oldBucketsConfigDir, err := getOldBucketsConfigPath() - fatalIf(err, "Unable to fetch buckets config path to migrate bucket policy") - - // Check if config directory holding bucket policy exists before - // migration. - _, err = os.Stat(oldBucketsConfigDir) - if os.IsNotExist(err) { - return nil - } - if err != nil { - return err - } - - // WalkFunc that migrates access-policy.json to - // .minio.sys/buckets/bucketName/policy.json on all disks. 
- migrateBucketPolicy := func(policyPath string, fileInfo os.FileInfo, err error) error { - // policyFile - e.g /configDir/sample-bucket/access-policy.json - if err != nil { - return err - } - // Skip entries that aren't bucket policy files. - if fileInfo.Name() != "access-policy.json" { - return nil - } - // Get bucketName from old policy file path. - bucketName := getBucketFromPolicyPath(policyPath) - // Read bucket policy config from old location. - policyBytes, err := ioutil.ReadFile(policyPath) - fatalIf(err, "Unable to read bucket policy to migrate bucket policy", policyPath) - newPolicyPath := retainSlash(bucketConfigPrefix) + retainSlash(bucketName) + policyJSON - var metadata map[string]string - sha256sum := "" - // Erasure code the policy config to all the disks. - _, err = objAPI.PutObject(minioMetaBucket, newPolicyPath, int64(len(policyBytes)), bytes.NewReader(policyBytes), metadata, sha256sum) - fatalIf(err, "Unable to write bucket policy during migration.", newPolicyPath) - return nil - } - return filepath.Walk(oldBucketsConfigDir, migrateBucketPolicy) -} diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go index 9619e1c05..e6462db8e 100644 --- a/cmd/bucket-policy.go +++ b/cmd/bucket-policy.go @@ -20,7 +20,6 @@ import ( "bytes" "encoding/json" "io" - "path" "sync" ) @@ -28,6 +27,9 @@ const ( // Static prefix to be used while constructing bucket ARN. // refer to S3 docs for more info. bucketARNPrefix = "arn:" + eventSource + ":::" + + // Bucket policy config name. + bucketPolicyConfig = "policy.json" ) // Variable represents bucket policies in memory. @@ -137,19 +139,10 @@ func initBucketPolicies(objAPI ObjectLayer) error { return nil } -// getOldBucketsConfigPath - get old buckets config path. 
(Only used for migrating old bucket policies) -func getOldBucketsConfigPath() (string, error) { - configPath, err := getConfigPath() - if err != nil { - return "", err - } - return path.Join(configPath, "buckets"), nil -} - // readBucketPolicyJSON - reads bucket policy for an input bucket, returns BucketPolicyNotFound // if bucket policy is not found. func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader io.Reader, err error) { - policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON) + policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig) // Acquire a read lock on policy config before reading. objLock := globalNSMutex.NewNSLock(minioMetaBucket, policyPath) @@ -191,7 +184,7 @@ func readBucketPolicy(bucket string, objAPI ObjectLayer) (*bucketPolicy, error) // removeBucketPolicy - removes any previously written bucket policy. Returns BucketPolicyNotFound // if no policies are found. func removeBucketPolicy(bucket string, objAPI ObjectLayer) error { - policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON) + policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig) // Acquire a write lock on policy config before modifying. objLock := globalNSMutex.NewNSLock(minioMetaBucket, policyPath) objLock.Lock() @@ -214,7 +207,7 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy *bucketPolicy) err errorIf(err, "Unable to marshal bucket policy '%v' to JSON", *bpy) return err } - policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON) + policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig) // Acquire a write lock on policy config before modifying. objLock := globalNSMutex.NewNSLock(minioMetaBucket, policyPath) objLock.Lock() diff --git a/cmd/commands_test.go b/cmd/commands_test.go new file mode 100644 index 000000000..8f7e5cb00 --- /dev/null +++ b/cmd/commands_test.go @@ -0,0 +1,41 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "testing" + + "github.com/minio/cli" +) + +// Tests register command function. +func TestRegisterCommand(t *testing.T) { + registerCommand(cli.Command{ + Name: "test1", + }) + ccount := len(commands) + if ccount != 1 { + t.Fatalf("Unexpected number of commands found %d", ccount) + } + registerCommand(cli.Command{ + Name: "test2", + }) + ccount = len(commands) + if ccount != 2 { + t.Fatalf("Unexpected number of commands found %d", ccount) + } +} diff --git a/cmd/crossdomain-xml-handler.go b/cmd/crossdomain-xml-handler.go index 44e04ab64..bd1ac5fe6 100644 --- a/cmd/crossdomain-xml-handler.go +++ b/cmd/crossdomain-xml-handler.go @@ -21,6 +21,9 @@ import "net/http" // Standard cross domain policy information located at https://s3.amazonaws.com/crossdomain.xml const crossDomainXML = `` +// Standard path where an app would find cross domain policy information. +const crossDomainXMLEntity = "/crossdomain.xml" + // Cross domain policy implements http.Handler interface, implementing a custom ServerHTTP. type crossDomainPolicy struct { handler http.Handler @@ -38,7 +41,7 @@ func setCrossDomainPolicy(h http.Handler) http.Handler { func (c crossDomainPolicy) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Look for 'crossdomain.xml' in the incoming request. switch r.URL.Path { - case "/crossdomain.xml": + case crossDomainXMLEntity: // Write the standard cross domain policy xml. 
w.Write([]byte(crossDomainXML)) // Request completed, no need to serve to other handlers. diff --git a/cmd/crossdomain-xml-handler_test.go b/cmd/crossdomain-xml-handler_test.go new file mode 100644 index 000000000..2a95cd093 --- /dev/null +++ b/cmd/crossdomain-xml-handler_test.go @@ -0,0 +1,41 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "net/http" + "net/http/httptest" + "testing" + + router "github.com/gorilla/mux" +) + +// Test cross domain xml handler. +func TestCrossXMLHandler(t *testing.T) { + // Server initialization. + mux := router.NewRouter().SkipClean(true) + handler := setCrossDomainPolicy(mux) + srv := httptest.NewServer(handler) + + resp, err := http.Get(srv.URL + crossDomainXMLEntity) + if err != nil { + t.Fatal(err) + } + if resp.StatusCode != http.StatusOK { + t.Fatal("Unexpected http status received", resp.Status) + } +} diff --git a/cmd/event-notifier_test.go b/cmd/event-notifier_test.go index 45e8a0b02..89556b0c1 100644 --- a/cmd/event-notifier_test.go +++ b/cmd/event-notifier_test.go @@ -17,6 +17,7 @@ package cmd import ( + "bytes" "fmt" "net" "reflect" @@ -34,11 +35,11 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) { // remove the root directory after the test ends. defer removeAll(rootPath) - disks, err := getRandomDisks(1) + disks, err := getRandomDisks(4) if err != nil { t.Fatal("Unable to create directories for FS backend. 
", err) } - defer removeAll(disks[0]) + defer removeRoots(disks) endpoints, err := parseStorageEndpoints(disks) if err != nil { t.Fatal(err) @@ -53,8 +54,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) { t.Fatal("Unexpected error:", err) } - fs := obj.(fsObjects) - fsstorage := fs.storage.(*retryStorage) + xl := obj.(*xlObjects) listenARN := "arn:minio:sns:us-east-1:1:listen" queueARN := "arn:minio:sqs:us-east-1:1:redis" @@ -64,14 +64,18 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) { notificationXML += "s3:ObjectRemoved:*s3:ObjectRemoved:*" + listenARN + "" notificationXML += "s3:ObjectRemoved:*s3:ObjectRemoved:*" + queueARN + "" notificationXML += "" - if err := fsstorage.AppendFile(minioMetaBucket, bucketConfigPrefix+"/"+bucketName+"/"+bucketNotificationConfig, []byte(notificationXML)); err != nil { + size := int64(len([]byte(notificationXML))) + reader := bytes.NewReader([]byte(notificationXML)) + if _, err := xl.PutObject(minioMetaBucket, bucketConfigPrefix+"/"+bucketName+"/"+bucketNotificationConfig, size, reader, nil, ""); err != nil { t.Fatal("Unexpected error:", err) } + for i, d := range xl.storageDisks { + xl.storageDisks[i] = newNaughtyDisk(d.(*retryStorage), nil, errFaultyDisk) + } // Test initEventNotifier() with faulty disks for i := 1; i <= 3; i++ { - fs.storage = newNaughtyDisk(fsstorage, map[int]error{i: errFaultyDisk}, nil) - if err := initEventNotifier(fs); errorCause(err) != errFaultyDisk { + if err := initEventNotifier(xl); errorCause(err) != errFaultyDisk { t.Fatal("Unexpected error:", err) } } @@ -387,7 +391,7 @@ func TestInitEventNotifier(t *testing.T) { } // needed to load listener config from disk for testing (in - // single peer mode, the listener config is ingored, but here + // single peer mode, the listener config is ignored, but here // we want to test the loading from disk too.) 
+	// Don't extend size of file even if offset + len is
+	// greater than file size (FALLOC_FL_KEEP_SIZE).
- if err := initMetaVolume([]StorageAPI{storageDisk}); err != nil { - return fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err) - } - return saveFSFormatData(storageDisk, newFSFormatV1()) -} - -// loads format.json from minioMetaBucket if it exists. -func loadFormatFS(storageDisk StorageAPI) (format *formatConfigV1, err error) { - return loadFormat(storageDisk) -} - // loadAllFormats - load all format config from all input disks in parallel. func loadAllFormats(bootstrapDisks []StorageAPI) ([]*formatConfigV1, []error) { // Initialize sync waitgroup. @@ -206,18 +192,6 @@ func loadAllFormats(bootstrapDisks []StorageAPI) ([]*formatConfigV1, []error) { return formatConfigs, sErrs } -// genericFormatCheckFS - validates format config and returns an error if any. -func genericFormatCheckFS(formatConfig *formatConfigV1, sErr error) (err error) { - if sErr != nil { - return sErr - } - // Successfully read, validate if FS. - if !isFSFormat(formatConfig) { - return errFSDiskFormat - } - return nil -} - // genericFormatCheckXL - validates and returns error. // if (no quorum) return error // if (any disk is corrupt) return error // phase2 diff --git a/cmd/format-config-v1_test.go b/cmd/format-config-v1_test.go index be9c93e56..8e46fdde3 100644 --- a/cmd/format-config-v1_test.go +++ b/cmd/format-config-v1_test.go @@ -664,28 +664,6 @@ func TestReduceFormatErrs(t *testing.T) { } } -// Tests for genericFormatCheckFS() -func TestGenericFormatCheckFS(t *testing.T) { - // Generate format configs for XL. - formatConfigs := genFormatXLInvalidJBOD() - - // Validate disk format is fs, should fail. - if err := genericFormatCheckFS(formatConfigs[0], nil); err != errFSDiskFormat { - t.Fatalf("Unexpected error, expected %s, got %s", errFSDiskFormat, err) - } - - // Validate disk is unformatted, should fail. 
- if err := genericFormatCheckFS(nil, errUnformattedDisk); err != errUnformattedDisk { - t.Fatalf("Unexpected error, expected %s, got %s", errUnformattedDisk, err) - } - - // Validate when disk is in FS format. - format := newFSFormatV1() - if err := genericFormatCheckFS(format, nil); err != nil { - t.Fatalf("Unexpected error should pass, failed with %s", err) - } -} - // Tests for genericFormatCheckXL() func TestGenericFormatCheckXL(t *testing.T) { var errs []error diff --git a/cmd/fs-helpers.go b/cmd/fs-helpers.go deleted file mode 100644 index 0c457fad2..000000000 --- a/cmd/fs-helpers.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import "io" - -// Reads from the requested local location uses a staging buffer. Restricts -// reads upto requested range of length and offset. If successful staging -// buffer is written to the incoming stream. Returns errors if any. -func fsReadFile(disk StorageAPI, bucket, object string, writer io.Writer, totalLeft, startOffset int64, buf []byte) (err error) { - bufSize := int64(len(buf)) - // Start the read loop until requested range. - for { - // Figure out the right size for the buffer. - curLeft := bufSize - if totalLeft < bufSize { - curLeft = totalLeft - } - // Reads the file at offset. - nr, er := disk.ReadFile(bucket, object, startOffset, buf[:curLeft]) - if nr > 0 { - // Write to response writer. 
- nw, ew := writer.Write(buf[0:nr]) - if nw > 0 { - // Decrement whats left to write. - totalLeft -= int64(nw) - - // Progress the offset - startOffset += int64(nw) - } - if ew != nil { - err = traceError(ew) - break - } - if nr != int64(nw) { - err = traceError(io.ErrShortWrite) - break - } - } - if er == io.EOF || er == io.ErrUnexpectedEOF { - break - } - if er != nil { - err = traceError(er) - break - } - if totalLeft == 0 { - break - } - } - return err -} - -// Reads from input stream until end of file, takes an input buffer for staging reads. -// The staging buffer is then written to the disk. Returns for any error that occurs -// while reading the stream or writing to disk. Caller should cleanup partial files. -// Upon errors total data written will be 0 and returns error, on success returns -// total data written to disk. -func fsCreateFile(disk StorageAPI, reader io.Reader, buf []byte, bucket, object string) (int64, error) { - bytesWritten := int64(0) - // Read the buffer till io.EOF and appends data to path at bucket/object. - for { - n, rErr := reader.Read(buf) - if rErr != nil && rErr != io.EOF { - return 0, traceError(rErr) - } - bytesWritten += int64(n) - wErr := disk.AppendFile(bucket, object, buf[0:n]) - if wErr != nil { - return 0, traceError(wErr) - } - if rErr == io.EOF { - break - } - } - return bytesWritten, nil -} diff --git a/cmd/fs-v1-background-append.go b/cmd/fs-v1-background-append.go index 1db467010..2e4362b4c 100644 --- a/cmd/fs-v1-background-append.go +++ b/cmd/fs-v1-background-append.go @@ -18,6 +18,8 @@ package cmd import ( "errors" + "io" + "os" "reflect" "sync" "time" @@ -30,15 +32,16 @@ import ( var errPartsMissing = errors.New("required parts missing") // Error sent when appendParts go-routine has waited long enough and timedout. -var errAppendPartsTimeout = errors.New("appendParts goroutine timeout") +var errAppendPartsTimeout = errors.New("appendParts go-routine timeout") // Timeout value for the appendParts go-routine. 
-var appendPartsTimeout = 24 * 60 * 60 * time.Second // 24 hours. +var appendPartsTimeout = 24 * 60 * 60 * time.Second // 24 Hours. // Holds a map of uploadID->appendParts go-routine type backgroundAppend struct { - infoMap map[string]bgAppendPartsInfo sync.Mutex + infoMap map[string]bgAppendPartsInfo + appendFile io.WriteCloser } // Input to the appendParts go-routine @@ -56,9 +59,9 @@ type bgAppendPartsInfo struct { } // Called after a part is uploaded so that it can be appended in the background. -func (b *backgroundAppend) append(disk StorageAPI, bucket, object, uploadID string, meta fsMetaV1) chan error { - b.Lock() - info, ok := b.infoMap[uploadID] +func (fs fsObjects) append(bucket, object, uploadID string, meta fsMetaV1) chan error { + fs.bgAppend.Lock() + info, ok := fs.bgAppend.infoMap[uploadID] if !ok { // Corresponding appendParts go-routine was not found, create a new one. Would happen when the first // part of a multipart upload is uploaded. @@ -68,11 +71,12 @@ func (b *backgroundAppend) append(disk StorageAPI, bucket, object, uploadID stri completeCh := make(chan struct{}) info = bgAppendPartsInfo{inputCh, timeoutCh, abortCh, completeCh} - b.infoMap[uploadID] = info + fs.bgAppend.infoMap[uploadID] = info - go b.appendParts(disk, bucket, object, uploadID, info) + go fs.appendParts(bucket, object, uploadID, info) } - b.Unlock() + fs.bgAppend.Unlock() + errCh := make(chan error) go func() { // send input in a goroutine as send on the inputCh can block if appendParts go-routine @@ -85,19 +89,23 @@ func (b *backgroundAppend) append(disk StorageAPI, bucket, object, uploadID stri case info.inputCh <- bgAppendPartsInput{meta, errCh}: } }() + return errCh } // Called on complete-multipart-upload. Returns nil if the required parts have been appended. 
-func (b *backgroundAppend) complete(disk StorageAPI, bucket, object, uploadID string, meta fsMetaV1) error { - b.Lock() - defer b.Unlock() - info, ok := b.infoMap[uploadID] - delete(b.infoMap, uploadID) +func (fs *fsObjects) complete(bucket, object, uploadID string, meta fsMetaV1) error { + fs.bgAppend.Lock() + defer fs.bgAppend.Unlock() + + info, ok := fs.bgAppend.infoMap[uploadID] + delete(fs.bgAppend.infoMap, uploadID) if !ok { return errPartsMissing } + errCh := make(chan error) + select { case <-info.timeoutCh: // This is to handle a rare race condition where we found info in b.infoMap @@ -105,6 +113,7 @@ func (b *backgroundAppend) complete(disk StorageAPI, bucket, object, uploadID st return errAppendPartsTimeout case info.inputCh <- bgAppendPartsInput{meta, errCh}: } + err := <-errCh close(info.completeCh) @@ -113,21 +122,26 @@ func (b *backgroundAppend) complete(disk StorageAPI, bucket, object, uploadID st } // Called after complete-multipart-upload or abort-multipart-upload so that the appendParts go-routine is not left dangling. -func (b *backgroundAppend) abort(uploadID string) { - b.Lock() - defer b.Unlock() - info, ok := b.infoMap[uploadID] +func (fs fsObjects) abort(uploadID string) { + fs.bgAppend.Lock() + defer fs.bgAppend.Unlock() + + info, ok := fs.bgAppend.infoMap[uploadID] if !ok { return } - delete(b.infoMap, uploadID) + + delete(fs.bgAppend.infoMap, uploadID) + info.abortCh <- struct{}{} } // This is run as a go-routine that appends the parts in the background. -func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID string, info bgAppendPartsInfo) { +func (fs fsObjects) appendParts(bucket, object, uploadID string, info bgAppendPartsInfo) { + appendPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID) // Holds the list of parts that is already appended to the "append" file. appendMeta := fsMetaV1{} + // Allocate staging read buffer. 
buf := make([]byte, readSizeV1) for { @@ -136,6 +150,7 @@ func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID // We receive on this channel when new part gets uploaded or when complete-multipart sends // a value on this channel to confirm if all the required parts are appended. meta := input.meta + for { // Append should be done such a way that if part-3 and part-2 is uploaded before part-1, we // wait till part-1 is uploaded after which we append part-2 and part-3 as well in this for-loop. @@ -152,18 +167,23 @@ func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID } break } - if err := appendPart(disk, bucket, object, uploadID, part, buf); err != nil { - disk.DeleteFile(minioMetaTmpBucket, uploadID) + + if err := fs.appendPart(bucket, object, uploadID, part, buf); err != nil { + fsRemoveFile(appendPath) appendMeta.Parts = nil input.errCh <- err break } + appendMeta.AddObjectPart(part.Number, part.Name, part.ETag, part.Size) } case <-info.abortCh: // abort-multipart-upload closed abortCh to end the appendParts go-routine. - disk.DeleteFile(minioMetaTmpBucket, uploadID) - close(info.timeoutCh) // So that any racing PutObjectPart does not leave a dangling go-routine. + fsRemoveFile(appendPath) + + // So that any racing PutObjectPart does not leave a dangling go-routine. + close(info.timeoutCh) + return case <-info.completeCh: // complete-multipart-upload closed completeCh to end the appendParts go-routine. @@ -172,11 +192,12 @@ func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID case <-time.After(appendPartsTimeout): // Timeout the goroutine to garbage collect its resources. This would happen if the client initiates // a multipart upload and does not complete/abort it. - b.Lock() - delete(b.infoMap, uploadID) - b.Unlock() + fs.bgAppend.Lock() + delete(fs.bgAppend.infoMap, uploadID) + fs.bgAppend.Unlock() + // Delete the temporary append file as well. 
+	// No need to hold a lock, this is a unique file and will be only written
+	// to by one process per uploadID per minio process.
+ wfile, err := os.OpenFile(preparePath(tmpObjPath), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666) + if err != nil { + return err + } + defer wfile.Close() + + // Fallocate more space as we concatenate. + if err = fsFAllocate(int(wfile.Fd()), 0, size); err != nil { + return err + } + + _, err = io.CopyBuffer(wfile, file, buf) + return err } diff --git a/cmd/fs-v1-helpers.go b/cmd/fs-v1-helpers.go new file mode 100644 index 000000000..4c911380c --- /dev/null +++ b/cmd/fs-v1-helpers.go @@ -0,0 +1,373 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "io" + "os" + pathutil "path" +) + +// Removes only the file at given path does not remove +// any parent directories, handles long paths for +// windows automatically. +func fsRemoveFile(filePath string) (err error) { + if filePath == "" { + return errInvalidArgument + } + + if err = checkPathLength(filePath); err != nil { + return err + } + + if err = os.Remove(preparePath(filePath)); err != nil { + if os.IsNotExist(err) { + return errFileNotFound + } else if os.IsPermission(err) { + return errFileAccessDenied + } + return err + } + + return nil +} + +// Removes all files and folders at a given path, handles +// long paths for windows automatically. 
+func fsRemoveAll(dirPath string) (err error) { + if dirPath == "" { + return errInvalidArgument + } + + if err = checkPathLength(dirPath); err != nil { + return err + } + + if err = removeAll(dirPath); err != nil { + if os.IsPermission(err) { + return errVolumeAccessDenied + } + } + + return err + +} + +// Removes a directory only if its empty, handles long +// paths for windows automatically. +func fsRemoveDir(dirPath string) (err error) { + if dirPath == "" { + return errInvalidArgument + } + + if err = checkPathLength(dirPath); err != nil { + return err + } + + if err = os.Remove(preparePath(dirPath)); err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } else if isSysErrNotEmpty(err) { + return errVolumeNotEmpty + } + } + + return err +} + +// Creates a new directory, parent dir should exist +// otherwise returns an error. If directory already +// exists returns an error. Windows long paths +// are handled automatically. +func fsMkdir(dirPath string) (err error) { + if dirPath == "" { + return errInvalidArgument + } + + if err = checkPathLength(dirPath); err != nil { + return err + } + + if err = os.Mkdir(preparePath(dirPath), 0777); err != nil { + if os.IsExist(err) { + return errVolumeExists + } else if os.IsPermission(err) { + return errDiskAccessDenied + } else if isSysErrNotDir(err) { + // File path cannot be verified since + // one of the parents is a file. + return errDiskAccessDenied + } else if isSysErrPathNotFound(err) { + // Add specific case for windows. + return errDiskAccessDenied + } + } + + return nil +} + +// Lookup if directory exists, returns directory +// attributes upon success. 
+func fsStatDir(statDir string) (os.FileInfo, error) { + if statDir == "" { + return nil, errInvalidArgument + } + if err := checkPathLength(statDir); err != nil { + return nil, err + } + + fi, err := os.Stat(preparePath(statDir)) + if err != nil { + if os.IsNotExist(err) { + return nil, errVolumeNotFound + } else if os.IsPermission(err) { + return nil, errVolumeAccessDenied + } + return nil, err + } + + if !fi.IsDir() { + return nil, errVolumeAccessDenied + } + + return fi, nil +} + +// Lookup if file exists, returns file attributes upon success +func fsStatFile(statFile string) (os.FileInfo, error) { + if statFile == "" { + return nil, errInvalidArgument + } + + if err := checkPathLength(statFile); err != nil { + return nil, err + } + + fi, err := os.Stat(preparePath(statFile)) + if err != nil { + if os.IsNotExist(err) { + return nil, errFileNotFound + } else if os.IsPermission(err) { + return nil, errFileAccessDenied + } else if isSysErrNotDir(err) { + return nil, errFileAccessDenied + } else if isSysErrPathNotFound(err) { + return nil, errFileNotFound + } + return nil, err + } + if fi.IsDir() { + return nil, errFileNotFound + } + return fi, nil +} + +// Opens the file at given path, optionally from an offset. Upon success returns +// a readable stream and the size of the readable stream. +func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) { + if readPath == "" || offset < 0 { + return nil, 0, errInvalidArgument + } + if err := checkPathLength(readPath); err != nil { + return nil, 0, err + } + + fr, err := os.Open(preparePath(readPath)) + if err != nil { + if os.IsNotExist(err) { + return nil, 0, errFileNotFound + } else if os.IsPermission(err) { + return nil, 0, errFileAccessDenied + } else if isSysErrNotDir(err) { + // File path cannot be verified since one of the parents is a file. + return nil, 0, errFileAccessDenied + } else if isSysErrPathNotFound(err) { + // Add specific case for windows. 
+ return nil, 0, errFileNotFound + } + return nil, 0, err + } + + // Stat to get the size of the file at path. + st, err := fr.Stat() + if err != nil { + return nil, 0, err + } + + // Verify if its not a regular file, since subsequent Seek is undefined. + if !st.Mode().IsRegular() { + return nil, 0, errIsNotRegular + } + + // Seek to the requested offset. + if offset > 0 { + _, err = fr.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + // Success. + return fr, st.Size(), nil +} + +// Creates a file and copies data from incoming reader. Staging buffer is used by io.CopyBuffer. +func fsCreateFile(tempObjPath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) { + if tempObjPath == "" || reader == nil || buf == nil { + return 0, errInvalidArgument + } + + if err := checkPathLength(tempObjPath); err != nil { + return 0, err + } + + if err := mkdirAll(pathutil.Dir(tempObjPath), 0777); err != nil { + return 0, err + } + + writer, err := os.OpenFile(preparePath(tempObjPath), os.O_CREATE|os.O_WRONLY, 0666) + if err != nil { + // File path cannot be verified since one of the parents is a file. + if isSysErrNotDir(err) { + return 0, errFileAccessDenied + } + return 0, err + } + defer writer.Close() + + // Fallocate only if the size is final object is known. + if fallocSize > 0 { + if err = fsFAllocate(int(writer.Fd()), 0, fallocSize); err != nil { + return 0, err + } + } + + bytesWritten, err := io.CopyBuffer(writer, reader, buf) + if err != nil { + return 0, err + } + + return bytesWritten, nil +} + +// Removes uploadID at destination path. +func fsRemoveUploadIDPath(basePath, uploadIDPath string) error { + if basePath == "" || uploadIDPath == "" { + return errInvalidArgument + } + + // List all the entries in uploadID. + entries, err := readDir(uploadIDPath) + if err != nil && err != errFileNotFound { + return err + } + + // Delete all the entries obtained from previous readdir. 
+ for _, entryPath := range entries { + err = fsDeleteFile(basePath, pathJoin(uploadIDPath, entryPath)) + if err != nil && err != errFileNotFound { + return err + } + } + + return nil +} + +// fsFAllocate is similar to Fallocate but provides a convenient +// wrapper to handle various operating system specific errors. +func fsFAllocate(fd int, offset int64, len int64) (err error) { + e := Fallocate(fd, offset, len) + // Ignore errors when Fallocate is not supported in the current system + if e != nil && !isSysErrNoSys(e) && !isSysErrOpNotSupported(e) { + switch { + case isSysErrNoSpace(e): + err = errDiskFull + case isSysErrIO(e): + err = e + default: + // For errors: EBADF, EINTR, EINVAL, ENODEV, EPERM, ESPIPE and ETXTBSY + // Appending was failed anyway, returns unexpected error + err = errUnexpected + } + return err + } + + return nil +} + +// Renames source path to destination path, creates all the +// missing parents if they don't exist. +func fsRenameFile(sourcePath, destPath string) error { + if err := mkdirAll(pathutil.Dir(destPath), 0777); err != nil { + return traceError(err) + } + if err := os.Rename(preparePath(sourcePath), preparePath(destPath)); err != nil { + return traceError(err) + } + return nil +} + +// Delete a file and its parent if it is empty at the destination path. +// this function additionally protects the basePath from being deleted. +func fsDeleteFile(basePath, deletePath string) error { + if err := checkPathLength(basePath); err != nil { + return err + } + + if err := checkPathLength(deletePath); err != nil { + return err + } + + if basePath == deletePath { + return nil + } + + // Verify if the path exists. + pathSt, err := os.Stat(preparePath(deletePath)) + if err != nil { + if os.IsNotExist(err) { + return errFileNotFound + } else if os.IsPermission(err) { + return errFileAccessDenied + } + return err + } + + if pathSt.IsDir() && !isDirEmpty(deletePath) { + // Verify if directory is empty. 
+ return nil + } + + // Attempt to remove path. + if err = os.Remove(preparePath(deletePath)); err != nil { + if os.IsNotExist(err) { + return errFileNotFound + } else if os.IsPermission(err) { + return errFileAccessDenied + } else if isSysErrNotEmpty(err) { + return errVolumeNotEmpty + } + return err + } + + // Recursively go down the next path and delete again. + if err := fsDeleteFile(basePath, pathutil.Dir(deletePath)); err != nil { + return err + } + + return nil +} diff --git a/cmd/fs-v1-helpers_test.go b/cmd/fs-v1-helpers_test.go new file mode 100644 index 000000000..e58f4319f --- /dev/null +++ b/cmd/fs-v1-helpers_test.go @@ -0,0 +1,405 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "bytes" + "runtime" + "testing" +) + +func TestFSStats(t *testing.T) { + // create posix test setup + _, path, err := newPosixTestSetup() + if err != nil { + t.Fatalf("Unable to create posix test setup, %s", err) + } + defer removeAll(path) + + // Setup test environment. 
+ + if err = fsMkdir(""); err != errInvalidArgument { + t.Fatal("Unexpected error", err) + } + + if err = fsMkdir(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); err != errFileNameTooLong { + t.Fatal("Unexpected error", err) + } + + if err = fsMkdir(pathJoin(path, "success-vol")); err != nil { + t.Fatalf("Unable to create volume, %s", err) + } + + var buf = make([]byte, 4096) + var reader = bytes.NewReader([]byte("Hello, world")) + if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil { + t.Fatalf("Unable to create file, %s", err) + } + // Seek back. + reader.Seek(0, 0) + + if err = fsMkdir(pathJoin(path, "success-vol", "success-file")); err != errVolumeExists { + t.Fatal("Unexpected error", err) + } + + if _, err = fsCreateFile(pathJoin(path, "success-vol", "path/to/success-file"), reader, buf, reader.Size()); err != nil { + t.Fatalf("Unable to create file, %s", err) + } + // Seek back. + reader.Seek(0, 0) + + testCases := []struct { + srcFSPath string + srcVol string + srcPath string + expectedErr error + }{ + // Test case - 1. + // Test case with valid inputs, expected to pass. + { + srcFSPath: path, + srcVol: "success-vol", + srcPath: "success-file", + expectedErr: nil, + }, + // Test case - 2. + // Test case with valid inputs, expected to pass. + { + srcFSPath: path, + srcVol: "success-vol", + srcPath: "path/to/success-file", + expectedErr: nil, + }, + // Test case - 3. + // Test case with non-existent file. + { + srcFSPath: path, + srcVol: "success-vol", + srcPath: "nonexistent-file", + expectedErr: errFileNotFound, + }, + // Test case - 4. + // Test case with non-existent file path. 
+ { + srcFSPath: path, + srcVol: "success-vol", + srcPath: "path/2/success-file", + expectedErr: errFileNotFound, + }, + // Test case - 5. + // Test case with path being a directory. + { + srcFSPath: path, + srcVol: "success-vol", + srcPath: "path", + expectedErr: errFileNotFound, + }, + // Test case - 6. + // Test case with src path segment > 255. + { + srcFSPath: path, + srcVol: "success-vol", + srcPath: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + expectedErr: errFileNameTooLong, + }, + // Test case - 7. + // Test case validate only srcVol exists. + { + srcFSPath: path, + srcVol: "success-vol", + expectedErr: nil, + }, + // Test case - 8. + // Test case validate only srcVol doesn't exist. + { + srcFSPath: path, + srcVol: "success-vol-non-existent", + expectedErr: errVolumeNotFound, + }, + // Test case - 9. + // Test case validate invalid argument. + { + expectedErr: errInvalidArgument, + }, + } + + for i, testCase := range testCases { + if testCase.srcPath != "" { + if _, err := fsStatFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr { + t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + } + } else { + if _, err := fsStatDir(pathJoin(testCase.srcFSPath, testCase.srcVol)); err != testCase.expectedErr { + t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + } + } + } +} + +func TestFSCreateAndOpen(t *testing.T) { + // Setup test environment. 
+ _, path, err := newPosixTestSetup() + if err != nil { + t.Fatalf("Unable to create posix test setup, %s", err) + } + defer removeAll(path) + + if err = fsMkdir(pathJoin(path, "success-vol")); err != nil { + t.Fatalf("Unable to create directory, %s", err) + } + + if _, err = fsCreateFile("", nil, nil, 0); err != errInvalidArgument { + t.Fatal("Unexpected error", err) + } + + if _, _, err = fsOpenFile("", -1); err != errInvalidArgument { + t.Fatal("Unexpected error", err) + } + + var buf = make([]byte, 4096) + var reader = bytes.NewReader([]byte("Hello, world")) + if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil { + t.Fatalf("Unable to create file, %s", err) + } + // Seek back. + reader.Seek(0, 0) + + testCases := []struct { + srcVol string + srcPath string + expectedErr error + }{ + // Test case - 1. + // Test case with segment of the volume name > 255. + { + srcVol: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + srcPath: "success-file", + expectedErr: errFileNameTooLong, + }, + // Test case - 2. + // Test case with src path segment > 255. 
+ { + srcVol: "success-vol", + srcPath: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + expectedErr: errFileNameTooLong, + }, + } + + for i, testCase := range testCases { + _, err = fsCreateFile(pathJoin(path, testCase.srcVol, testCase.srcPath), reader, buf, reader.Size()) + if err != testCase.expectedErr { + t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + } + _, _, err = fsOpenFile(pathJoin(path, testCase.srcVol, testCase.srcPath), 0) + if err != testCase.expectedErr { + t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + } + } + + // Attempt to open a directory. + if _, _, err = fsOpenFile(pathJoin(path), 0); err != errIsNotRegular { + t.Fatal("Unexpected error", err) + } +} + +func TestFSDeletes(t *testing.T) { + // create posix test setup + _, path, err := newPosixTestSetup() + if err != nil { + t.Fatalf("Unable to create posix test setup, %s", err) + } + defer removeAll(path) + + // Setup test environment. + if err = fsMkdir(pathJoin(path, "success-vol")); err != nil { + t.Fatalf("Unable to create directory, %s", err) + } + + var buf = make([]byte, 4096) + var reader = bytes.NewReader([]byte("Hello, world")) + if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil { + t.Fatalf("Unable to create file, %s", err) + } + // Seek back. + reader.Seek(0, 0) + + testCases := []struct { + srcVol string + srcPath string + expectedErr error + }{ + // Test case - 1. + // valid case with existing volume and file to delete. + { + srcVol: "success-vol", + srcPath: "success-file", + expectedErr: nil, + }, + // Test case - 2. + // The file was deleted in the last case, so DeleteFile should fail. 
+ { + srcVol: "success-vol", + srcPath: "success-file", + expectedErr: errFileNotFound, + }, + // Test case - 3. + // Test case with segment of the volume name > 255. + { + srcVol: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + srcPath: "success-file", + expectedErr: errFileNameTooLong, + }, + // Test case - 4. + // Test case with src path segment > 255. + { + srcVol: "success-vol", + srcPath: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + expectedErr: errFileNameTooLong, + }, + } + + for i, testCase := range testCases { + if err = fsDeleteFile(path, pathJoin(path, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr { + t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + } + } +} + +// Tests fs removes. +func TestFSRemoves(t *testing.T) { + // create posix test setup + _, path, err := newPosixTestSetup() + if err != nil { + t.Fatalf("Unable to create posix test setup, %s", err) + } + defer removeAll(path) + + // Setup test environment. + if err = fsMkdir(pathJoin(path, "success-vol")); err != nil { + t.Fatalf("Unable to create directory, %s", err) + } + + var buf = make([]byte, 4096) + var reader = bytes.NewReader([]byte("Hello, world")) + if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil { + t.Fatalf("Unable to create file, %s", err) + } + // Seek back. 
+ reader.Seek(0, 0) + + if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file-new"), reader, buf, reader.Size()); err != nil { + t.Fatalf("Unable to create file, %s", err) + } + // Seek back. + reader.Seek(0, 0) + + testCases := []struct { + srcFSPath string + srcVol string + srcPath string + expectedErr error + }{ + // Test case - 1. + // valid case with existing volume and file to delete. + { + srcFSPath: path, + srcVol: "success-vol", + srcPath: "success-file", + expectedErr: nil, + }, + // Test case - 2. + // The file was deleted in the last case, so DeleteFile should fail. + { + srcFSPath: path, + srcVol: "success-vol", + srcPath: "success-file", + expectedErr: errFileNotFound, + }, + // Test case - 3. + // Test case with segment of the volume name > 255. + { + srcFSPath: path, + srcVol: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + srcPath: "success-file", + expectedErr: errFileNameTooLong, + }, + // Test case - 4. + // Test case with src path segment > 255. + { + srcFSPath: path, + srcVol: "success-vol", + srcPath: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + expectedErr: errFileNameTooLong, + }, + // Test case - 5. + // Test case with src path empty. + { + srcFSPath: path, + srcVol: "success-vol", + expectedErr: errVolumeNotEmpty, + }, + // Test case - 6. + // Test case with src path empty. 
+ { + srcFSPath: path, + srcVol: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + expectedErr: errFileNameTooLong, + }, + // Test case - 7. + // Test case with src path empty. + { + srcFSPath: path, + srcVol: "non-existent", + expectedErr: errVolumeNotFound, + }, + // Test case - 8. + // Test case with src and volume path empty. + { + expectedErr: errInvalidArgument, + }, + } + + for i, testCase := range testCases { + if testCase.srcPath != "" { + if err = fsRemoveFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr { + t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + } + } else { + if err = fsRemoveDir(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr { + t.Error(err) + } + } + } + + if err = fsRemoveAll(pathJoin(path, "success-vol")); err != nil { + t.Fatal(err) + } + + if err = fsRemoveAll(""); err != errInvalidArgument { + t.Fatal(err) + } + + if err = fsRemoveAll("my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); err != errFileNameTooLong { + t.Fatal(err) + } + + if runtime.GOOS != "windows" { + if err = fsRemoveAll("/usr"); err != errVolumeAccessDenied { + t.Fatal(err) + } + } +} diff --git a/cmd/fs-v1-metadata.go b/cmd/fs-v1-metadata.go index 5cac60ef4..99d475541 100644 --- a/cmd/fs-v1-metadata.go +++ b/cmd/fs-v1-metadata.go @@ -18,7 +18,15 @@ package cmd import ( "encoding/json" + "io" + "io/ioutil" + "os" + pathutil "path" "sort" + "strings" + + "github.com/minio/minio/pkg/lock" + 
 "github.com/minio/minio/pkg/mimedb" ) const ( @@ -38,6 +46,50 @@ type fsMetaV1 struct { Parts []objectPartInfo `json:"parts,omitempty"` } +// Converts metadata to object info. +func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo { + if len(m.Meta) == 0 { + m.Meta = make(map[string]string) + } + + // Guess content-type from the extension if possible. + if m.Meta["content-type"] == "" { + if objectExt := pathutil.Ext(object); objectExt != "" { + if content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]; ok { + m.Meta["content-type"] = content.ContentType + } + } + } + + objInfo := ObjectInfo{ + Bucket: bucket, + Name: object, + } + + // We set file info only if it's valid. + objInfo.ModTime = timeSentinel + if fi != nil { + objInfo.ModTime = fi.ModTime() + objInfo.Size = fi.Size() + objInfo.IsDir = fi.IsDir() + } + + objInfo.MD5Sum = m.Meta["md5Sum"] + objInfo.ContentType = m.Meta["content-type"] + objInfo.ContentEncoding = m.Meta["content-encoding"] + + // md5Sum has already been extracted into objInfo.MD5Sum. We + // need to remove it from m.Meta to avoid it from appearing as + // part of response headers. e.g., X-Minio-* or X-Amz-*. + delete(m.Meta, "md5Sum") + + // Save all the other user-defined metadata. + objInfo.UserDefined = m.Meta + + // Success. + return objInfo +} + // ObjectPartIndex - returns the index of matching object part number. func (m fsMetaV1) ObjectPartIndex(partNumber int) (partIndex int) { for i, part := range m.Parts { @@ -73,41 +125,43 @@ func (m *fsMetaV1) AddObjectPart(partNumber int, partName string, partETag strin sort.Sort(byObjectPartNumber(m.Parts)) } -// readFSMetadata - returns the object metadata `fs.json` content. -func readFSMetadata(disk StorageAPI, bucket, filePath string) (fsMeta fsMetaV1, err error) { - // Read all `fs.json`. 
- buf, err := disk.ReadAll(bucket, filePath) +func (m *fsMetaV1) WriteTo(writer io.Writer) (n int64, err error) { + var metadataBytes []byte + metadataBytes, err = json.Marshal(m) if err != nil { - return fsMetaV1{}, traceError(err) + return 0, traceError(err) } - // Decode `fs.json` into fsMeta structure. - if err = json.Unmarshal(buf, &fsMeta); err != nil { - return fsMetaV1{}, traceError(err) + if err = writer.(*lock.LockedFile).Truncate(0); err != nil { + return 0, traceError(err) + } + + if _, err = writer.Write(metadataBytes); err != nil { + return 0, traceError(err) } // Success. - return fsMeta, nil + return int64(len(metadataBytes)), nil } -// Write fsMeta to fs.json or fs-append.json. -func writeFSMetadata(disk StorageAPI, bucket, filePath string, fsMeta fsMetaV1) error { - tmpPath := mustGetUUID() - metadataBytes, err := json.Marshal(fsMeta) +func (m *fsMetaV1) ReadFrom(reader io.Reader) (n int64, err error) { + var metadataBytes []byte + metadataBytes, err = ioutil.ReadAll(reader) if err != nil { - return traceError(err) + return 0, traceError(err) } - if err = disk.AppendFile(minioMetaTmpBucket, tmpPath, metadataBytes); err != nil { - return traceError(err) + + if len(metadataBytes) == 0 { + return 0, traceError(io.EOF) } - err = disk.RenameFile(minioMetaTmpBucket, tmpPath, bucket, filePath) - if err != nil { - err = disk.DeleteFile(minioMetaTmpBucket, tmpPath) - if err != nil { - return traceError(err) - } + + // Decode `fs.json` into fsMeta structure. + if err = json.Unmarshal(metadataBytes, m); err != nil { + return 0, traceError(err) } - return nil + + // Success. + return int64(len(metadataBytes)), nil } // newFSMetaV1 - initializes new fsMetaV1. @@ -130,21 +184,49 @@ func newFSFormatV1() (format *formatConfigV1) { } } -// isFSFormat - returns whether given formatConfigV1 is FS type or not. -func isFSFormat(format *formatConfigV1) bool { - return format.Format == "fs" +// loads format.json from minioMetaBucket if it exists. 
+func loadFormatFS(fsPath string) (*formatConfigV1, error) { + rlk, err := lock.RLockedOpenFile(pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile)) + if err != nil { + if os.IsNotExist(err) { + return nil, errUnformattedDisk + } + return nil, err + } + defer rlk.Close() + + formatBytes, err := ioutil.ReadAll(rlk) + if err != nil { + return nil, err + } + + format := &formatConfigV1{} + if err = json.Unmarshal(formatBytes, format); err != nil { + return nil, err + } + + return format, nil } // writes FS format (format.json) into minioMetaBucket. -func saveFSFormatData(storage StorageAPI, fsFormat *formatConfigV1) error { +func saveFormatFS(formatPath string, fsFormat *formatConfigV1) error { metadataBytes, err := json.Marshal(fsFormat) if err != nil { return err } - // fsFormatJSONFile - format.json file stored in minioMetaBucket(.minio) directory. - if err = storage.AppendFile(minioMetaBucket, fsFormatJSONFile, metadataBytes); err != nil { + + // fsFormatJSONFile - format.json file stored in minioMetaBucket(.minio.sys) directory. + lk, err := lock.LockedOpenFile(preparePath(formatPath), os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { return err } + defer lk.Close() + + if _, err = lk.Write(metadataBytes); err != nil { + return err + } + + // Success. 
return nil } @@ -153,11 +235,13 @@ func isPartsSame(uploadedParts []objectPartInfo, completeParts []completePart) b if len(uploadedParts) != len(completeParts) { return false } + for i := range completeParts { if uploadedParts[i].Number != completeParts[i].PartNumber || uploadedParts[i].ETag != completeParts[i].ETag { return false } } + return true } diff --git a/cmd/fs-v1-metadata_test.go b/cmd/fs-v1-metadata_test.go index 151948273..b1163d62f 100644 --- a/cmd/fs-v1-metadata_test.go +++ b/cmd/fs-v1-metadata_test.go @@ -18,30 +18,34 @@ package cmd import ( "bytes" + "io" + "os" "path/filepath" "testing" ) -func initFSObjects(disk string, t *testing.T) (obj ObjectLayer) { - endpoints, err := parseStorageEndpoints([]string{disk}) - if err != nil { - t.Fatal(err) +// Tests ToObjectInfo function. +func TestFSV1MetadataObjInfo(t *testing.T) { + fsMeta := newFSMetaV1() + objInfo := fsMeta.ToObjectInfo("testbucket", "testobject", nil) + if objInfo.Size != 0 { + t.Fatal("Unexpected object info value for Size", objInfo.Size) } - obj, _, err = initObjectLayer(endpoints) - if err != nil { - t.Fatal("Unexpected err: ", err) + if objInfo.ModTime != timeSentinel { + t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime) + } + if objInfo.IsDir { + t.Fatal("Unexpected object info value for IsDir", objInfo.IsDir) } - return obj } -// TestReadFsMetadata - readFSMetadata testing with a healthy and faulty disk +// TestReadFSMetadata - readFSMetadata testing with a healthy and faulty disk func TestReadFSMetadata(t *testing.T) { disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) - - fs := obj.(fsObjects) + fs := obj.(*fsObjects) bucketName := "bucket" objectName := "object" @@ -56,37 +60,42 @@ func TestReadFSMetadata(t *testing.T) { } // Construct the full path of fs.json - fsPath := "buckets/" + bucketName + "/" + objectName + "/fs.json" + fsPath := pathJoin("buckets", bucketName, objectName, "fs.json") + 
fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath) + rlk, err := fs.rwPool.Open(fsPath) + if err != nil { + t.Fatal("Unexpected error ", err) + } + defer rlk.Close() + + sectionReader := io.NewSectionReader(rlk, 0, rlk.Size()) // Regular fs metadata reading, no errors expected - if _, err := readFSMetadata(fs.storage, ".minio.sys", fsPath); err != nil { + fsMeta := fsMetaV1{} + if _, err = fsMeta.ReadFrom(sectionReader); err != nil { t.Fatal("Unexpected error ", err) } // Corrupted fs.json - if err := fs.storage.AppendFile(".minio.sys", fsPath, []byte{'a'}); err != nil { + file, err := os.OpenFile(preparePath(fsPath), os.O_APPEND|os.O_WRONLY, 0666) + if err != nil { t.Fatal("Unexpected error ", err) } - if _, err := readFSMetadata(fs.storage, ".minio.sys", fsPath); err == nil { + file.Write([]byte{'a'}) + file.Close() + fsMeta = fsMetaV1{} + if _, err := fsMeta.ReadFrom(sectionReader); err == nil { t.Fatal("Should fail", err) } - - // Test with corrupted disk - fsStorage := fs.storage.(*retryStorage) - naughty := newNaughtyDisk(fsStorage, nil, errFaultyDisk) - fs.storage = naughty - if _, err := readFSMetadata(fs.storage, ".minio.sys", fsPath); errorCause(err) != errFaultyDisk { - t.Fatal("Should fail", err) - } - } -// TestWriteFsMetadata - tests of writeFSMetadata with healthy and faulty disks +// TestWriteFSMetadata - tests of writeFSMetadata with healthy disk. 
func TestWriteFSMetadata(t *testing.T) { disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) + obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + fs := obj.(*fsObjects) bucketName := "bucket" objectName := "object" @@ -100,24 +109,27 @@ func TestWriteFSMetadata(t *testing.T) { t.Fatal("Unexpected err: ", err) } - // Construct the complete path of fs.json - fsPath := "buckets/" + bucketName + "/" + objectName + "/fs.json" + // Construct the full path of fs.json + fsPath := pathJoin("buckets", bucketName, objectName, "fs.json") + fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath) - // Fs metadata reading, no errors expected (healthy disk) - fsMeta, err := readFSMetadata(fs.storage, ".minio.sys", fsPath) + rlk, err := fs.rwPool.Open(fsPath) if err != nil { t.Fatal("Unexpected error ", err) } + defer rlk.Close() - // Reading metadata with a corrupted disk - fsStorage := fs.storage.(*retryStorage) - for i := 1; i <= 2; i++ { - naughty := newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk, i + 1: errFaultyDisk}, nil) - fs.storage = naughty - if err = writeFSMetadata(fs.storage, ".minio.sys", fsPath, fsMeta); errorCause(err) != errFaultyDisk { - t.Fatal("Unexpected error", i, err) - - } + sectionReader := io.NewSectionReader(rlk, 0, rlk.Size()) + // FS metadata reading, no errors expected (healthy disk) + fsMeta := fsMetaV1{} + _, err = fsMeta.ReadFrom(sectionReader) + if err != nil { + t.Fatal("Unexpected error ", err) + } + if fsMeta.Version != "1.0.0" { + t.Fatalf("Unexpected version %s", fsMeta.Version) + } + if fsMeta.Format != "fs" { + t.Fatalf("Unexpected format %s", fsMeta.Format) } - } diff --git a/cmd/fs-v1-multipart-common.go b/cmd/fs-v1-multipart-common.go index 21c87099c..c7b9c7f63 100644 --- a/cmd/fs-v1-multipart-common.go +++ b/cmd/fs-v1-multipart-common.go @@ -17,72 +17,126 @@ package cmd import ( - "path" + "fmt" + "io" + "runtime" "time" + + pathutil "path" + + "github.com/minio/minio/pkg/lock" ) // Returns 
if the prefix is a multipart upload. func (fs fsObjects) isMultipartUpload(bucket, prefix string) bool { - _, err := fs.storage.StatFile(bucket, pathJoin(prefix, uploadsJSONFile)) - return err == nil -} - -// isUploadIDExists - verify if a given uploadID exists and is valid. -func (fs fsObjects) isUploadIDExists(bucket, object, uploadID string) bool { - uploadIDPath := path.Join(bucket, object, uploadID) - _, err := fs.storage.StatFile(minioMetaMultipartBucket, path.Join(uploadIDPath, fsMetaJSONFile)) + uploadsIDPath := pathJoin(fs.fsPath, bucket, prefix, uploadsJSONFile) + _, err := fsStatFile(uploadsIDPath) if err != nil { if err == errFileNotFound { return false } - errorIf(err, "Unable to access upload id "+pathJoin(minioMetaMultipartBucket, uploadIDPath)) + errorIf(err, "Unable to access uploads.json "+uploadsIDPath) return false } return true } -// updateUploadJSON - add or remove upload ID info in all `uploads.json`. -func (fs fsObjects) updateUploadJSON(bucket, object, uploadID string, initiated time.Time, isRemove bool) error { - uploadsPath := path.Join(bucket, object, uploadsJSONFile) - tmpUploadsPath := mustGetUUID() +// Delete uploads.json file wrapper handling a tricky case on windows. +func (fs fsObjects) deleteUploadsJSON(bucket, object, uploadID string) error { + timeID := fmt.Sprintf("%X", time.Now().UTC().UnixNano()) + tmpPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"+"+timeID) - uploadsJSON, err := readUploadsJSON(bucket, object, fs.storage) - if errorCause(err) == errFileNotFound { - // If file is not found, we assume a default (empty) - // upload info. - uploadsJSON, err = newUploadsV1("fs"), nil + multipartBucketPath := pathJoin(fs.fsPath, minioMetaMultipartBucket) + uploadPath := pathJoin(multipartBucketPath, bucket, object) + uploadsMetaPath := pathJoin(uploadPath, uploadsJSONFile) + + // Special case for windows please read through. 
+ if runtime.GOOS == "windows" { + // Ordinarily windows does not permit deletion or renaming of files still + // in use, but if all open handles to that file were opened with FILE_SHARE_DELETE + // then it can permit renames and deletions of open files. + // + // There are however some gotchas with this, and it is worth listing them here. + // Firstly, Windows never allows you to really delete an open file, rather it is + // flagged as delete pending and its entry in its directory remains visible + // (though no new file handles may be opened to it) and when the very last + // open handle to the file in the system is closed, only then is it truly + // deleted. Well, actually only sort of truly deleted, because Windows only + // appears to remove the file entry from the directory, but in fact that + // entry is merely hidden and actually still exists and attempting to create + // a file with the same name will return an access denied error. How long it + // silently exists for depends on a range of factors, but put it this way: + // if your code loops creating and deleting the same file name as you might + // when operating a lock file, you're going to see lots of random spurious + // access denied errors and truly dismal lock file performance compared to POSIX. + // + // We work-around these un-POSIX file semantics by taking a dual step to + // deleting files. Firstly, it renames the file to tmp location into multipartTmpBucket + // We always open files with FILE_SHARE_DELETE permission enabled, with that + // flag Windows permits renaming and deletion, and because the name was changed + // to a very random name somewhere not in its origin directory before deletion, + // you don't see those unexpected random errors when creating files with the + // same name as a recently deleted file as you do anywhere else on Windows. 
+ // Because the file is probably not in its original containing directory any more, + // deletions of that directory will not fail with “directory not empty” as they + // otherwise normally would either. + fsRenameFile(uploadsMetaPath, tmpPath) + + // Proceed to deleting the directory. + if err := fsDeleteFile(multipartBucketPath, uploadPath); err != nil { + return err + } + + // Finally delete the renamed file. + return fsDeleteFile(pathutil.Dir(tmpPath), tmpPath) } return fsDeleteFile(multipartBucketPath, uploadsMetaPath) +} + +// Removes the uploadID, called either by CompleteMultipart or AbortMultipart. If the resulting uploads +// slice is empty then we remove/purge the file. +func (fs fsObjects) removeUploadID(bucket, object, uploadID string, rwlk *lock.LockedFile) error { + uploadIDs := uploadsV1{} + _, err := uploadIDs.ReadFrom(io.NewSectionReader(rwlk, 0, rwlk.Size())) if err != nil { return err } - // update the uploadsJSON struct - if !isRemove { - // Add the uploadID - uploadsJSON.AddUploadID(uploadID, initiated) - } else { - // Remove the upload ID - uploadsJSON.RemoveUploadID(uploadID) - } + // Removes upload id from the uploads list. + uploadIDs.RemoveUploadID(uploadID) - // update the file or delete it? - if len(uploadsJSON.Uploads) > 0 { - err = writeUploadJSON(&uploadsJSON, uploadsPath, tmpUploadsPath, fs.storage) - } else { - // no uploads, so we delete the file. - if err = fs.storage.DeleteFile(minioMetaMultipartBucket, uploadsPath); err != nil { - return toObjectErr(traceError(err), minioMetaMultipartBucket, uploadsPath) - } - } + // Check this is the last entry. + if uploadIDs.IsEmpty() { + // No more uploads left, so we delete `uploads.json` file. + return fs.deleteUploadsJSON(bucket, object, uploadID) + } // else not empty + + // Write the updated `uploads.json`. + _, err = uploadIDs.WriteTo(rwlk) return err } -// addUploadID - add upload ID and its initiated time to 'uploads.json'. 
-func (fs fsObjects) addUploadID(bucket, object string, uploadID string, initiated time.Time) error { - return fs.updateUploadJSON(bucket, object, uploadID, initiated, false) -} +// Adds a new uploadID; if no previous `uploads.json` is +// found, we initialize a new one. +func (fs fsObjects) addUploadID(bucket, object, uploadID string, initiated time.Time, rwlk *lock.LockedFile) error { + uploadIDs := uploadsV1{} 
-// removeUploadID - remove upload ID in 'uploads.json'. -func (fs fsObjects) removeUploadID(bucket, object string, uploadID string) error { - return fs.updateUploadJSON(bucket, object, uploadID, time.Time{}, true) + _, err := uploadIDs.ReadFrom(io.NewSectionReader(rwlk, 0, rwlk.Size())) + // For all unexpected errors, we return. + if err != nil && errorCause(err) != io.EOF { + return err + } + + // If we couldn't read anything, we assume a default + // (empty) upload info. + if errorCause(err) == io.EOF { + uploadIDs = newUploadsV1("fs") + } + + // Adds new upload id to the list. + uploadIDs.AddUploadID(uploadID, initiated) + + // Write the updated `uploads.json`. 
+ _, err = uploadIDs.WriteTo(rwlk) + return err } diff --git a/cmd/fs-v1-multipart-common_test.go b/cmd/fs-v1-multipart-common_test.go index 2f4b4555a..026799580 100644 --- a/cmd/fs-v1-multipart-common_test.go +++ b/cmd/fs-v1-multipart-common_test.go @@ -19,53 +19,8 @@ package cmd import ( "path/filepath" "testing" - "time" ) -// TestFSIsUploadExists - complete test with valid and invalid cases -func TestFSIsUploadExists(t *testing.T) { - // Prepare for testing - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer removeAll(disk) - - obj := initFSObjects(disk, t) - fs := obj.(fsObjects) - - bucketName := "bucket" - objectName := "object" - - if err := obj.MakeBucket(bucketName); err != nil { - t.Fatal("Unexpected err: ", err) - } - - uploadID, err := obj.NewMultipartUpload(bucketName, objectName, nil) - if err != nil { - t.Fatal("Unexpected err: ", err) - } - // Test with valid upload id - if exists := fs.isUploadIDExists(bucketName, objectName, uploadID); !exists { - t.Fatal("Wrong result, expected: ", exists) - } - - // Test with inexistant bucket/object names - if exists := fs.isUploadIDExists("bucketfoo", "objectfoo", uploadID); exists { - t.Fatal("Wrong result, expected: ", !exists) - } - - // Test with inexistant upload ID - if exists := fs.isUploadIDExists(bucketName, objectName, uploadID+"-ff"); exists { - t.Fatal("Wrong result, expected: ", !exists) - } - - // isUploadIdExists with a faulty disk should return false - fsStorage := fs.storage.(*retryStorage) - naughty := newNaughtyDisk(fsStorage, nil, errFaultyDisk) - fs.storage = naughty - if exists := fs.isUploadIDExists(bucketName, objectName, uploadID); exists { - t.Fatal("Wrong result, expected: ", !exists) - } -} - // TestFSWriteUploadJSON - tests for writeUploadJSON for FS func TestFSWriteUploadJSON(t *testing.T) { // Prepare for tests @@ -73,31 +28,21 @@ func TestFSWriteUploadJSON(t *testing.T) { defer removeAll(disk) obj := initFSObjects(disk, t) - fs := obj.(fsObjects) bucketName 
:= "bucket" objectName := "object" obj.MakeBucket(bucketName) - uploadID, err := obj.NewMultipartUpload(bucketName, objectName, nil) + _, err := obj.NewMultipartUpload(bucketName, objectName, nil) if err != nil { t.Fatal("Unexpected err: ", err) } + // newMultipartUpload will fail. + removeAll(disk) // Remove disk. + _, err = obj.NewMultipartUpload(bucketName, objectName, nil) if err != nil { - t.Fatal("Unexpected err: ", err) - } - - if err := fs.addUploadID(bucketName, objectName, uploadID, time.Now().UTC()); err != nil { - t.Fatal("Unexpected err: ", err) - } - - // isUploadIdExists with a faulty disk should return false - fsStorage := fs.storage.(*retryStorage) - for i := 1; i <= 3; i++ { - naughty := newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil) - fs.storage = naughty - if err := fs.addUploadID(bucketName, objectName, uploadID, time.Now().UTC()); errorCause(err) != errFaultyDisk { + if _, ok := errorCause(err).(BucketNotFound); !ok { t.Fatal("Unexpected err: ", err) } } diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go index 963f6b291..bea98d0f8 100644 --- a/cmd/fs-v1-multipart.go +++ b/cmd/fs-v1-multipart.go @@ -22,13 +22,68 @@ import ( "fmt" "hash" "io" - "path" + "os" + pathutil "path" "strings" "time" "github.com/minio/sha256-simd" ) +// listMultipartUploadIDs - list all the upload ids from a marker up to 'count'. +func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarker string, count int) ([]uploadMetadata, bool, error) { + var uploads []uploadMetadata + + // Hold the lock so that two parallel complete-multipart-uploads + // do not leave a stale uploads.json behind. 
+ objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucketName, objectName)) + objectMPartPathLock.RLock() + defer objectMPartPathLock.RUnlock() + + uploadsPath := pathJoin(bucketName, objectName, uploadsJSONFile) + rlk, err := fs.rwPool.Open(pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadsPath)) + if err != nil { + if err == errFileNotFound || err == errFileAccessDenied { + return nil, true, nil + } + return nil, false, traceError(err) + } + defer fs.rwPool.Close(pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadsPath)) + + // Read `uploads.json`. + uploadIDs := uploadsV1{} + if _, err = uploadIDs.ReadFrom(io.NewSectionReader(rlk, 0, rlk.Size())); err != nil { + return nil, false, err + } + + index := 0 + if uploadIDMarker != "" { + for ; index < len(uploadIDs.Uploads); index++ { + if uploadIDs.Uploads[index].UploadID == uploadIDMarker { + // Skip the uploadID as it would already be listed in previous listing. + index++ + break + } + } + } + + for index < len(uploadIDs.Uploads) { + uploads = append(uploads, uploadMetadata{ + Object: objectName, + UploadID: uploadIDs.Uploads[index].UploadID, + Initiated: uploadIDs.Uploads[index].Initiated, + }) + count-- + index++ + if count == 0 { + break + } + } + + end := (index == len(uploadIDs.Uploads)) + return uploads, end, nil +} + // listMultipartUploads - lists all multipart uploads. 
func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { result := ListMultipartsInfo{} @@ -54,31 +109,40 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark if keyMarker != "" { multipartMarkerPath = pathJoin(bucket, keyMarker) } + var uploads []uploadMetadata var err error var eof bool + if uploadIDMarker != "" { - keyMarkerLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, - pathJoin(bucket, keyMarker)) - keyMarkerLock.RLock() - uploads, _, err = listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads, fs.storage) - keyMarkerLock.RUnlock() + uploads, _, err = fs.listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads) if err != nil { return ListMultipartsInfo{}, err } maxUploads = maxUploads - len(uploads) } + var walkResultCh chan treeWalkResult var endWalkCh chan struct{} - heal := false // true only for xl.ListObjectsHeal() + + // true only for xl.ListObjectsHeal(), set to false. + heal := false + + // Proceed to list only if we have more uploads to be listed. if maxUploads > 0 { - walkResultCh, endWalkCh = fs.listPool.Release(listParams{minioMetaMultipartBucket, recursive, multipartMarkerPath, multipartPrefixPath, heal}) + listPrms := listParams{minioMetaMultipartBucket, recursive, multipartMarkerPath, multipartPrefixPath, heal} + + // Pop out any previously waiting marker. 
+ walkResultCh, endWalkCh = fs.listPool.Release(listPrms) if walkResultCh == nil { endWalkCh = make(chan struct{}) isLeaf := fs.isMultipartUpload - listDir := listDirFactory(isLeaf, fsTreeWalkIgnoredErrs, fs.storage) - walkResultCh = startTreeWalk(minioMetaMultipartBucket, multipartPrefixPath, multipartMarkerPath, recursive, listDir, isLeaf, endWalkCh) + listDir := fs.listDirFactory(isLeaf) + walkResultCh = startTreeWalk(minioMetaMultipartBucket, multipartPrefixPath, + multipartMarkerPath, recursive, listDir, isLeaf, endWalkCh) } + + // List until maxUploads requested. for maxUploads > 0 { walkResult, ok := <-walkResultCh if !ok { @@ -86,6 +150,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark eof = true break } + // For any walk error return right away. if walkResult.err != nil { // File not found or Disk not found is a valid case. @@ -95,6 +160,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark } return ListMultipartsInfo{}, walkResult.err } + entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket)) if strings.HasSuffix(walkResult.entry, slashSeparator) { uploads = append(uploads, uploadMetadata{ @@ -109,18 +175,16 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark } continue } + var tmpUploads []uploadMetadata var end bool uploadIDMarker = "" - entryLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, - pathJoin(bucket, entry)) - entryLock.RLock() - tmpUploads, end, err = listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads, fs.storage) - entryLock.RUnlock() + tmpUploads, end, err = fs.listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads) if err != nil { return ListMultipartsInfo{}, err } + uploads = append(uploads, tmpUploads...) 
maxUploads -= len(tmpUploads) if walkResult.end && end { @@ -129,6 +193,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark } } } + // Loop through all the received uploads fill in the multiparts result. for _, upload := range uploads { var objectName string @@ -158,6 +223,8 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark result.NextKeyMarker = "" result.NextUploadIDMarker = "" } + + // Success. return result, nil } @@ -174,6 +241,11 @@ func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMark if err := checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter, fs); err != nil { return ListMultipartsInfo{}, err } + + if _, err := fs.statBucketDir(bucket); err != nil { + return ListMultipartsInfo{}, toObjectErr(err, bucket) + } + return fs.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) } @@ -191,23 +263,35 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st // Save additional metadata. 
fsMeta.Meta = meta - // This lock needs to be held for any changes to the directory - // contents of ".minio.sys/multipart/object/" - objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, - pathJoin(bucket, object)) - objectMPartPathLock.Lock() - defer objectMPartPathLock.Unlock() - uploadID = mustGetUUID() initiated := time.Now().UTC() + // Add upload ID to uploads.json - if err = fs.addUploadID(bucket, object, uploadID, initiated); err != nil { - return "", err + uploadsPath := pathJoin(bucket, object, uploadsJSONFile) + rwlk, err := fs.rwPool.Create(pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadsPath)) + if err != nil { + return "", toObjectErr(traceError(err), bucket, object) } - uploadIDPath := path.Join(bucket, object, uploadID) - if err = writeFSMetadata(fs.storage, minioMetaMultipartBucket, path.Join(uploadIDPath, fsMetaJSONFile), fsMeta); err != nil { - return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) + defer rwlk.Close() + + uploadIDPath := pathJoin(bucket, object, uploadID) + fsMetaPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile) + metaFile, err := fs.rwPool.Create(fsMetaPath) + if err != nil { + return "", toObjectErr(traceError(err), bucket, object) } + defer metaFile.Close() + + // Add a new upload id. + if err = fs.addUploadID(bucket, object, uploadID, initiated, rwlk); err != nil { + return "", toObjectErr(err, bucket, object) + } + + // Write all the set metadata. + if _, err = fsMeta.WriteTo(metaFile); err != nil { + return "", toObjectErr(err, bucket, object) + } + // Return success. 
return uploadID, nil } @@ -221,6 +305,17 @@ func (fs fsObjects) NewMultipartUpload(bucket, object string, meta map[string]st if err := checkNewMultipartArgs(bucket, object, fs); err != nil { return "", err } + + if _, err := fs.statBucketDir(bucket); err != nil { + return "", toObjectErr(err, bucket) + } + + // Hold the lock so that two parallel complete-multipart-uploads + // do not leave a stale uploads.json behind. + objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object)) + objectMPartPathLock.Lock() + defer objectMPartPathLock.Unlock() + return fs.newMultipartUpload(bucket, object, meta) } @@ -229,12 +324,14 @@ func partToAppend(fsMeta fsMetaV1, fsAppendMeta fsMetaV1) (part objectPartInfo, if len(fsMeta.Parts) == 0 { return } + // As fsAppendMeta.Parts will be sorted len(fsAppendMeta.Parts) will naturally be the next part number nextPartNum := len(fsAppendMeta.Parts) + 1 nextPartIndex := fsMeta.ObjectPartIndex(nextPartNum) if nextPartIndex == -1 { return } + return fsMeta.Parts[nextPartIndex], true } @@ -247,15 +344,43 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s return "", err } - uploadIDPath := path.Join(bucket, object, uploadID) + if _, err := fs.statBucketDir(bucket); err != nil { + return "", toObjectErr(err, bucket) + } + + // Hold the lock so that two parallel complete-multipart-uploads + // do not leave a stale uploads.json behind. + objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object)) + objectMPartPathLock.Lock() + defer objectMPartPathLock.Unlock() + + // Disallow any parallel abort or complete multipart operations. 
+ uploadsPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadsJSONFile) + if _, err := fs.rwPool.Open(uploadsPath); err != nil { + if err == errFileNotFound || err == errFileAccessDenied { + return "", traceError(InvalidUploadID{UploadID: uploadID}) + } + return "", toObjectErr(traceError(err), bucket, object) + } + defer fs.rwPool.Close(uploadsPath) + + uploadIDPath := pathJoin(bucket, object, uploadID) - preUploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath) - preUploadIDLock.RLock() // Just check if the uploadID exists to avoid copy if it doesn't. - uploadIDExists := fs.isUploadIDExists(bucket, object, uploadID) - preUploadIDLock.RUnlock() - if !uploadIDExists { - return "", traceError(InvalidUploadID{UploadID: uploadID}) + fsMetaPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile) + rwlk, err := fs.rwPool.Write(fsMetaPath) + if err != nil { + if err == errFileNotFound || err == errFileAccessDenied { + return "", traceError(InvalidUploadID{UploadID: uploadID}) + } + return "", toObjectErr(traceError(err), bucket, object) + } + defer rwlk.Close() + + fsMeta := fsMetaV1{} + _, err = fsMeta.ReadFrom(io.NewSectionReader(rwlk, 0, rwlk.Size())) + if err != nil { + return "", toObjectErr(err, minioMetaMultipartBucket, fsMetaPath) } partSuffix := fmt.Sprintf("object%d", partID) @@ -263,7 +388,6 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s // Initialize md5 writer. md5Writer := md5.New() - hashWriters := []io.Writer{md5Writer} var sha256Writer hash.Hash @@ -272,6 +396,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s hashWriters = append(hashWriters, sha256Writer) } multiWriter := io.MultiWriter(hashWriters...) + // Limit the reader to its provided size if specified. 
var limitDataReader io.Reader if size > 0 { @@ -289,31 +414,24 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s } buf := make([]byte, int(bufSize)) - if size > 0 { - // Prepare file to avoid disk fragmentation - err := fs.storage.PrepareFile(minioMetaTmpBucket, tmpPartPath, size) - if err != nil { - return "", toObjectErr(err, minioMetaTmpBucket, tmpPartPath) - } - } - - bytesWritten, cErr := fsCreateFile(fs.storage, teeReader, buf, minioMetaTmpBucket, tmpPartPath) + fsPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tmpPartPath) + bytesWritten, cErr := fsCreateFile(fsPartPath, teeReader, buf, size) if cErr != nil { - fs.storage.DeleteFile(minioMetaTmpBucket, tmpPartPath) + fsRemoveFile(fsPartPath) return "", toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath) } // Should return IncompleteBody{} error when reader has fewer // bytes than specified in request header. if bytesWritten < size { - fs.storage.DeleteFile(minioMetaTmpBucket, tmpPartPath) + fsRemoveFile(fsPartPath) return "", traceError(IncompleteBody{}) } // Delete temporary part in case of failure. If // PutObjectPart succeeds then there would be nothing to // delete. - defer fs.storage.DeleteFile(minioMetaTmpBucket, tmpPartPath) + defer fsRemoveFile(fsPartPath) newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil)) if md5Hex != "" { @@ -329,45 +447,32 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s } } - // Hold write lock as we are updating fs.json - postUploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath) - postUploadIDLock.Lock() - defer postUploadIDLock.Unlock() - - // Just check if the uploadID exists to avoid copy if it doesn't. 
- if !fs.isUploadIDExists(bucket, object, uploadID) { - return "", traceError(InvalidUploadID{UploadID: uploadID}) - } - - fsMetaPath := pathJoin(uploadIDPath, fsMetaJSONFile) - fsMeta, err := readFSMetadata(fs.storage, minioMetaMultipartBucket, fsMetaPath) - if err != nil { - return "", toObjectErr(err, minioMetaMultipartBucket, fsMetaPath) - } - fsMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size) - - partPath := path.Join(bucket, object, uploadID, partSuffix) + partPath := pathJoin(bucket, object, uploadID, partSuffix) // Lock the part so that another part upload with same part-number gets blocked // while the part is getting appended in the background. partLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, partPath) partLock.Lock() - err = fs.storage.RenameFile(minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath) - if err != nil { + + fsNSPartPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, partPath) + if err = fsRenameFile(fsPartPath, fsNSPartPath); err != nil { partLock.Unlock() - return "", toObjectErr(traceError(err), minioMetaMultipartBucket, partPath) + return "", toObjectErr(err, minioMetaMultipartBucket, partPath) } - uploadIDPath = path.Join(bucket, object, uploadID) - if err = writeFSMetadata(fs.storage, minioMetaMultipartBucket, path.Join(uploadIDPath, fsMetaJSONFile), fsMeta); err != nil { + + // Save the object part info in `fs.json`. + fsMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size) + if _, err = fsMeta.WriteTo(rwlk); err != nil { partLock.Unlock() return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } // Append the part in background. - errCh := fs.bgAppend.append(fs.storage, bucket, object, uploadID, fsMeta) + errCh := fs.append(bucket, object, uploadID, fsMeta) go func() { - // Also receive the error so that the appendParts go-routine does not block on send. - // But the error received is ignored as fs.PutObjectPart() would have already - // returned success to the client. 
+ // Also receive the error so that the appendParts go-routine + // does not block on send. But the error received is ignored + // as fs.PutObjectPart() would have already returned success + // to the client. <-errCh partLock.Unlock() }() @@ -381,36 +486,51 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) { result := ListPartsInfo{} - fsMetaPath := path.Join(bucket, object, uploadID, fsMetaJSONFile) - fsMeta, err := readFSMetadata(fs.storage, minioMetaMultipartBucket, fsMetaPath) + uploadIDPath := pathJoin(bucket, object, uploadID) + fsMetaPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile) + metaFile, err := fs.rwPool.Open(fsMetaPath) + if err != nil { + if err == errFileNotFound || err == errFileAccessDenied { + // On windows oddly this is returned. + return ListPartsInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + } + return ListPartsInfo{}, toObjectErr(traceError(err), bucket, object) + } + defer fs.rwPool.Close(fsMetaPath) + + fsMeta := fsMetaV1{} + _, err = fsMeta.ReadFrom((io.NewSectionReader(metaFile, 0, metaFile.Size()))) if err != nil { return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, fsMetaPath) } + // Only parts with higher part numbers will be listed. 
partIdx := fsMeta.ObjectPartIndex(partNumberMarker) parts := fsMeta.Parts if partIdx != -1 { parts = fsMeta.Parts[partIdx+1:] } + count := maxParts for _, part := range parts { - var fi FileInfo - partNamePath := path.Join(bucket, object, uploadID, part.Name) - fi, err = fs.storage.StatFile(minioMetaMultipartBucket, partNamePath) + var fi os.FileInfo + partNamePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, part.Name) + fi, err = fsStatFile(partNamePath) if err != nil { return ListPartsInfo{}, toObjectErr(traceError(err), minioMetaMultipartBucket, partNamePath) } result.Parts = append(result.Parts, partInfo{ PartNumber: part.Number, ETag: part.ETag, - LastModified: fi.ModTime, - Size: fi.Size, + LastModified: fi.ModTime(), + Size: fi.Size(), }) count-- if count == 0 { break } } + // If listed entries are more than maxParts, we set IsTruncated as true. if len(parts) > len(result.Parts) { result.IsTruncated = true @@ -423,6 +543,8 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM result.Object = object result.UploadID = uploadID result.MaxParts = maxParts + + // Success. return result, nil } @@ -438,29 +560,23 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM return ListPartsInfo{}, err } - // Hold lock so that there is no competing - // abort-multipart-upload or complete-multipart-upload. 
- uploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, - pathJoin(bucket, object, uploadID)) - uploadIDLock.Lock() - defer uploadIDLock.Unlock() - - if !fs.isUploadIDExists(bucket, object, uploadID) { - return ListPartsInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + if _, err := fs.statBucketDir(bucket); err != nil { + return ListPartsInfo{}, toObjectErr(err, bucket) } - return fs.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) -} -func (fs fsObjects) totalObjectSize(fsMeta fsMetaV1, parts []completePart) (int64, error) { - objSize := int64(0) - for _, part := range parts { - partIdx := fsMeta.ObjectPartIndex(part.PartNumber) - if partIdx == -1 { - return 0, InvalidPart{} - } - objSize += fsMeta.Parts[partIdx].Size + // Hold the lock so that two parallel complete-multipart-uploads + // do not leave a stale uploads.json behind. + objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object)) + objectMPartPathLock.RLock() + defer objectMPartPathLock.RUnlock() + + listPartsInfo, err := fs.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) + if err != nil { + return ListPartsInfo{}, toObjectErr(err, bucket, object) } - return objSize, nil + + // Success. 
+ return listPartsInfo, nil } // CompleteMultipartUpload - completes an ongoing multipart @@ -474,18 +590,8 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload return "", err } - uploadIDPath := path.Join(bucket, object, uploadID) - - // Hold lock so that - // 1) no one aborts this multipart upload - // 2) no one does a parallel complete-multipart-upload on this - // multipart upload - uploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath) - uploadIDLock.Lock() - defer uploadIDLock.Unlock() - - if !fs.isUploadIDExists(bucket, object, uploadID) { - return "", traceError(InvalidUploadID{UploadID: uploadID}) + if _, err := fs.statBucketDir(bucket); err != nil { + return "", toObjectErr(err, bucket) } // Calculate s3 compatible md5sum for complete multipart. @@ -494,22 +600,65 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload return "", err } - // Read saved fs metadata for ongoing multipart. - fsMetaPath := pathJoin(uploadIDPath, fsMetaJSONFile) - fsMeta, err := readFSMetadata(fs.storage, minioMetaMultipartBucket, fsMetaPath) + uploadIDPath := pathJoin(bucket, object, uploadID) + + // Hold the lock so that two parallel complete-multipart-uploads + // do not leave a stale uploads.json behind. + objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object)) + objectMPartPathLock.Lock() + defer objectMPartPathLock.Unlock() + + fsMetaPathMultipart := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile) + rlk, err := fs.rwPool.Open(fsMetaPathMultipart) if err != nil { - return "", toObjectErr(err, minioMetaMultipartBucket, fsMetaPath) + if err == errFileNotFound || err == errFileAccessDenied { + return "", traceError(InvalidUploadID{UploadID: uploadID}) + } + return "", toObjectErr(traceError(err), bucket, object) } + // Disallow any parallel abort or complete multipart operations. 
+ rwlk, err := fs.rwPool.Write(pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadsJSONFile)) + if err != nil { + fs.rwPool.Close(fsMetaPathMultipart) + if err == errFileNotFound || err == errFileAccessDenied { + return "", traceError(InvalidUploadID{UploadID: uploadID}) + } + return "", toObjectErr(traceError(err), bucket, object) + } + defer rwlk.Close() + + fsMeta := fsMetaV1{} + // Read saved fs metadata for ongoing multipart. + _, err = fsMeta.ReadFrom(io.NewSectionReader(rlk, 0, rlk.Size())) + if err != nil { + fs.rwPool.Close(fsMetaPathMultipart) + return "", toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart) + } + + // Wait for any competing PutObject() operation on bucket/object, since same namespace + // would be acquired for `fs.json`. + fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile) + metaFile, err := fs.rwPool.Create(fsMetaPath) + if err != nil { + fs.rwPool.Close(fsMetaPathMultipart) + return "", toObjectErr(traceError(err), bucket, object) + } + defer metaFile.Close() + // This lock is held during rename of the appended tmp file to the actual // location so that any competing GetObject/PutObject/DeleteObject do not race. appendFallback := true // In case background-append did not append the required parts. 
+ if isPartsSame(fsMeta.Parts, parts) { - err = fs.bgAppend.complete(fs.storage, bucket, object, uploadID, fsMeta) + err = fs.complete(bucket, object, uploadID, fsMeta) if err == nil { appendFallback = false - if err = fs.storage.RenameFile(minioMetaTmpBucket, uploadID, bucket, object); err != nil { - return "", toObjectErr(traceError(err), minioMetaTmpBucket, uploadID) + fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID) + fsNSObjPath := pathJoin(fs.fsPath, bucket, object) + if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil { + fs.rwPool.Close(fsMetaPathMultipart) + return "", toObjectErr(err, minioMetaTmpBucket, uploadID) } } } @@ -518,77 +667,79 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload // background append could not do append all the required parts, hence we do it here. tempObj := uploadID + "-" + "part.1" + fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj) + // Delete the temporary object in the case of a + // failure. If PutObject succeeds, then there would be + // nothing to delete. + defer fsRemoveFile(fsTmpObjPath) + // Allocate staging buffer. var buf = make([]byte, readSizeV1) - var objSize int64 - objSize, err = fs.totalObjectSize(fsMeta, parts) - if err != nil { - return "", traceError(err) - } - if objSize > 0 { - // Prepare file to avoid disk fragmentation - err = fs.storage.PrepareFile(minioMetaTmpBucket, tempObj, objSize) - if err != nil { - return "", traceError(err) - } - } - - // Loop through all parts, validate them and then commit to disk. + // Validate all parts and then commit to disk. for i, part := range parts { partIdx := fsMeta.ObjectPartIndex(part.PartNumber) if partIdx == -1 { + fs.rwPool.Close(fsMetaPathMultipart) return "", traceError(InvalidPart{}) } + if fsMeta.Parts[partIdx].ETag != part.ETag { + fs.rwPool.Close(fsMetaPathMultipart) return "", traceError(BadDigest{}) } + // All parts except the last part has to be atleast 5MB. 
if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) { + fs.rwPool.Close(fsMetaPathMultipart) return "", traceError(PartTooSmall{ PartNumber: part.PartNumber, PartSize: fsMeta.Parts[partIdx].Size, PartETag: part.ETag, }) } + // Construct part suffix. partSuffix := fmt.Sprintf("object%d", part.PartNumber) - multipartPartFile := path.Join(bucket, object, uploadID, partSuffix) + multipartPartFile := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, partSuffix) + + var reader io.ReadCloser offset := int64(0) - totalLeft := fsMeta.Parts[partIdx].Size - for totalLeft > 0 { - curLeft := int64(readSizeV1) - if totalLeft < readSizeV1 { - curLeft = totalLeft + reader, _, err = fsOpenFile(multipartPartFile, offset) + if err != nil { + fs.rwPool.Close(fsMetaPathMultipart) + if err == errFileNotFound { + return "", traceError(InvalidPart{}) } - var n int64 - n, err = fs.storage.ReadFile(minioMetaMultipartBucket, multipartPartFile, offset, buf[:curLeft]) - if n > 0 { - if err = fs.storage.AppendFile(minioMetaTmpBucket, tempObj, buf[:n]); err != nil { - return "", toObjectErr(traceError(err), minioMetaTmpBucket, tempObj) - } - } - if err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - break - } - if err == errFileNotFound { - return "", traceError(InvalidPart{}) - } - return "", toObjectErr(traceError(err), minioMetaMultipartBucket, multipartPartFile) - } - offset += n - totalLeft -= n + return "", toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix) } + + // No need to hold a lock, this is a unique file and will be only written + // to one one process per uploadID per minio process. 
+ var wfile *os.File + wfile, err = os.OpenFile(preparePath(fsTmpObjPath), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666) + if err != nil { + reader.Close() + fs.rwPool.Close(fsMetaPathMultipart) + return "", toObjectErr(traceError(err), bucket, object) + } + + _, err = io.CopyBuffer(wfile, reader, buf) + if err != nil { + wfile.Close() + reader.Close() + fs.rwPool.Close(fsMetaPathMultipart) + return "", toObjectErr(traceError(err), bucket, object) + } + + wfile.Close() + reader.Close() } - // Rename the file back to original location, if not delete the temporary object. - err = fs.storage.RenameFile(minioMetaTmpBucket, tempObj, bucket, object) - if err != nil { - if dErr := fs.storage.DeleteFile(minioMetaTmpBucket, tempObj); dErr != nil { - return "", toObjectErr(traceError(dErr), minioMetaTmpBucket, tempObj) - } - return "", toObjectErr(traceError(err), bucket, object) + fsNSObjPath := pathJoin(fs.fsPath, bucket, object) + if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil { + fs.rwPool.Close(fsMetaPathMultipart) + return "", toObjectErr(err, minioMetaTmpBucket, uploadID) } } @@ -601,54 +752,33 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload } fsMeta.Meta["md5Sum"] = s3MD5 - fsMetaPath = path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile) - // Write the metadata to a temp file and rename it to the actual location. - if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil { + // Write all the set metadata. + if _, err = fsMeta.WriteTo(metaFile); err != nil { + fs.rwPool.Close(fsMetaPathMultipart) return "", toObjectErr(err, bucket, object) } + // Close lock held on bucket/object/uploadid/fs.json, + // this needs to be done for windows so that we can happily + // delete the bucket/object/uploadid + fs.rwPool.Close(fsMetaPathMultipart) + // Cleanup all the parts if everything else has been safely committed. 
- if err = cleanupUploadedParts(bucket, object, uploadID, fs.storage); err != nil { - return "", toObjectErr(err, bucket, object) + multipartObjectDir := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object) + multipartUploadIDDir := pathJoin(multipartObjectDir, uploadID) + if err = fsRemoveUploadIDPath(multipartObjectDir, multipartUploadIDDir); err != nil { + return "", toObjectErr(traceError(err), bucket, object) } - // Hold the lock so that two parallel - // complete-multipart-uploads do not leave a stale - // uploads.json behind. - objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, - pathJoin(bucket, object)) - objectMPartPathLock.Lock() - defer objectMPartPathLock.Unlock() - - // remove entry from uploads.json - if err := fs.removeUploadID(bucket, object, uploadID); err != nil { - return "", toObjectErr(err, minioMetaMultipartBucket, path.Join(bucket, object)) + // Remove entry from `uploads.json`. + if err = fs.removeUploadID(bucket, object, uploadID, rwlk); err != nil { + return "", toObjectErr(err, minioMetaMultipartBucket, pathutil.Join(bucket, object)) } // Return md5sum. return s3MD5, nil } -// abortMultipartUpload - wrapper for purging an ongoing multipart -// transaction, deletes uploadID entry from `uploads.json` and purges -// the directory at '.minio.sys/multipart/bucket/object/uploadID' holding -// all the upload parts. -func (fs fsObjects) abortMultipartUpload(bucket, object, uploadID string) error { - // Signal appendParts routine to stop waiting for new parts to arrive. - fs.bgAppend.abort(uploadID) - // Cleanup all uploaded parts. 
- if err := cleanupUploadedParts(bucket, object, uploadID, fs.storage); err != nil { - return err - } - // remove entry from uploads.json with quorum - if err := fs.removeUploadID(bucket, object, uploadID); err != nil { - return toObjectErr(err, bucket, object) - } - - // success - return nil -} - // AbortMultipartUpload - aborts an ongoing multipart operation // signified by the input uploadID. This is an atomic operation // doesn't require clients to initiate multiple such requests. @@ -666,17 +796,57 @@ func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error return err } - // Hold lock so that there is no competing - // complete-multipart-upload or put-object-part. - uploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, - pathJoin(bucket, object, uploadID)) - uploadIDLock.Lock() - defer uploadIDLock.Unlock() - - if !fs.isUploadIDExists(bucket, object, uploadID) { - return traceError(InvalidUploadID{UploadID: uploadID}) + if _, err := fs.statBucketDir(bucket); err != nil { + return toObjectErr(err, bucket) } - err := fs.abortMultipartUpload(bucket, object, uploadID) - return err + uploadIDPath := pathJoin(bucket, object, uploadID) + + // Hold the lock so that two parallel complete-multipart-uploads + // do not leave a stale uploads.json behind. 
+ objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, + pathJoin(bucket, object)) + objectMPartPathLock.Lock() + defer objectMPartPathLock.Unlock() + + fsMetaPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile) + if _, err := fs.rwPool.Open(fsMetaPath); err != nil { + if err == errFileNotFound || err == errFileAccessDenied { + return traceError(InvalidUploadID{UploadID: uploadID}) + } + return toObjectErr(traceError(err), bucket, object) + } + + uploadsPath := pathJoin(bucket, object, uploadsJSONFile) + rwlk, err := fs.rwPool.Write(pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadsPath)) + if err != nil { + fs.rwPool.Close(fsMetaPath) + if err == errFileNotFound || err == errFileAccessDenied { + return traceError(InvalidUploadID{UploadID: uploadID}) + } + return toObjectErr(traceError(err), bucket, object) + } + defer rwlk.Close() + + // Signal appendParts routine to stop waiting for new parts to arrive. + fs.abort(uploadID) + + // Close lock held on bucket/object/uploadid/fs.json, + // this needs to be done for windows so that we can happily + // delete the bucket/object/uploadid + fs.rwPool.Close(fsMetaPath) + + // Cleanup all uploaded parts and abort the upload. + multipartObjectDir := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object) + multipartUploadIDDir := pathJoin(multipartObjectDir, uploadID) + if err = fsRemoveUploadIDPath(multipartObjectDir, multipartUploadIDDir); err != nil { + return toObjectErr(traceError(err), bucket, object) + } + + // Remove entry from `uploads.json`. 
+ if err = fs.removeUploadID(bucket, object, uploadID, rwlk); err != nil { + return toObjectErr(err, bucket, object) + } + + return nil } diff --git a/cmd/fs-v1-multipart_test.go b/cmd/fs-v1-multipart_test.go index 65dda780f..660b7af78 100644 --- a/cmd/fs-v1-multipart_test.go +++ b/cmd/fs-v1-multipart_test.go @@ -19,7 +19,6 @@ package cmd import ( "bytes" "path/filepath" - "reflect" "testing" ) @@ -30,7 +29,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) { defer removeAll(disk) obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + fs := obj.(*fsObjects) bucketName := "bucket" objectName := "object" @@ -38,20 +37,11 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) { t.Fatal("Cannot create bucket, err: ", err) } - // Test with faulty disk - fsStorage := fs.storage.(*retryStorage) - for i := 1; i <= 5; i++ { - // Faulty disk generates errFaultyDisk at 'i' storage api call number - fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil) - if _, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); errorCause(err) != errFaultyDisk { - switch i { - case 1: - if !isSameType(errorCause(err), BucketNotFound{}) { - t.Fatal("Unexpected error ", err) - } - default: - t.Fatal("Unexpected error ", err) - } + // Test with disk removed. + removeAll(disk) // remove disk. 
+ if _, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); err != nil { + if !isSameType(errorCause(err), BucketNotFound{}) { + t.Fatal("Unexpected error ", err) } } } @@ -68,7 +58,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) { disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + fs := obj.(*fsObjects) bucketName := "bucket" objectName := "object" data := []byte("12345") @@ -86,30 +76,10 @@ func TestPutObjectPartFaultyDisk(t *testing.T) { md5Hex := getMD5Hash(data) sha256sum := "" - // Test with faulty disk - fsStorage := fs.storage.(*retryStorage) - for i := 1; i <= 7; i++ { - // Faulty disk generates errFaultyDisk at 'i' storage api call number - fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil) - md5sum, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, dataLen, bytes.NewReader(data), md5Hex, sha256sum) - if errorCause(err) != errFaultyDisk { - if errorCause(err) == nil { - t.Fatalf("Test %d shouldn't succeed, md5sum = %s\n", i, md5sum) - } - switch i { - case 1: - if !isSameType(errorCause(err), BucketNotFound{}) { - t.Fatal("Unexpected error ", err) - } - case 3: - case 2, 4, 5, 6: - if !isSameType(errorCause(err), InvalidUploadID{}) { - t.Fatal("Unexpected error ", err) - } - default: - t.Fatal("Unexpected error ", i, err, reflect.TypeOf(errorCause(err)), reflect.TypeOf(errFaultyDisk)) - } - } + removeAll(disk) // Disk not found. 
+ _, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, dataLen, bytes.NewReader(data), md5Hex, sha256sum) + if !isSameType(errorCause(err), BucketNotFound{}) { + t.Fatal("Unexpected error ", err) } } @@ -120,7 +90,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) { defer removeAll(disk) obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + fs := obj.(*fsObjects) bucketName := "bucket" objectName := "object" data := []byte("12345") @@ -143,23 +113,10 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) { parts := []completePart{{PartNumber: 1, ETag: md5Hex}} - fsStorage := fs.storage.(*retryStorage) - for i := 1; i <= 3; i++ { - // Faulty disk generates errFaultyDisk at 'i' storage api call number - fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil) - if _, err := fs.CompleteMultipartUpload(bucketName, objectName, uploadID, parts); errorCause(err) != errFaultyDisk { - switch i { - case 1: - if !isSameType(errorCause(err), BucketNotFound{}) { - t.Fatal("Unexpected error ", err) - } - case 2: - if !isSameType(errorCause(err), InvalidUploadID{}) { - t.Fatal("Unexpected error ", err) - } - default: - t.Fatal("Unexpected error ", i, err, reflect.TypeOf(errorCause(err)), reflect.TypeOf(errFaultyDisk)) - } + removeAll(disk) // Disk not found. 
+ if _, err := fs.CompleteMultipartUpload(bucketName, objectName, uploadID, parts); err != nil { + if !isSameType(errorCause(err), BucketNotFound{}) { + t.Fatal("Unexpected error ", err) } } } @@ -169,8 +126,10 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) { // Prepare for tests disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) + obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + + fs := obj.(*fsObjects) bucketName := "bucket" objectName := "object" data := []byte("12345") @@ -191,27 +150,10 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) { t.Fatal("Unexpected error ", err) } - fsStorage := fs.storage.(*retryStorage) - for i := 1; i <= 4; i++ { - // Faulty disk generates errFaultyDisk at 'i' storage api call number - fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil) - if _, err := fs.ListMultipartUploads(bucketName, objectName, "", "", "", 1000); errorCause(err) != errFaultyDisk { - switch i { - case 1: - if !isSameType(errorCause(err), BucketNotFound{}) { - t.Fatal("Unexpected error ", err) - } - case 2: - if !isSameType(errorCause(err), InvalidUploadID{}) { - t.Fatal("Unexpected error ", err) - } - case 3: - if errorCause(err) != errFileNotFound { - t.Fatal("Unexpected error ", err) - } - default: - t.Fatal("Unexpected error ", i, err, reflect.TypeOf(errorCause(err)), reflect.TypeOf(errFaultyDisk)) - } + removeAll(disk) // Disk not found. + if _, err := fs.ListMultipartUploads(bucketName, objectName, "", "", "", 1000); err != nil { + if !isSameType(errorCause(err), BucketNotFound{}) { + t.Fatal("Unexpected error ", err) } } } diff --git a/cmd/fs-v1-rwpool.go b/cmd/fs-v1-rwpool.go new file mode 100644 index 000000000..b568c05d5 --- /dev/null +++ b/cmd/fs-v1-rwpool.go @@ -0,0 +1,193 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "os" + pathutil "path" + "sync" + + "github.com/minio/minio/pkg/lock" +) + +// fsIOPool represents a protected list to keep track of all +// the concurrent readers at a given path. +type fsIOPool struct { + sync.Mutex + readersMap map[string]*lock.RLockedFile +} + +// Open is a wrapper call to read locked file which +// returns a ReadAtCloser. +// +// ReaderAt is provided so that the fd is non seekable, since +// we are sharing fd's with concurrent threads, we don't want +// all readers to change offsets on each other during such +// concurrent operations. Using ReadAt allows us to read from +// any offsets. +// +// Closer is implemented to track total readers and to close +// only when there no more readers, the fd is purged if the lock +// count has reached zero. +func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) { + if err := checkPathLength(path); err != nil { + return nil, err + } + + fsi.Lock() + rlkFile, ok := fsi.readersMap[path] + + // File reference exists on map, validate if its + // really closed and we are safe to purge it. + if ok && rlkFile != nil { + // If the file is closed and not removed from map is a bug. + if rlkFile.IsClosed() { + // Log this as an error. + errorIf(errUnexpected, "Unexpected entry found on the map %s", path) + + // Purge the cached lock path from map. + delete(fsi.readersMap, path) + + // Indicate that we can populate the new fd. 
+ ok = false + } else { + // Increment the lock ref, since the file is not closed yet + // and caller requested to read the file again. + rlkFile.IncLockRef() + } + } + fsi.Unlock() + + // Locked path reference doesn't exist, freshly open the file in + // read lock mode. + if !ok { + var err error + // Open file for reading. + rlkFile, err = lock.RLockedOpenFile(preparePath(path)) + if err != nil { + if os.IsNotExist(err) { + return nil, errFileNotFound + } else if os.IsPermission(err) { + return nil, errFileAccessDenied + } else if isSysErrIsDir(err) { + return nil, errIsNotRegular + } else if isSysErrNotDir(err) { + return nil, errFileAccessDenied + } else if isSysErrPathNotFound(err) { + return nil, errFileNotFound + } + return nil, err + } + + // Save new reader on the map. + fsi.Lock() + fsi.readersMap[path] = rlkFile + fsi.Unlock() + } + + // Success. + return rlkFile, nil +} + +// Write - Attempt to lock the file if it exists, +// - if the file exists. Then we try to get a write lock this +// will block if we can't get a lock perhaps another write +// or read is in progress. Concurrent calls are protected +// by the global namspace lock within the same process. +func (fsi *fsIOPool) Write(path string) (wlk *lock.LockedFile, err error) { + if err = checkPathLength(path); err != nil { + return nil, err + } + + wlk, err = lock.LockedOpenFile(preparePath(path), os.O_RDWR, 0666) + if err != nil { + if os.IsNotExist(err) { + return nil, errFileNotFound + } else if os.IsPermission(err) { + return nil, errFileAccessDenied + } else if isSysErrIsDir(err) { + return nil, errIsNotRegular + } + return nil, err + } + return wlk, nil +} + +// Create - creates a new write locked file instance. +// - if the file doesn't exist. We create the file and hold lock. +func (fsi *fsIOPool) Create(path string) (wlk *lock.LockedFile, err error) { + if err = checkPathLength(path); err != nil { + return nil, err + } + + // Creates parent if missing. 
+ if err = mkdirAll(pathutil.Dir(path), 0777); err != nil { + if os.IsPermission(err) { + return nil, errFileAccessDenied + } else if isSysErrNotDir(err) { + return nil, errFileAccessDenied + } + return nil, err + } + + // Attempt to create the file. + wlk, err = lock.LockedOpenFile(preparePath(path), os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + if os.IsPermission(err) { + return nil, errFileAccessDenied + } else if isSysErrIsDir(err) { + return nil, errIsNotRegular + } else if isSysErrPathNotFound(err) { + return nil, errFileAccessDenied + } + return nil, err + } + + // Success. + return wlk, err +} + +// Close implements closing the path referenced by the reader in such +// a way that it makes sure to remove entry from the map immediately +// if no active readers are present. +func (fsi *fsIOPool) Close(path string) error { + fsi.Lock() + defer fsi.Unlock() + + if err := checkPathLength(path); err != nil { + return err + } + + // Pop readers from path. + rlkFile, ok := fsi.readersMap[path] + if !ok { + return nil + } + + // Close the reader. + rlkFile.Close() + + // If the file is closed, remove it from the reader pool map. + if rlkFile.IsClosed() { + + // Purge the cached lock path from map. + delete(fsi.readersMap, path) + } + + // Success. + return nil +} diff --git a/cmd/fs-v1-rwpool_test.go b/cmd/fs-v1-rwpool_test.go new file mode 100644 index 000000000..4a9280f33 --- /dev/null +++ b/cmd/fs-v1-rwpool_test.go @@ -0,0 +1,112 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "runtime" + "testing" + + "github.com/minio/minio/pkg/lock" +) + +// Tests long path calls. +func TestRWPoolLongPath(t *testing.T) { + rwPool := &fsIOPool{ + readersMap: make(map[string]*lock.RLockedFile), + } + + longPath := "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001" + if _, err := rwPool.Create(longPath); err != errFileNameTooLong { + t.Fatal(err) + } + + if _, err := rwPool.Write(longPath); err != errFileNameTooLong { + t.Fatal(err) + } + + if _, err := rwPool.Open(longPath); err != errFileNameTooLong { + t.Fatal(err) + } +} + +// Tests all RWPool methods. +func TestRWPool(t *testing.T) { + // create posix test setup + _, path, err := newPosixTestSetup() + if err != nil { + t.Fatalf("Unable to create posix test setup, %s", err) + } + defer removeAll(path) + + rwPool := &fsIOPool{ + readersMap: make(map[string]*lock.RLockedFile), + } + wlk, err := rwPool.Create(pathJoin(path, "success-vol", "file/path/1.txt")) + if err != nil { + t.Fatal(err) + } + wlk.Close() + + // Fails to create a parent directory if there is a file. + _, err = rwPool.Create(pathJoin(path, "success-vol", "file/path/1.txt/test")) + if err != errFileAccessDenied { + t.Fatal("Unexpected error", err) + } + + // Fails to create a file if there is a directory. 
+ _, err = rwPool.Create(pathJoin(path, "success-vol", "file")) + if runtime.GOOS == "windows" { + if err != errFileAccessDenied { + t.Fatal("Unexpected error", err) + } + } else { + if err != errIsNotRegular { + t.Fatal("Unexpected error", err) + } + } + + rlk, err := rwPool.Open(pathJoin(path, "success-vol", "file/path/1.txt")) + if err != nil { + t.Fatal("Unexpected error", err) + } + rlk.Close() + + // Fails to read a directory. + _, err = rwPool.Open(pathJoin(path, "success-vol", "file")) + if runtime.GOOS == "windows" { + if err != errFileAccessDenied { + t.Fatal("Unexpected error", err) + } + } else { + if err != errIsNotRegular { + t.Fatal("Unexpected error", err) + } + } + + // Fails to open a file which has a parent as file. + _, err = rwPool.Open(pathJoin(path, "success-vol", "file/path/1.txt/test")) + if runtime.GOOS != "windows" { + if err != errFileAccessDenied { + t.Fatal("Unexpected error", err) + } + } else { + if err != errFileNotFound { + t.Fatal("Unexpected error", err) + } + } + +} diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index f0e77f6eb..6f33594e7 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -23,110 +23,203 @@ import ( "fmt" "hash" "io" - "path" + "os" + "path/filepath" + "runtime" "sort" "strings" + "syscall" - "github.com/minio/minio/pkg/mimedb" + "github.com/minio/minio/pkg/disk" + "github.com/minio/minio/pkg/lock" "github.com/minio/sha256-simd" ) // fsObjects - Implements fs object layer. type fsObjects struct { - storage StorageAPI + // Path to be exported over S3 API. + fsPath string - // List pool management. + // Unique value to be used for all + // temporary transactions. + fsUUID string + + minFreeSpace int64 + minFreeInodes int64 + + // FS rw pool. + rwPool *fsIOPool + + // ListObjects pool management. 
listPool *treeWalkPool // To manage the appendRoutine go0routines bgAppend *backgroundAppend } -// list of all errors that can be ignored in tree walk operation in FS -var fsTreeWalkIgnoredErrs = []error{ - errFileNotFound, - errVolumeNotFound, +// Initializes meta volume on all the fs path. +func initMetaVolumeFS(fsPath, fsUUID string) error { + // This happens for the first time, but keep this here since this + // is the only place where it can be made less expensive + // optimizing all other calls. Create minio meta volume, + // if it doesn't exist yet. + metaBucketPath := pathJoin(fsPath, minioMetaBucket) + if err := mkdirAll(metaBucketPath, 0777); err != nil { + return err + } + + metaTmpPath := pathJoin(fsPath, minioMetaTmpBucket, fsUUID) + if err := mkdirAll(metaTmpPath, 0777); err != nil { + return err + } + + metaMultipartPath := pathJoin(fsPath, minioMetaMultipartBucket) + if err := mkdirAll(metaMultipartPath, 0777); err != nil { + return err + } + + // Return success here. + return nil + } -// newFSObjects - initialize new fs object layer. -func newFSObjects(storage StorageAPI) (ObjectLayer, error) { - if storage == nil { +// newFSObjectLayer - initialize new fs object layer. +func newFSObjectLayer(fsPath string) (ObjectLayer, error) { + if fsPath == "" { return nil, errInvalidArgument } - // Load format and validate. - _, err := loadFormatFS(storage) + var err error + // Disallow relative paths, figure out absolute paths. + fsPath, err = filepath.Abs(fsPath) if err != nil { - return nil, fmt.Errorf("Unable to recognize backend format, %s", err) + return nil, err } + fi, err := os.Stat(preparePath(fsPath)) + if err == nil { + if !fi.IsDir() { + return nil, syscall.ENOTDIR + } + } + if os.IsNotExist(err) { + // Disk not found create it. + err = mkdirAll(fsPath, 0777) + if err != nil { + return nil, err + } + } + + // Assign a new UUID for FS minio mode. Each server instance + // gets its own UUID for temporary file transaction. 
+ fsUUID := mustGetUUID() + // Initialize meta volume, if volume already exists ignores it. - if err = initMetaVolume([]StorageAPI{storage}); err != nil { + if err = initMetaVolumeFS(fsPath, fsUUID); err != nil { return nil, fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err) } + // Load `format.json`. + format, err := loadFormatFS(fsPath) + if err != nil && err != errUnformattedDisk { + return nil, fmt.Errorf("Unable to load 'format.json', %s", err) + } + + // If the `format.json` doesn't exist create one. + if err == errUnformattedDisk { + fsFormatPath := pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile) + // Initialize format.json, if already exists overwrite it. + if serr := saveFormatFS(fsFormatPath, newFSFormatV1()); serr != nil { + return nil, fmt.Errorf("Unable to initialize 'format.json', %s", serr) + } + } + + // Validate if we have the same format. + if err == nil && format.Format != "fs" { + return nil, fmt.Errorf("Unable to recognize backend format, Disk is not in FS format. %s", format.Format) + } + // Initialize fs objects. - fs := fsObjects{ - storage: storage, + fs := &fsObjects{ + fsPath: fsPath, + fsUUID: fsUUID, + minFreeSpace: fsMinFreeSpace, + minFreeInodes: fsMinFreeInodes, + rwPool: &fsIOPool{ + readersMap: make(map[string]*lock.RLockedFile), + }, listPool: newTreeWalkPool(globalLookupTimeout), bgAppend: &backgroundAppend{ infoMap: make(map[string]bgAppendPartsInfo), }, } + // Validate if disk has enough free space to use. + if err = fs.checkDiskFree(); err != nil { + return nil, err + } + + // Initialize and load bucket policies. + err = initBucketPolicies(fs) + if err != nil { + return nil, fmt.Errorf("Unable to load all bucket policies. %s", err) + } + + // Initialize a new event notifier. + err = initEventNotifier(fs) + if err != nil { + return nil, fmt.Errorf("Unable to initialize event notification. %s", err) + } + // Return successfully initialized object layer. 
return fs, nil } -// Should be called when process shuts down. -func (fs fsObjects) Shutdown() error { - // List if there are any multipart entries. - prefix := "" - entries, err := fs.storage.ListDir(minioMetaMultipartBucket, prefix) - if err != nil { - // A non nil err means that an unexpected error occurred - return toObjectErr(traceError(err)) - } - if len(entries) > 0 { - // Should not remove .minio.sys if there are any multipart - // uploads were found. +// checkDiskFree verifies if disk path has sufficient minimum free disk space and files. +func (fs fsObjects) checkDiskFree() (err error) { + // We don't validate disk space or inode utilization on windows. + // Each windows calls to 'GetVolumeInformationW' takes around 3-5seconds. + if runtime.GOOS == "windows" { return nil } - if err = fs.storage.DeleteVol(minioMetaMultipartBucket); err != nil { - return toObjectErr(traceError(err)) - } - // List if there are any bucket configuration entries. - _, err = fs.storage.ListDir(minioMetaBucket, bucketConfigPrefix) - if err != errFileNotFound { - // A nil err means that bucket config directory is not empty hence do not remove '.minio.sys' volume. - // A non nil err means that an unexpected error occurred - return toObjectErr(traceError(err)) - } - // Cleanup and delete tmp bucket. - if err = cleanupDir(fs.storage, minioMetaTmpBucket, prefix); err != nil { + + var di disk.Info + di, err = getDiskInfo(preparePath(fs.fsPath)) + if err != nil { return err } - if err = fs.storage.DeleteVol(minioMetaTmpBucket); err != nil { - return toObjectErr(traceError(err)) + + // Remove 5% from free space for cumulative disk space used for journalling, inodes etc. 
+ availableDiskSpace := float64(di.Free) * 0.95 + if int64(availableDiskSpace) <= fs.minFreeSpace { + return errDiskFull } - // Remove format.json and delete .minio.sys bucket - if err = fs.storage.DeleteFile(minioMetaBucket, fsFormatJSONFile); err != nil { - return toObjectErr(traceError(err)) - } - if err = fs.storage.DeleteVol(minioMetaBucket); err != nil { - if err != errVolumeNotEmpty { - return toObjectErr(traceError(err)) + // Some filesystems do not implement a way to provide total inodes available, instead inodes + // are allocated based on available disk space. For example CephFS, StoreNext CVFS, AzureFile driver. + // Allow for the available disk to be separately validate and we will validate inodes only if + // total inodes are provided by the underlying filesystem. + if di.Files != 0 { + availableFiles := int64(di.Ffree) + if availableFiles <= fs.minFreeInodes { + return errDiskFull } } - // Successful. + + // Success. return nil } +// Should be called when process shuts down. +func (fs fsObjects) Shutdown() error { + // Cleanup and delete tmp uuid. + return fsRemoveAll(pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)) +} + // StorageInfo - returns underlying storage statistics. func (fs fsObjects) StorageInfo() StorageInfo { - info, err := fs.storage.DiskInfo() - errorIf(err, "Unable to get disk info %#v", fs.storage) + info, err := getDiskInfo(preparePath(fs.fsPath)) + errorIf(err, "Unable to get disk info %#v", fs.fsPath) storageInfo := StorageInfo{ Total: info.Total, Free: info.Free, @@ -137,81 +230,141 @@ func (fs fsObjects) StorageInfo() StorageInfo { /// Bucket operations -// MakeBucket - make a bucket. -func (fs fsObjects) MakeBucket(bucket string) error { +// getBucketDir - will convert incoming bucket names to +// corresponding valid bucket names on the backend in a platform +// compatible way for all operating systems. +func (fs fsObjects) getBucketDir(bucket string) (string, error) { // Verify if bucket is valid. 
if !IsValidBucketName(bucket) { - return traceError(BucketNameInvalid{Bucket: bucket}) + return "", traceError(BucketNameInvalid{Bucket: bucket}) } - if err := fs.storage.MakeVol(bucket); err != nil { + + bucketDir := pathJoin(fs.fsPath, bucket) + return bucketDir, nil +} + +func (fs fsObjects) statBucketDir(bucket string) (os.FileInfo, error) { + bucketDir, err := fs.getBucketDir(bucket) + if err != nil { + return nil, err + } + st, err := fsStatDir(bucketDir) + if err != nil { + return nil, traceError(err) + } + return st, nil +} + +// MakeBucket - create a new bucket, returns if it +// already exists. +func (fs fsObjects) MakeBucket(bucket string) error { + bucketDir, err := fs.getBucketDir(bucket) + if err != nil { + return toObjectErr(err, bucket) + } + + if err = fsMkdir(bucketDir); err != nil { return toObjectErr(traceError(err), bucket) } + return nil } -// GetBucketInfo - get bucket info. +// GetBucketInfo - fetch bucket metadata info. func (fs fsObjects) GetBucketInfo(bucket string) (BucketInfo, error) { - // Verify if bucket is valid. - if !IsValidBucketName(bucket) { - return BucketInfo{}, traceError(BucketNameInvalid{Bucket: bucket}) - } - vi, err := fs.storage.StatVol(bucket) + st, err := fs.statBucketDir(bucket) if err != nil { - return BucketInfo{}, toObjectErr(traceError(err), bucket) + return BucketInfo{}, toObjectErr(err, bucket) } + + // As os.Stat() doesn't carry other than ModTime(), use ModTime() as CreatedTime. + createdTime := st.ModTime() return BucketInfo{ Name: bucket, - Created: vi.Created, + Created: createdTime, }, nil } -// ListBuckets - list buckets. +// ListBuckets - list all s3 compatible buckets (directories) at fsPath. 
func (fs fsObjects) ListBuckets() ([]BucketInfo, error) { - var bucketInfos []BucketInfo - vols, err := fs.storage.ListVols() - if err != nil { - return nil, toObjectErr(traceError(err)) + if err := checkPathLength(fs.fsPath); err != nil { + return nil, err } + var bucketInfos []BucketInfo + entries, err := readDir(preparePath(fs.fsPath)) + if err != nil { + return nil, toObjectErr(traceError(errDiskNotFound)) + } + var invalidBucketNames []string - for _, vol := range vols { - // StorageAPI can send volume names which are incompatible - // with buckets, handle it and skip them. - if !IsValidBucketName(vol.Name) { - invalidBucketNames = append(invalidBucketNames, vol.Name) + for _, entry := range entries { + if entry == minioMetaBucket+"/" || !strings.HasSuffix(entry, slashSeparator) { continue } - // Ignore the volume special bucket. - if vol.Name == minioMetaBucket { + + var fi os.FileInfo + fi, err = fsStatDir(pathJoin(fs.fsPath, entry)) + if err != nil { + // If the directory does not exist, skip the entry. + if err == errVolumeNotFound { + continue + } else if err == errVolumeAccessDenied { + // Skip the entry if its a file. + continue + } + return nil, err + } + + if !IsValidBucketName(fi.Name()) { + invalidBucketNames = append(invalidBucketNames, fi.Name()) continue } + bucketInfos = append(bucketInfos, BucketInfo{ - Name: vol.Name, - Created: vol.Created, + Name: fi.Name(), + // As os.Stat() doesn't carry other than ModTime(), use ModTime() as CreatedTime. + Created: fi.ModTime(), }) } + // Print a user friendly message if we indeed skipped certain directories which are // incompatible with S3's bucket name restrictions. if len(invalidBucketNames) > 0 { errorIf(errors.New("One or more invalid bucket names found"), "Skipping %s", invalidBucketNames) } + + // Sort bucket infos by bucket name. sort.Sort(byBucketName(bucketInfos)) + + // Succes. return bucketInfos, nil } -// DeleteBucket - delete a bucket. 
+// DeleteBucket - delete a bucket and all the metadata associated +// with the bucket including pending multipart, object metadata. func (fs fsObjects) DeleteBucket(bucket string) error { - // Verify if bucket is valid. - if !IsValidBucketName(bucket) { - return traceError(BucketNameInvalid{Bucket: bucket}) - } - // Attempt to delete regular bucket. - if err := fs.storage.DeleteVol(bucket); err != nil { - return toObjectErr(traceError(err), bucket) - } - // Cleanup all the previously incomplete multiparts. - if err := cleanupDir(fs.storage, minioMetaMultipartBucket, bucket); err != nil && errorCause(err) != errVolumeNotFound { + bucketDir, err := fs.getBucketDir(bucket) + if err != nil { return toObjectErr(err, bucket) } + + // Attempt to delete regular bucket. + if err = fsRemoveDir(bucketDir); err != nil { + return toObjectErr(err, bucket) + } + + // Cleanup all the previously incomplete multiparts. + minioMetaMultipartBucketDir := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket) + if err = fsRemoveAll(minioMetaMultipartBucketDir); err != nil { + return toObjectErr(err, bucket) + } + + // Cleanup all the bucket metadata. + minioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket) + if err = fsRemoveAll(minioMetadataBucketDir); err != nil { + return toObjectErr(err, bucket) + } + return nil } @@ -221,8 +374,12 @@ func (fs fsObjects) DeleteBucket(bucket string) error { // if source object and destination object are same we only // update metadata. func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (ObjectInfo, error) { + if _, err := fs.statBucketDir(srcBucket); err != nil { + return ObjectInfo{}, toObjectErr(err, srcBucket) + } + // Stat the file to get file size. 
- fi, err := fs.storage.StatFile(srcBucket, srcObject) + fi, err := fsStatFile(pathJoin(fs.fsPath, srcBucket, srcObject)) if err != nil { return ObjectInfo{}, toObjectErr(traceError(err), srcBucket, srcObject) } @@ -230,21 +387,28 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string // Check if this request is only metadata update. cpMetadataOnly := strings.EqualFold(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject)) if cpMetadataOnly { + fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, srcObject, fsMetaJSONFile) + var wlk *lock.LockedFile + wlk, err = fs.rwPool.Write(fsMetaPath) + if err != nil { + return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) + } + // This close will allow for locks to be synchronized on `fs.json`. + defer wlk.Close() + // Save objects' metadata in `fs.json`. fsMeta := newFSMetaV1() fsMeta.Meta = metadata - - fsMetaPath := pathJoin(bucketMetaPrefix, dstBucket, dstObject, fsMetaJSONFile) - if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil { - return ObjectInfo{}, toObjectErr(err, dstBucket, dstObject) + if _, err = fsMeta.WriteTo(wlk); err != nil { + return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) } - // Get object info. - return fs.getObjectInfo(dstBucket, dstObject) + // Return the new object info. + return fsMeta.ToObjectInfo(srcBucket, srcObject, fi), nil } // Length of the file to read. - length := fi.Size + length := fi.Size() // Initialize pipe. pipeReader, pipeWriter := io.Pipe() @@ -280,88 +444,89 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64, if err = checkGetObjArgs(bucket, object); err != nil { return err } + + if _, err = fs.statBucketDir(bucket); err != nil { + return toObjectErr(err, bucket) + } + // Offset cannot be negative. if offset < 0 { return toObjectErr(traceError(errUnexpected), bucket, object) } + // Writer cannot be nil. 
if writer == nil { return toObjectErr(traceError(errUnexpected), bucket, object) } - // Stat the file to get file size. - fi, err := fs.storage.StatFile(bucket, object) + if bucket != minioMetaBucket { + fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile) + _, err = fs.rwPool.Open(fsMetaPath) + if err != nil && err != errFileNotFound { + return toObjectErr(traceError(err), bucket, object) + } + defer fs.rwPool.Close(fsMetaPath) + } + + // Read the object, doesn't exist returns an s3 compatible error. + fsObjPath := pathJoin(fs.fsPath, bucket, object) + reader, size, err := fsOpenFile(fsObjPath, offset) if err != nil { return toObjectErr(traceError(err), bucket, object) } + defer reader.Close() - // For negative length we read everything. - if length < 0 { - length = fi.Size - offset - } - - // Reply back invalid range if the input offset and length fall out of range. - if offset > fi.Size || offset+length > fi.Size { - return traceError(InvalidRange{offset, length, fi.Size}) - } - - var totalLeft = length bufSize := int64(readSizeV1) if length > 0 && bufSize > length { bufSize = length } + + // For negative length we read everything. + if length < 0 { + length = size - offset + } + + // Reply back invalid range if the input offset and length fall out of range. + if offset > size || offset+length > size { + return traceError(InvalidRange{offset, length, size}) + } + // Allocate a staging buffer. buf := make([]byte, int(bufSize)) - if err = fsReadFile(fs.storage, bucket, object, writer, totalLeft, offset, buf); err != nil { - // Returns any error. - return toObjectErr(err, bucket, object) - } - return nil + + _, err = io.CopyBuffer(writer, io.LimitReader(reader, length), buf) + + return toObjectErr(traceError(err), bucket, object) } // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo. 
func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) { - fi, err := fs.storage.StatFile(bucket, object) - if err != nil { - return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) - } - fsMeta, err := readFSMetadata(fs.storage, minioMetaBucket, path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile)) - // Ignore error if the metadata file is not found, other errors must be returned. - if err != nil && errorCause(err) != errFileNotFound { - return ObjectInfo{}, toObjectErr(err, bucket, object) - } + fsMeta := fsMetaV1{} + fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile) - if len(fsMeta.Meta) == 0 { - fsMeta.Meta = make(map[string]string) - } - - // Guess content-type from the extension if possible. - if fsMeta.Meta["content-type"] == "" { - if objectExt := path.Ext(object); objectExt != "" { - if content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]; ok { - fsMeta.Meta["content-type"] = content.ContentType - } + // Read `fs.json` to perhaps contend with + // parallel Put() operations. + rlk, err := fs.rwPool.Open(fsMetaPath) + if err == nil { + // Read from fs metadata only if it exists. + defer fs.rwPool.Close(fsMetaPath) + if _, rerr := fsMeta.ReadFrom(io.NewSectionReader(rlk, 0, rlk.Size())); rerr != nil { + return ObjectInfo{}, toObjectErr(rerr, bucket, object) } } - objInfo := ObjectInfo{ - Bucket: bucket, - Name: object, - ModTime: fi.ModTime, - Size: fi.Size, - IsDir: fi.Mode.IsDir(), - MD5Sum: fsMeta.Meta["md5Sum"], - ContentType: fsMeta.Meta["content-type"], - ContentEncoding: fsMeta.Meta["content-encoding"], + // Ignore if `fs.json` is not available, this is true for pre-existing data. + if err != nil && err != errFileNotFound { + return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) } - // md5Sum has already been extracted into objInfo.MD5Sum. 
We - // need to remove it from fsMeta.Meta to avoid it from appearing as - // part of response headers. e.g, X-Minio-* or X-Amz-*. - delete(fsMeta.Meta, "md5Sum") - objInfo.UserDefined = fsMeta.Meta + // Stat the file to get file size. + fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object)) + if err != nil { + return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) + } - return objInfo, nil + return fsMeta.ToObjectInfo(bucket, object, fi), nil } // GetObjectInfo - reads object metadata and replies back ObjectInfo. @@ -369,6 +534,11 @@ func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) { if err := checkGetObjArgs(bucket, object); err != nil { return ObjectInfo{}, err } + + if _, err := fs.statBucketDir(bucket); err != nil { + return ObjectInfo{}, toObjectErr(err, bucket) + } + return fs.getObjectInfo(bucket, object) } @@ -380,17 +550,34 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io. if err = checkPutObjectArgs(bucket, object, fs); err != nil { return ObjectInfo{}, err } + + if _, err = fs.statBucketDir(bucket); err != nil { + return ObjectInfo{}, toObjectErr(err, bucket) + } + // No metadata is set, allocate a new one. if metadata == nil { metadata = make(map[string]string) } - uniqueID := mustGetUUID() + fsMeta := newFSMetaV1() + fsMeta.Meta = metadata + + var wlk *lock.LockedFile + if bucket != minioMetaBucket { + fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile) + wlk, err = fs.rwPool.Create(fsMetaPath) + if err != nil { + return ObjectInfo{}, toObjectErr(err, bucket, object) + } + // This close will allow for locks to be synchronized on `fs.json`. + defer wlk.Close() + } // Uploaded object will first be written to the temporary location which will eventually // be renamed to the actual location. It is first written to the temporary location // so that cleaning it up will be easy if the server goes down. 
- tempObj := uniqueID + tempObj := mustGetUUID() // Initialize md5 writer. md5Writer := md5.New() @@ -414,26 +601,17 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io. limitDataReader = data } - // Prepare file to avoid disk fragmentation - if size > 0 { - err = fs.storage.PrepareFile(minioMetaTmpBucket, tempObj, size) - if err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) - } - } - // Allocate a buffer to Read() from request body bufSize := int64(readSizeV1) if size > 0 && bufSize > size { bufSize = size } - buf := make([]byte, int(bufSize)) teeReader := io.TeeReader(limitDataReader, multiWriter) - var bytesWritten int64 - bytesWritten, err = fsCreateFile(fs.storage, teeReader, buf, minioMetaTmpBucket, tempObj) + fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj) + bytesWritten, err := fsCreateFile(fsTmpObjPath, teeReader, buf, size) if err != nil { - fs.storage.DeleteFile(minioMetaTmpBucket, tempObj) + fsRemoveFile(fsTmpObjPath) errorIf(err, "Failed to create object %s/%s", bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object) } @@ -441,14 +619,14 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io. // Should return IncompleteBody{} error when reader has fewer // bytes than specified in request header. if bytesWritten < size { - fs.storage.DeleteFile(minioMetaTmpBucket, tempObj) + fsRemoveFile(fsTmpObjPath) return ObjectInfo{}, traceError(IncompleteBody{}) } // Delete the temporary object in the case of a // failure. If PutObject succeeds, then there would be // nothing to delete. - defer fs.storage.DeleteFile(minioMetaTmpBucket, tempObj) + defer fsRemoveFile(fsTmpObjPath) newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil)) // Update the md5sum if not set with the newly calculated one. @@ -473,25 +651,26 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io. 
} // Entire object was written to the temp location, now it's safe to rename it to the actual location. - err = fs.storage.RenameFile(minioMetaTmpBucket, tempObj, bucket, object) + fsNSObjPath := pathJoin(fs.fsPath, bucket, object) + if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil { + return ObjectInfo{}, toObjectErr(err, bucket, object) + } + + if bucket != minioMetaBucket { + // Write FS metadata after a successful namespace operation. + if _, err = fsMeta.WriteTo(wlk); err != nil { + return ObjectInfo{}, toObjectErr(err, bucket, object) + } + } + + // Stat the file to fetch timestamp, size. + fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object)) if err != nil { return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) } - if bucket != minioMetaBucket { - // Save objects' metadata in `fs.json`. - // Skip creating fs.json if bucket is .minio.sys as the object would have been created - // by minio's S3 layer (ex. policy.json) - fsMeta := newFSMetaV1() - fsMeta.Meta = metadata - - fsMetaPath := path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile) - if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil { - return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) - } - } - - return fs.getObjectInfo(bucket, object) + // Success. + return fsMeta.ToObjectInfo(bucket, object, fi), nil } // DeleteObject - deletes an object from a bucket, this operation is destructive @@ -501,38 +680,96 @@ func (fs fsObjects) DeleteObject(bucket, object string) error { return err } + if _, err := fs.statBucketDir(bucket); err != nil { + return toObjectErr(err, bucket) + } + + minioMetaBucketDir := pathJoin(fs.fsPath, minioMetaBucket) + fsMetaPath := pathJoin(minioMetaBucketDir, bucketMetaPrefix, bucket, object, fsMetaJSONFile) if bucket != minioMetaBucket { - // We don't store fs.json for minio-S3-layer created files like policy.json, - // hence we don't try to delete fs.json for such files. 
- err := fs.storage.DeleteFile(minioMetaBucket, path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile)) + rwlk, lerr := fs.rwPool.Write(fsMetaPath) + if lerr == nil { + // This close will allow for fs locks to be synchronized on `fs.json`. + defer rwlk.Close() + } + if lerr != nil && lerr != errFileNotFound { + return toObjectErr(lerr, bucket, object) + } + } + + // Delete the object. + if err := fsDeleteFile(pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil { + return toObjectErr(traceError(err), bucket, object) + } + + if bucket != minioMetaBucket { + // Delete the metadata object. + err := fsDeleteFile(minioMetaBucketDir, fsMetaPath) if err != nil && err != errFileNotFound { return toObjectErr(traceError(err), bucket, object) } } - if err := fs.storage.DeleteFile(bucket, object); err != nil { - return toObjectErr(traceError(err), bucket, object) - } return nil } +// list of all errors that can be ignored in tree walk operation in FS +var fsTreeWalkIgnoredErrs = append(baseIgnoredErrs, []error{ + errFileNotFound, + errVolumeNotFound, +}...) + +// Returns function "listDir" of the type listDirFunc. +// isLeaf - is used by listDir function to check if an entry +// is a leaf or non-leaf entry. +func (fs fsObjects) listDirFactory(isLeaf isLeafFunc) listDirFunc { + // listDir - lists all the entries at a given prefix and given entry in the prefix. + listDir := func(bucket, prefixDir, prefixEntry string) (entries []string, delayIsLeaf bool, err error) { + entries, err = readDir(pathJoin(fs.fsPath, bucket, prefixDir)) + if err == nil { + // Listing needs to be sorted. + sort.Strings(entries) + + // Filter entries that have the prefix prefixEntry. + entries = filterMatchingPrefix(entries, prefixEntry) + + // Can isLeaf() check be delayed till when it has to be sent down the + // treeWalkResult channel? 
+ delayIsLeaf = delayIsLeafCheck(entries) + if delayIsLeaf { + return entries, delayIsLeaf, nil + } + + // isLeaf() check has to happen here so that trailing "/" for objects can be removed. + for i, entry := range entries { + if isLeaf(bucket, pathJoin(prefixDir, entry)) { + entries[i] = strings.TrimSuffix(entry, slashSeparator) + } + } + + // Sort again after removing trailing "/" for objects as the previous sort + // does not hold good anymore. + sort.Strings(entries) + + // Succes. + return entries, delayIsLeaf, nil + } // Return error at the end. + + // Error. + return nil, false, err + } + + // Return list factory instance. + return listDir +} + // ListObjects - list all objects at prefix upto maxKeys., optionally delimited by '/'. Maintains the list pool // state for future re-entrant list requests. func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { - // Convert entry to ObjectInfo - entryToObjectInfo := func(entry string) (objInfo ObjectInfo, err error) { - if strings.HasSuffix(entry, slashSeparator) { - // Object name needs to be full path. - objInfo.Name = entry - objInfo.IsDir = true - return - } - if objInfo, err = fs.getObjectInfo(bucket, entry); err != nil { - return ObjectInfo{}, err - } - return + if err := checkListObjsArgs(bucket, prefix, marker, delimiter, fs); err != nil { + return ListObjectsInfo{}, err } - if err := checkListObjsArgs(bucket, prefix, marker, delimiter, fs); err != nil { + if _, err := fs.statBucketDir(bucket); err != nil { return ListObjectsInfo{}, err } @@ -561,6 +798,24 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey recursive = false } + // Convert entry to ObjectInfo + entryToObjectInfo := func(entry string) (objInfo ObjectInfo, err error) { + if strings.HasSuffix(entry, slashSeparator) { + // Object name needs to be full path. + objInfo.Name = entry + objInfo.IsDir = true + return + } + // Stat the file to get file size. 
+ var fi os.FileInfo + fi, err = fsStatFile(pathJoin(fs.fsPath, bucket, entry)) + if err != nil { + return ObjectInfo{}, toObjectErr(traceError(err), bucket, entry) + } + fsMeta := fsMetaV1{} + return fsMeta.ToObjectInfo(bucket, entry, fi), nil + } + heal := false // true only for xl.ListObjectsHeal() walkResultCh, endWalkCh := fs.listPool.Release(listParams{bucket, recursive, marker, prefix, heal}) if walkResultCh == nil { @@ -571,12 +826,15 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey // object string does not end with "/". return !strings.HasSuffix(object, slashSeparator) } - listDir := listDirFactory(isLeaf, fsTreeWalkIgnoredErrs, fs.storage) + listDir := fs.listDirFactory(isLeaf) walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh) } + var objInfos []ObjectInfo var eof bool var nextMarker string + + // List until maxKeys requested. for i := 0; i < maxKeys; { walkResult, ok := <-walkResultCh if !ok { @@ -604,6 +862,8 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey } i++ } + + // Save list routine for the next marker if we haven't reached EOF. params := listParams{bucket, recursive, nextMarker, prefix, heal} if !eof { fs.listPool.Set(params, walkResultCh, endWalkCh) @@ -618,6 +878,8 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey } result.Objects = append(result.Objects, objInfo) } + + // Success. return result, nil } diff --git a/cmd/fs-v1_test.go b/cmd/fs-v1_test.go index 7624e2106..e0176d18e 100644 --- a/cmd/fs-v1_test.go +++ b/cmd/fs-v1_test.go @@ -18,81 +18,26 @@ package cmd import ( "bytes" + "fmt" + "os" "path/filepath" "testing" - "time" ) // TestNewFS - tests initialization of all input disks // and constructs a valid `FS` object layer. 
func TestNewFS(t *testing.T) { // Do not attempt to create this path, the test validates - // so that newFSObjects initializes non existing paths + // so that newFSObjectLayer initializes non existing paths // and successfully returns initialized object layer. disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) - // Setup to test errFSDiskFormat. - disks := []string{} - for i := 0; i < 6; i++ { - xlDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer removeAll(xlDisk) - disks = append(disks, xlDisk) - } - - endpoints, err := parseStorageEndpoints([]string{disk}) - if err != nil { - t.Fatal("Uexpected error: ", err) - } - - fsStorageDisks, err := initStorageDisks(endpoints) - if err != nil { - t.Fatal("Uexpected error: ", err) - } - - endpoints, err = parseStorageEndpoints(disks) - if err != nil { - t.Fatal("Uexpected error: ", err) - } - - xlStorageDisks, err := initStorageDisks(endpoints) - if err != nil { - t.Fatal("Uexpected error: ", err) - } - - // Initializes all disks with XL - formattedDisks, err := waitForFormatDisks(true, endpoints, xlStorageDisks) - if err != nil { - t.Fatalf("Unable to format XL %s", err) - } - _, err = newXLObjects(formattedDisks) - if err != nil { - t.Fatalf("Unable to initialize XL object, %s", err) - } - - testCases := []struct { - disk StorageAPI - expectedErr error - }{ - {fsStorageDisks[0], nil}, - {xlStorageDisks[0], errFSDiskFormat}, - } - - for _, testCase := range testCases { - if _, err = waitForFormatDisks(true, endpoints, []StorageAPI{testCase.disk}); err != testCase.expectedErr { - t.Errorf("expected: %s, got :%s", testCase.expectedErr, err) - } - } - _, err = newFSObjects(nil) + _, err := newFSObjectLayer("") if err != errInvalidArgument { t.Errorf("Expecting error invalid argument, got %s", err) } - _, err = newFSObjects(&retryStorage{ - remoteStorage: xlStorageDisks[0], - maxRetryAttempts: 1, - retryUnit: time.Millisecond, - retryCap: time.Millisecond * 10, - }) + _, 
err = newFSObjectLayer(disk) if err != nil { errMsg := "Unable to recognize backend format, Disk is not in FS format." if err.Error() == errMsg { @@ -113,10 +58,10 @@ func TestFSShutdown(t *testing.T) { bucketName := "testbucket" objectName := "object" // Create and return an fsObject with its path in the disk - prepareTest := func() (fsObjects, string) { + prepareTest := func() (*fsObjects, string) { disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + fs := obj.(*fsObjects) objectContent := "12345" obj.MakeBucket(bucketName) sha256sum := "" @@ -135,12 +80,10 @@ func TestFSShutdown(t *testing.T) { for i := 1; i <= 5; i++ { fs, disk := prepareTest() fs.DeleteObject(bucketName, objectName) - fsStorage := fs.storage.(*retryStorage) - fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil) - if err := fs.Shutdown(); errorCause(err) != errFaultyDisk { + removeAll(disk) + if err := fs.Shutdown(); err != nil { t.Fatal(i, ", Got unexpected fs shutdown error: ", err) } - removeAll(disk) } } @@ -150,26 +93,38 @@ func TestFSLoadFormatFS(t *testing.T) { disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) defer removeAll(disk) - obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + // Assign a new UUID. + uuid := mustGetUUID() - // Regular format loading - _, err := loadFormatFS(fs.storage) + // Initialize meta volume, if volume already exists ignores it. 
+ if err := initMetaVolumeFS(disk, uuid); err != nil { + t.Fatal(err) + } + + fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile) + if err := saveFormatFS(preparePath(fsFormatPath), newFSFormatV1()); err != nil { + t.Fatal("Should not fail here", err) + } + _, err := loadFormatFS(disk) if err != nil { t.Fatal("Should not fail here", err) } // Loading corrupted format file - fs.storage.AppendFile(minioMetaBucket, fsFormatJSONFile, []byte{'b'}) - _, err = loadFormatFS(fs.storage) + file, err := os.OpenFile(preparePath(fsFormatPath), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatal("Should not fail here", err) + } + file.Write([]byte{'b'}) + file.Close() + _, err = loadFormatFS(disk) if err == nil { t.Fatal("Should return an error here") } - // Loading format file from faulty disk - fsStorage := fs.storage.(*retryStorage) - fs.storage = newNaughtyDisk(fsStorage, nil, errFaultyDisk) - _, err = loadFormatFS(fs.storage) - if err != errFaultyDisk { - t.Fatal("Should return faulty disk error") + // Loading format file from disk not found. + removeAll(disk) + _, err = loadFormatFS(disk) + if err != nil && err != errUnformattedDisk { + t.Fatal("Should return unformatted disk, but got", err) } } @@ -180,7 +135,7 @@ func TestFSGetBucketInfo(t *testing.T) { defer removeAll(disk) obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + fs := obj.(*fsObjects) bucketName := "bucket" obj.MakeBucket(bucketName) @@ -200,14 +155,12 @@ func TestFSGetBucketInfo(t *testing.T) { t.Fatal("BucketNameInvalid error not returned") } - // Loading format file from faulty disk - fsStorage := fs.storage.(*retryStorage) - fs.storage = newNaughtyDisk(fsStorage, nil, errFaultyDisk) + // Check for buckets and should get disk not found. 
+ removeAll(disk) _, err = fs.GetBucketInfo(bucketName) - if errorCause(err) != errFaultyDisk { - t.Fatal("errFaultyDisk error not returned") + if !isSameType(errorCause(err), BucketNotFound{}) { + t.Fatal("BucketNotFound error not returned") } - } // TestFSDeleteObject - test fs.DeleteObject() with healthy and corrupted disks @@ -217,7 +170,7 @@ func TestFSDeleteObject(t *testing.T) { defer removeAll(disk) obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + fs := obj.(*fsObjects) bucketName := "bucket" objectName := "object" @@ -229,12 +182,16 @@ func TestFSDeleteObject(t *testing.T) { if err := fs.DeleteObject("fo", objectName); !isSameType(errorCause(err), BucketNameInvalid{}) { t.Fatal("Unexpected error: ", err) } + // Test with bucket does not exist + if err := fs.DeleteObject("foobucket", "fooobject"); !isSameType(errorCause(err), BucketNotFound{}) { + t.Fatal("Unexpected error: ", err) + } // Test with invalid object name if err := fs.DeleteObject(bucketName, "\\"); !isSameType(errorCause(err), ObjectNameInvalid{}) { t.Fatal("Unexpected error: ", err) } - // Test with inexist bucket/object - if err := fs.DeleteObject("foobucket", "fooobject"); !isSameType(errorCause(err), BucketNotFound{}) { + // Test with object does not exist. + if err := fs.DeleteObject(bucketName, "foooobject"); !isSameType(errorCause(err), ObjectNotFound{}) { t.Fatal("Unexpected error: ", err) } // Test with valid condition @@ -242,11 +199,12 @@ func TestFSDeleteObject(t *testing.T) { t.Fatal("Unexpected error: ", err) } - // Loading format file from faulty disk - fsStorage := fs.storage.(*retryStorage) - fs.storage = newNaughtyDisk(fsStorage, nil, errFaultyDisk) - if err := fs.DeleteObject(bucketName, objectName); errorCause(err) != errFaultyDisk { - t.Fatal("Unexpected error: ", err) + // Delete object should err disk not found. 
+ removeAll(disk) + if err := fs.DeleteObject(bucketName, objectName); err != nil { + if !isSameType(errorCause(err), BucketNotFound{}) { + t.Fatal("Unexpected error: ", err) + } } } @@ -258,7 +216,7 @@ func TestFSDeleteBucket(t *testing.T) { defer removeAll(disk) obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + fs := obj.(*fsObjects) bucketName := "bucket" err := obj.MakeBucket(bucketName) @@ -267,29 +225,27 @@ func TestFSDeleteBucket(t *testing.T) { } // Test with an invalid bucket name - if err := fs.DeleteBucket("fo"); !isSameType(errorCause(err), BucketNameInvalid{}) { + if err = fs.DeleteBucket("fo"); !isSameType(errorCause(err), BucketNameInvalid{}) { t.Fatal("Unexpected error: ", err) } // Test with an inexistant bucket - if err := fs.DeleteBucket("foobucket"); !isSameType(errorCause(err), BucketNotFound{}) { + if err = fs.DeleteBucket("foobucket"); !isSameType(errorCause(err), BucketNotFound{}) { t.Fatal("Unexpected error: ", err) } // Test with a valid case - if err := fs.DeleteBucket(bucketName); err != nil { + if err = fs.DeleteBucket(bucketName); err != nil { t.Fatal("Unexpected error: ", err) } obj.MakeBucket(bucketName) - // Loading format file from faulty disk - fsStorage := fs.storage.(*retryStorage) - for i := 1; i <= 2; i++ { - fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil) - if err := fs.DeleteBucket(bucketName); errorCause(err) != errFaultyDisk { + // Delete bucker should get error disk not found. 
+ removeAll(disk) + if err = fs.DeleteBucket(bucketName); err != nil { + if !isSameType(errorCause(err), BucketNotFound{}) { t.Fatal("Unexpected error: ", err) } } - } // TestFSListBuckets - tests for fs ListBuckets @@ -299,7 +255,7 @@ func TestFSListBuckets(t *testing.T) { defer removeAll(disk) obj := initFSObjects(disk, t) - fs := obj.(fsObjects) + fs := obj.(*fsObjects) bucketName := "bucket" if err := obj.MakeBucket(bucketName); err != nil { @@ -307,28 +263,40 @@ func TestFSListBuckets(t *testing.T) { } // Create a bucket with invalid name - if err := fs.storage.MakeVol("vo^"); err != nil { + if err := mkdirAll(pathJoin(fs.fsPath, "vo^"), 0777); err != nil { t.Fatal("Unexpected error: ", err) } + f, err := os.Create(pathJoin(fs.fsPath, "test")) + if err != nil { + t.Fatal("Unexpected error: ", err) + } + f.Close() - // Test + // Test list buckets to have only one entry. buckets, err := fs.ListBuckets() if err != nil { t.Fatal("Unexpected error: ", err) } if len(buckets) != 1 { - t.Fatal("ListBuckets not working properly") + t.Fatal("ListBuckets not working properly", buckets) } - // Test ListBuckets with faulty disks - fsStorage := fs.storage.(*retryStorage) - for i := 1; i <= 2; i++ { - fs.storage = newNaughtyDisk(fsStorage, nil, errFaultyDisk) - if _, err := fs.ListBuckets(); errorCause(err) != errFaultyDisk { + // Test ListBuckets with disk not found. 
+ removeAll(disk) + + if _, err := fs.ListBuckets(); err != nil { + if errorCause(err) != errDiskNotFound { t.Fatal("Unexpected error: ", err) } } + longPath := fmt.Sprintf("%0256d", 1) + fs.fsPath = longPath + if _, err := fs.ListBuckets(); err != nil { + if errorCause(err) != errFileNameTooLong { + t.Fatal("Unexpected error: ", err) + } + } } // TestFSHealObject - tests for fs HealObject diff --git a/cmd/globals.go b/cmd/globals.go index 1257b68f6..bae69452a 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -62,7 +62,8 @@ var ( globalConfigDir = mustGetConfigPath() // config-dir flag set via command line // Add new global flags here. - globalIsDistXL = false // "Is Distributed?" flag. + // Indicates if the running minio server is distributed setup. + globalIsDistXL = false // This flag is set to 'true' by default, it is set to `false` // when MINIO_BROWSER env is set to 'off'. diff --git a/cmd/lock-instrument.go b/cmd/lock-instrument.go index b11dd81e5..d39fb17ad 100644 --- a/cmd/lock-instrument.go +++ b/cmd/lock-instrument.go @@ -147,7 +147,7 @@ func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockSource, opsID stri // newDebugLockInfo - Constructs a debugLockInfo value given lock source, status and type. func newDebugLockInfo(lockSource string, status statusType, readLock bool) debugLockInfo { - lType := debugRLockStr + var lType lockType if readLock { lType = debugRLockStr } else { diff --git a/cmd/lock-instrument_test.go b/cmd/lock-instrument_test.go index d77030559..f06950c64 100644 --- a/cmd/lock-instrument_test.go +++ b/cmd/lock-instrument_test.go @@ -119,6 +119,39 @@ func verifyRPCLockInfoResponse(l lockStateCase, rpcLockInfoMap map[string]*Syste } } +// Read entire state of the locks in the system and return. 
+func getSystemLockState() (SystemLockState, error) { + globalNSMutex.lockMapMutex.Lock() + defer globalNSMutex.lockMapMutex.Unlock() + + lockState := SystemLockState{} + + lockState.TotalBlockedLocks = globalNSMutex.counters.blocked + lockState.TotalLocks = globalNSMutex.counters.total + lockState.TotalAcquiredLocks = globalNSMutex.counters.granted + + for param, debugLock := range globalNSMutex.debugLockMap { + volLockInfo := VolumeLockInfo{} + volLockInfo.Bucket = param.volume + volLockInfo.Object = param.path + volLockInfo.LocksOnObject = debugLock.counters.total + volLockInfo.TotalBlockedLocks = debugLock.counters.blocked + volLockInfo.LocksAcquiredOnObject = debugLock.counters.granted + for opsID, lockInfo := range debugLock.lockInfo { + volLockInfo.LockDetailsOnObject = append(volLockInfo.LockDetailsOnObject, OpsLockState{ + OperationID: opsID, + LockSource: lockInfo.lockSource, + LockType: lockInfo.lType, + Status: lockInfo.status, + Since: lockInfo.since, + Duration: time.Now().UTC().Sub(lockInfo.since), + }) + } + lockState.LocksInfoPerObject = append(lockState.LocksInfoPerObject, volLockInfo) + } + return lockState, nil +} + // Asserts the lock counter from the global globalNSMutex inmemory lock with the expected one. func verifyGlobalLockStats(l lockStateCase, t *testing.T, testNum int) { globalNSMutex.lockMapMutex.Lock() diff --git a/cmd/lock-rpc-client_test.go b/cmd/lock-rpc-client_test.go new file mode 100644 index 000000000..d8ecf1a4f --- /dev/null +++ b/cmd/lock-rpc-client_test.go @@ -0,0 +1,68 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "fmt" + "testing" + "time" + + "github.com/minio/dsync" +) + +// Tests lock rpc client. +func TestLockRPCClient(t *testing.T) { + lkClient := newLockRPCClient(authConfig{ + accessKey: "abcd", + secretKey: "abcd123", + serverAddr: fmt.Sprintf("%X", time.Now().UTC().UnixNano()), + serviceEndpoint: pathJoin(lockRPCPath, "/test/1"), + secureConn: false, + serviceName: "Dsync", + }) + + // Attempt all calls. + _, err := lkClient.RLock(dsync.LockArgs{}) + if err == nil { + t.Fatal("Expected for Rlock to fail") + } + + _, err = lkClient.Lock(dsync.LockArgs{}) + if err == nil { + t.Fatal("Expected for Lock to fail") + } + + _, err = lkClient.RUnlock(dsync.LockArgs{}) + if err == nil { + t.Fatal("Expected for RUnlock to fail") + } + + _, err = lkClient.Unlock(dsync.LockArgs{}) + if err == nil { + t.Fatal("Expected for Unlock to fail") + } + + _, err = lkClient.ForceUnlock(dsync.LockArgs{}) + if err == nil { + t.Fatal("Expected for ForceUnlock to fail") + } + + _, err = lkClient.Expired(dsync.LockArgs{}) + if err == nil { + t.Fatal("Expected for Expired to fail") + } +} diff --git a/cmd/lockinfo-handlers.go b/cmd/lockinfo-handlers.go index 16a915aa1..9fd98a65f 100644 --- a/cmd/lockinfo-handlers.go +++ b/cmd/lockinfo-handlers.go @@ -68,52 +68,6 @@ type OpsLockState struct { Duration time.Duration `json:"duration"` // Duration since the lock was held. } -// Read entire state of the locks in the system and return. 
-func getSystemLockState() (SystemLockState, error) { - globalNSMutex.lockMapMutex.Lock() - defer globalNSMutex.lockMapMutex.Unlock() - - // Fetch current time once instead of fetching system time for every lock. - timeNow := time.Now().UTC() - lockState := SystemLockState{ - TotalAcquiredLocks: globalNSMutex.counters.granted, - TotalLocks: globalNSMutex.counters.total, - TotalBlockedLocks: globalNSMutex.counters.blocked, - } - - var totalReadLocks, totalWriteLocks int64 - - for param, debugLock := range globalNSMutex.debugLockMap { - volLockInfo := VolumeLockInfo{} - volLockInfo.Bucket = param.volume - volLockInfo.Object = param.path - volLockInfo.LocksOnObject = debugLock.counters.total - volLockInfo.TotalBlockedLocks = debugLock.counters.blocked - volLockInfo.LocksAcquiredOnObject = debugLock.counters.granted - for opsID, lockInfo := range debugLock.lockInfo { - volLockInfo.LockDetailsOnObject = append(volLockInfo.LockDetailsOnObject, OpsLockState{ - OperationID: opsID, - LockSource: lockInfo.lockSource, - LockType: lockInfo.lType, - Status: lockInfo.status, - Since: lockInfo.since, - Duration: timeNow.Sub(lockInfo.since), - }) - switch lockInfo.lType { - case debugRLockStr: - totalReadLocks++ - case debugWLockStr: - totalWriteLocks++ - } - } - volLockInfo.TotalReadLocks = totalReadLocks - volLockInfo.TotalWriteLocks = totalWriteLocks - - lockState.LocksInfoPerObject = append(lockState.LocksInfoPerObject, volLockInfo) - } - return lockState, nil -} - // listLocksInfo - Fetches locks held on bucket, matching prefix older than relTime. func listLocksInfo(bucket, prefix string, relTime time.Duration) []VolumeLockInfo { globalNSMutex.lockMapMutex.Lock() diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go index 316b3fba4..492e45ac4 100644 --- a/cmd/namespace-lock.go +++ b/cmd/namespace-lock.go @@ -28,6 +28,14 @@ import ( // Global name space lock. var globalNSMutex *nsLockMap +// RWLocker - locker interface extends sync.Locker +// to introduce RLock, RUnlock. 
+type RWLocker interface { + sync.Locker + RLock() + RUnlock() +} + // Initialize distributed locking only in case of distributed setup. // Returns if the setup is distributed or not on success. func initDsyncNodes(eps []*url.URL) error { @@ -68,13 +76,6 @@ func initNSLock(isDistXL bool) { globalNSMutex.debugLockMap = make(map[nsParam]*debugLockInfoPerVolumePath) } -// RWLocker - interface that any read-write locking library should implement. -type RWLocker interface { - sync.Locker - RLock() - RUnlock() -} - // nsParam - carries name space resource. type nsParam struct { volume string @@ -94,8 +95,7 @@ type nsLockMap struct { counters *lockStat debugLockMap map[nsParam]*debugLockInfoPerVolumePath // Info for instrumentation on locks. - // Indicates whether the locking service is part - // of a distributed setup or not. + // Indicates if namespace is part of a distributed setup. isDistXL bool lockMap map[nsParam]*nsLock lockMapMutex sync.Mutex @@ -256,14 +256,14 @@ func (n *nsLockMap) ForceUnlock(volume, path string) { // lockInstance - frontend/top-level interface for namespace locks. type lockInstance struct { - n *nsLockMap + ns *nsLockMap volume, path, opsID string } // NewNSLock - returns a lock instance for a given volume and // path. The returned lockInstance object encapsulates the nsLockMap, // volume, path and operation ID. -func (n *nsLockMap) NewNSLock(volume, path string) *lockInstance { +func (n *nsLockMap) NewNSLock(volume, path string) RWLocker { return &lockInstance{n, volume, path, getOpsID()} } @@ -271,24 +271,24 @@ func (n *nsLockMap) NewNSLock(volume, path string) *lockInstance { func (li *lockInstance) Lock() { lockSource := callerSource() readLock := false - li.n.lock(li.volume, li.path, lockSource, li.opsID, readLock) + li.ns.lock(li.volume, li.path, lockSource, li.opsID, readLock) } // Unlock - block until write lock is released. 
func (li *lockInstance) Unlock() { readLock := false - li.n.unlock(li.volume, li.path, li.opsID, readLock) + li.ns.unlock(li.volume, li.path, li.opsID, readLock) } // RLock - block until read lock is taken. func (li *lockInstance) RLock() { lockSource := callerSource() readLock := true - li.n.lock(li.volume, li.path, lockSource, li.opsID, readLock) + li.ns.lock(li.volume, li.path, lockSource, li.opsID, readLock) } // RUnlock - block until read lock is released. func (li *lockInstance) RUnlock() { readLock := true - li.n.unlock(li.volume, li.path, li.opsID, readLock) + li.ns.unlock(li.volume, li.path, li.opsID, readLock) } diff --git a/cmd/object-api-common.go b/cmd/object-api-common.go index 5fa038f8c..aaf2d8c43 100644 --- a/cmd/object-api-common.go +++ b/cmd/object-api-common.go @@ -48,6 +48,12 @@ func init() { globalObjLayerMutex = &sync.Mutex{} } +// Check if the disk is remote. +func isRemoteDisk(disk StorageAPI) bool { + _, ok := disk.(*networkStorage) + return ok +} + // House keeping code for FS/XL and distributed Minio setup. func houseKeeping(storageDisks []StorageAPI) error { var wg = &sync.WaitGroup{} @@ -60,8 +66,8 @@ func houseKeeping(storageDisks []StorageAPI) error { if disk == nil { continue } - if _, ok := disk.(*networkStorage); ok { - // Skip remote disks. + // Skip remote disks. + if isRemoteDisk(disk) { continue } wg.Add(1) diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go index e98e08385..78a62c1b2 100644 --- a/cmd/object-api-errors.go +++ b/cmd/object-api-errors.go @@ -352,16 +352,6 @@ func isErrBucketPolicyNotFound(err error) bool { return false } -// Check if error type is ObjectNameInvalid. -func isErrObjectNameInvalid(err error) bool { - err = errorCause(err) - switch err.(type) { - case ObjectNameInvalid: - return true - } - return false -} - // Check if error type is ObjectNotFound. 
func isErrObjectNotFound(err error) bool { err = errorCause(err) diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go index 77651a20a..8b3d4e94d 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -571,16 +571,11 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) { // Initialize FS backend for the benchmark. func initFSObjectsB(disk string, t *testing.B) (obj ObjectLayer) { - endPoints, err := parseStorageEndpoints([]string{disk}) + var err error + obj, err = newFSObjectLayer(disk) if err != nil { t.Fatal("Unexpected err: ", err) } - - obj, _, err = initObjectLayer(endPoints) - if err != nil { - t.Fatal("Unexpected err: ", err) - } - return obj } diff --git a/cmd/object-api-multipart-common.go b/cmd/object-api-multipart-common.go index 2744a9f09..b515d5b5e 100644 --- a/cmd/object-api-multipart-common.go +++ b/cmd/object-api-multipart-common.go @@ -18,10 +18,14 @@ package cmd import ( "encoding/json" + "io" + "io/ioutil" "path" "sort" "sync" "time" + + "github.com/minio/minio/pkg/lock" ) // A uploadInfo represents the s3 compatible spec. @@ -67,6 +71,44 @@ func (u *uploadsV1) RemoveUploadID(uploadID string) { } } +// IsEmpty - is true if no more uploads available. +func (u *uploadsV1) IsEmpty() bool { + return len(u.Uploads) == 0 +} + +func (u *uploadsV1) WriteTo(writer io.Writer) (n int64, err error) { + // Serialize to prepare to write to disk. 
+ var uplBytes []byte + uplBytes, err = json.Marshal(u) + if err != nil { + return 0, traceError(err) + } + if err = writer.(*lock.LockedFile).Truncate(0); err != nil { + return 0, traceError(err) + } + _, err = writer.Write(uplBytes) + if err != nil { + return 0, traceError(err) + } + return int64(len(uplBytes)), nil +} + +func (u *uploadsV1) ReadFrom(reader io.Reader) (n int64, err error) { + var uploadIDBytes []byte + uploadIDBytes, err = ioutil.ReadAll(reader) + if err != nil { + return 0, traceError(err) + } + if len(uploadIDBytes) == 0 { + return 0, traceError(io.EOF) + } + // Decode `uploads.json`. + if err = json.Unmarshal(uploadIDBytes, u); err != nil { + return 0, traceError(err) + } + return int64(len(uploadIDBytes)), nil +} + // readUploadsJSON - get all the saved uploads JSON. func readUploadsJSON(bucket, object string, disk StorageAPI) (uploadIDs uploadsV1, err error) { uploadJSONPath := path.Join(bucket, object, uploadsJSONFile) @@ -100,8 +142,7 @@ func writeUploadJSON(u *uploadsV1, uploadsPath, tmpPath string, disk StorageAPI) return traceError(wErr) } - // Write `uploads.json` to disk. First to tmp location and - // then rename. + // Write `uploads.json` to disk. First to tmp location and then rename. if wErr = disk.AppendFile(minioMetaTmpBucket, tmpPath, uplBytes); wErr != nil { return traceError(wErr) } diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index e8f2d8bf8..f15cad520 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -1207,7 +1207,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan } for i, testCase := range testCases { - // fmt.Println(testCase) // uncomment to peek into the test cases. + // fmt.Println(i+1, testCase) // uncomment to peek into the test cases. 
actualResult, actualErr := obj.ListMultipartUploads(testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads) if actualErr != nil && testCase.shouldPass { t.Errorf("Test %d: %s: Expected to pass, but failed with: %s", i+1, instanceType, actualErr.Error()) @@ -1520,7 +1520,7 @@ func TestListObjectParts(t *testing.T) { ExecObjectLayerTest(t, testListObjectParts) } -// testListMultipartUploads - Tests validate listing of multipart uploads. +// testListObjectParts - test validate listing of object parts. func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler) { bucketNames := []string{"minio-bucket", "minio-2-bucket"} diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 831ccf818..e8490c138 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -770,8 +770,8 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite md5Sum, err = objectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts) if err != nil { - err = errorCause(err) errorIf(err, "Unable to complete multipart upload.") + err = errorCause(err) switch oErr := err.(type) { case PartTooSmall: // Write part too small error. diff --git a/cmd/object_api_suite_test.go b/cmd/object_api_suite_test.go index c74841a02..5166f65c4 100644 --- a/cmd/object_api_suite_test.go +++ b/cmd/object_api_suite_test.go @@ -715,6 +715,16 @@ func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, c TestE } } +// Check if error type is ObjectNameInvalid. +func isErrObjectNameInvalid(err error) bool { + err = errorCause(err) + switch err.(type) { + case ObjectNameInvalid: + return true + } + return false +} + // Wrapper for calling testGetDirectoryReturnsObjectNotFound for both XL and FS. 
func (s *ObjectLayerAPISuite) TestGetDirectoryReturnsObjectNotFound(c *C) { ExecObjectLayerTest(c, testGetDirectoryReturnsObjectNotFound) diff --git a/cmd/posix-errors.go b/cmd/posix-errors.go index deed81c5a..dfa583257 100644 --- a/cmd/posix-errors.go +++ b/cmd/posix-errors.go @@ -42,6 +42,17 @@ func isSysErrIO(err error) bool { return err == syscall.EIO } +// Check if the given error corresponds to EISDIR (is a directory). +func isSysErrIsDir(err error) bool { + if pathErr, ok := err.(*os.PathError); ok { + switch pathErr.Err { + case syscall.EISDIR: + return true + } + } + return false +} + // Check if the given error corresponds to ENOTDIR (is not a directory). func isSysErrNotDir(err error) bool { if pathErr, ok := err.(*os.PathError); ok { diff --git a/cmd/posix.go b/cmd/posix.go index 230d43048..bdfb9652b 100644 --- a/cmd/posix.go +++ b/cmd/posix.go @@ -652,7 +652,6 @@ func (s *posix) createFile(volume, path string) (f *os.File, err error) { // PrepareFile - run prior actions before creating a new file for optimization purposes // Currently we use fallocate when available to avoid disk fragmentation as much as possible func (s *posix) PrepareFile(volume, path string, fileSize int64) (err error) { - // It doesn't make sense to create a negative-sized file if fileSize <= 0 { return errInvalidArgument diff --git a/cmd/posix_test.go b/cmd/posix_test.go index 6e0390232..f1dc0b428 100644 --- a/cmd/posix_test.go +++ b/cmd/posix_test.go @@ -43,8 +43,8 @@ func newPosixTestSetup() (StorageAPI, string, error) { return posixStorage, diskPath, nil } -// Tests posix.getDiskInfo() -func TestGetDiskInfo(t *testing.T) { +// TestPosixs posix.getDiskInfo() +func TestPosixGetDiskInfo(t *testing.T) { path, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create a temporary directory, %s", err) @@ -67,8 +67,8 @@ func TestGetDiskInfo(t *testing.T) { } } -// TestReadAll - Tests the functionality implemented by posix ReadAll storage API. 
-func TestReadAll(t *testing.T) { +// TestPosixReadAll - TestPosixs the functionality implemented by posix ReadAll storage API. +func TestPosixReadAll(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -91,27 +91,27 @@ func TestReadAll(t *testing.T) { t.Fatalf("Unable to create a file \"as-file-parent\", %s", err) } - // Testcases to validate different conditions for ReadAll API. + // TestPosixcases to validate different conditions for ReadAll API. testCases := []struct { volume string path string err error }{ - // Test case - 1. + // TestPosix case - 1. // Validate volume does not exist. { volume: "i-dont-exist", path: "", err: errVolumeNotFound, }, - // Test case - 2. + // TestPosix case - 2. // Validate bad condition file does not exist. { volume: "exists", path: "as-file-not-found", err: errFileNotFound, }, - // Test case - 3. + // TestPosix case - 3. // Validate bad condition file exists as prefix/directory and // we are attempting to read it. { @@ -119,21 +119,21 @@ func TestReadAll(t *testing.T) { path: "as-directory", err: errFileNotFound, }, - // Test case - 4. + // TestPosix case - 4. { volume: "exists", path: "as-file-parent/as-file", err: errFileNotFound, }, - // Test case - 5. + // TestPosix case - 5. // Validate the good condition file exists and we are able to read it. { volume: "exists", path: "as-file", err: nil, }, - // Test case - 6. - // Test case with invalid volume name. + // TestPosix case - 6. + // TestPosix case with invalid volume name. 
{ volume: "ab", path: "as-file", @@ -146,15 +146,15 @@ func TestReadAll(t *testing.T) { for i, testCase := range testCases { dataRead, err = posixStorage.ReadAll(testCase.volume, testCase.path) if err != testCase.err { - t.Fatalf("Test %d: Expected err \"%s\", got err \"%s\"", i+1, testCase.err, err) + t.Fatalf("TestPosix %d: Expected err \"%s\", got err \"%s\"", i+1, testCase.err, err) } if err == nil { if string(dataRead) != string([]byte("Hello, World")) { - t.Errorf("Test %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead)) + t.Errorf("TestPosix %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead)) } } } - // Testing for faulty disk. + // TestPosixing for faulty disk. // Setting ioErrCount > maxAllowedIOError. if posixType, ok := posixStorage.(*posix); ok { // setting the io error count from as specified in the test case. @@ -168,8 +168,8 @@ func TestReadAll(t *testing.T) { } } -// TestNewPosix all the cases handled in posix storage layer initialization. -func TestNewPosix(t *testing.T) { +// TestPosixNewPosix all the cases handled in posix storage layer initialization. +func TestPosixNewPosix(t *testing.T) { // Temporary dir name. tmpDirName := globalTestTmpDir + "/" + "minio-" + nextSuffix() // Temporary file name. @@ -207,14 +207,14 @@ func TestNewPosix(t *testing.T) { // Initialize a new posix layer. _, err := newPosix(testCase.name) if err != testCase.err { - t.Fatalf("Test %d failed wanted: %s, got: %s", i+1, err, testCase.err) + t.Fatalf("TestPosix %d failed wanted: %s, got: %s", i+1, err, testCase.err) } } } -// TestMakeVol - Test validate the logic for creation of new posix volume. +// TestPosixMakeVol - TestPosix validate the logic for creation of new posix volume. // Asserts the failures too against the expected failures. 
-func TestMakeVol(t *testing.T) { +func TestPosixMakeVol(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -237,35 +237,35 @@ func TestMakeVol(t *testing.T) { ioErrCount int expectedErr error }{ - // Test case - 1. + // TestPosix case - 1. // A valid case, volume creation is expected to succeed. { volName: "success-vol", ioErrCount: 0, expectedErr: nil, }, - // Test case - 2. + // TestPosix case - 2. // Case where a file exists by the name of the volume to be created. { volName: "vol-as-file", ioErrCount: 0, expectedErr: errVolumeExists, }, - // Test case - 3. + // TestPosix case - 3. { volName: "existing-vol", ioErrCount: 0, expectedErr: errVolumeExists, }, - // Test case - 4. + // TestPosix case - 4. // IO error > maxAllowedIOError, should fail with errFaultyDisk. { volName: "vol", ioErrCount: 6, expectedErr: errFaultyDisk, }, - // Test case - 5. - // Test case with invalid volume name. + // TestPosix case - 5. + // TestPosix case with invalid volume name. { volName: "ab", ioErrCount: 0, @@ -281,11 +281,11 @@ func TestMakeVol(t *testing.T) { t.Errorf("Expected the StorageAPI to be of type *posix") } if err := posixStorage.MakeVol(testCase.volName); err != testCase.expectedErr { - t.Fatalf("Test %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestPosix %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } - // Test for permission denied. + // TestPosix for permission denied. if runtime.GOOS != "windows" { // Initialize posix storage layer for permission denied error. posix, err := newPosix("/usr") @@ -299,8 +299,8 @@ func TestMakeVol(t *testing.T) { } } -// TestDeleteVol - Validates the expected behaviour of posix.DeleteVol for various cases. -func TestDeleteVol(t *testing.T) { +// TestPosixDeleteVol - Validates the expected behaviour of posix.DeleteVol for various cases. 
+func TestPosixDeleteVol(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -313,7 +313,7 @@ func TestDeleteVol(t *testing.T) { t.Fatalf("Unable to create volume, %s", err) } - // Test failure cases. + // TestPosix failure cases. vol := slashpath.Join(path, "nonempty-vol") if err = os.Mkdir(vol, 0777); err != nil { t.Fatalf("Unable to create directory, %s", err) @@ -327,35 +327,35 @@ func TestDeleteVol(t *testing.T) { ioErrCount int expectedErr error }{ - // Test case - 1. + // TestPosix case - 1. // A valida case. Empty vol, should be possible to delete. { volName: "success-vol", ioErrCount: 0, expectedErr: nil, }, - // Test case - 2. + // TestPosix case - 2. // volume is non-existent. { volName: "nonexistent-vol", ioErrCount: 0, expectedErr: errVolumeNotFound, }, - // Test case - 3. + // TestPosix case - 3. // It shouldn't be possible to delete an non-empty volume, validating the same. { volName: "nonempty-vol", ioErrCount: 0, expectedErr: errVolumeNotEmpty, }, - // Test case - 4. + // TestPosix case - 4. // IO error > maxAllowedIOError, should fail with errFaultyDisk. { volName: "my-disk", ioErrCount: 6, expectedErr: errFaultyDisk, }, - // Test case - 5. + // TestPosix case - 5. // Invalid volume name. { volName: "ab", @@ -372,11 +372,11 @@ func TestDeleteVol(t *testing.T) { t.Errorf("Expected the StorageAPI to be of type *posix") } if err = posixStorage.DeleteVol(testCase.volName); err != testCase.expectedErr { - t.Fatalf("Test: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err) + t.Fatalf("TestPosix: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err) } } - // Test for permission denied. + // TestPosix for permission denied. if runtime.GOOS != "windows" { // Initialize posix storage layer for permission denied error. posixStorage, err = newPosix("/usr") @@ -396,7 +396,7 @@ func TestDeleteVol(t *testing.T) { // removing the disk, used to recreate disk not found error. 
removeAll(diskPath) - // Test for delete on an removed disk. + // TestPosix for delete on an removed disk. // should fail with disk not found. err = posixDeletedStorage.DeleteVol("Del-Vol") if err != errDiskNotFound { @@ -404,8 +404,8 @@ func TestDeleteVol(t *testing.T) { } } -// TestStatVol - Tests validate the volume info returned by posix.StatVol() for various inputs. -func TestStatVol(t *testing.T) { +// TestPosixStatVol - TestPosixs validate the volume info returned by posix.StatVol() for various inputs. +func TestPosixStatVol(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -423,25 +423,25 @@ func TestStatVol(t *testing.T) { ioErrCount int expectedErr error }{ - // Test case - 1. + // TestPosix case - 1. { volName: "success-vol", ioErrCount: 0, expectedErr: nil, }, - // Test case - 2. + // TestPosix case - 2. { volName: "nonexistent-vol", ioErrCount: 0, expectedErr: errVolumeNotFound, }, - // Test case - 3. + // TestPosix case - 3. { volName: "success-vol", ioErrCount: 6, expectedErr: errFaultyDisk, }, - // Test case - 4. + // TestPosix case - 4. { volName: "ab", ioErrCount: 0, @@ -460,12 +460,12 @@ func TestStatVol(t *testing.T) { } volInfo, err = posixStorage.StatVol(testCase.volName) if err != testCase.expectedErr { - t.Fatalf("Test case : %d, Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestPosix case : %d, Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } if err == nil { if volInfo.Name != volInfo.Name { - t.Errorf("Test case %d: Expected the volume name to be \"%s\", instead found \"%s\"", i+1, volInfo.Name, volInfo.Name) + t.Errorf("TestPosix case %d: Expected the volume name to be \"%s\", instead found \"%s\"", i+1, volInfo.Name, volInfo.Name) } } } @@ -477,7 +477,7 @@ func TestStatVol(t *testing.T) { // removing the disk, used to recreate disk not found error. removeAll(diskPath) - // Test for delete on an removed disk. 
+ // TestPosix for delete on an removed disk. // should fail with disk not found. _, err = posixDeletedStorage.StatVol("Stat vol") if err != errDiskNotFound { @@ -485,8 +485,8 @@ func TestStatVol(t *testing.T) { } } -// TestListVols - Validates the result and the error output for posix volume listing functionality posix.ListVols(). -func TestListVols(t *testing.T) { +// TestPosixListVols - Validates the result and the error output for posix volume listing functionality posix.ListVols(). +func TestPosixListVols(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -494,14 +494,14 @@ func TestListVols(t *testing.T) { } var volInfo []VolInfo - // Test empty list vols. + // TestPosix empty list vols. if volInfo, err = posixStorage.ListVols(); err != nil { t.Fatalf("expected: , got: %s", err) } else if len(volInfo) != 0 { t.Fatalf("expected: [], got: %s", volInfo) } - // Test non-empty list vols. + // TestPosix non-empty list vols. if err = posixStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -538,8 +538,8 @@ func TestListVols(t *testing.T) { } } -// TestPosixListDir - Tests validate the directory listing functionality provided by posix.ListDir . -func TestPosixListDir(t *testing.T) { +// TestPosixPosixListDir - TestPosixs validate the directory listing functionality provided by posix.ListDir . +func TestPosixPosixListDir(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -573,7 +573,7 @@ func TestPosixListDir(t *testing.T) { expectedListDir []string expectedErr error }{ - // Test case - 1. + // TestPosix case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", @@ -582,7 +582,7 @@ func TestPosixListDir(t *testing.T) { expectedListDir: []string{"def/", "xyz/"}, expectedErr: nil, }, - // Test case - 1. + // TestPosix case - 1. // valid case with existing volume and file to delete. 
{ srcVol: "success-vol", @@ -591,7 +591,7 @@ func TestPosixListDir(t *testing.T) { expectedListDir: []string{"ghi/"}, expectedErr: nil, }, - // Test case - 1. + // TestPosix case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", @@ -600,31 +600,31 @@ func TestPosixListDir(t *testing.T) { expectedListDir: []string{"success-file"}, expectedErr: nil, }, - // Test case - 2. + // TestPosix case - 2. { srcVol: "success-vol", srcPath: "abcdef", ioErrCnt: 0, expectedErr: errFileNotFound, }, - // Test case - 3. - // Test case with invalid volume name. + // TestPosix case - 3. + // TestPosix case with invalid volume name. { srcVol: "ab", srcPath: "success-file", ioErrCnt: 0, expectedErr: errInvalidArgument, }, - // Test case - 4. - // Test case with io error count > max limit. + // TestPosix case - 4. + // TestPosix case with io error count > max limit. { srcVol: "success-vol", srcPath: "success-file", ioErrCnt: 6, expectedErr: errFaultyDisk, }, - // Test case - 5. - // Test case with non existent volume. + // TestPosix case - 5. + // TestPosix case with non existent volume. { srcVol: "non-existent-vol", srcPath: "success-file", @@ -644,18 +644,18 @@ func TestPosixListDir(t *testing.T) { } dirList, err = posixStorage.ListDir(testCase.srcVol, testCase.srcPath) if err != testCase.expectedErr { - t.Fatalf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } if err == nil { for _, expected := range testCase.expectedListDir { if !strings.Contains(strings.Join(dirList, ","), expected) { - t.Errorf("Test case %d: Expected the directory listing to be \"%v\", but got \"%v\"", i+1, testCase.expectedListDir, dirList) + t.Errorf("TestPosix case %d: Expected the directory listing to be \"%v\", but got \"%v\"", i+1, testCase.expectedListDir, dirList) } } } } - // Test for permission denied. + // TestPosix for permission denied. 
if runtime.GOOS != "windows" { // Initialize posix storage layer for permission denied error. posixStorage, err = newPosix("/usr") @@ -668,7 +668,7 @@ func TestPosixListDir(t *testing.T) { } } - // Test for delete on an removed disk. + // TestPosix for delete on an removed disk. // should fail with disk not found. err = posixDeletedStorage.DeleteFile("del-vol", "my-file") if err != errDiskNotFound { @@ -676,8 +676,8 @@ func TestPosixListDir(t *testing.T) { } } -// TestDeleteFile - Series of test cases construct valid and invalid input data and validates the result and the error response. -func TestDeleteFile(t *testing.T) { +// TestPosixDeleteFile - Series of test cases construct valid and invalid input data and validates the result and the error response. +func TestPosixDeleteFile(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -706,7 +706,7 @@ func TestDeleteFile(t *testing.T) { ioErrCnt int expectedErr error }{ - // Test case - 1. + // TestPosix case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", @@ -714,7 +714,7 @@ func TestDeleteFile(t *testing.T) { ioErrCnt: 0, expectedErr: nil, }, - // Test case - 2. + // TestPosix case - 2. // The file was deleted in the last case, so DeleteFile should fail. { srcVol: "success-vol", @@ -722,32 +722,32 @@ func TestDeleteFile(t *testing.T) { ioErrCnt: 0, expectedErr: errFileNotFound, }, - // Test case - 3. - // Test case with io error count > max limit. + // TestPosix case - 3. + // TestPosix case with io error count > max limit. { srcVol: "success-vol", srcPath: "success-file", ioErrCnt: 6, expectedErr: errFaultyDisk, }, - // Test case - 4. - // Test case with segment of the volume name > 255. + // TestPosix case - 4. + // TestPosix case with segment of the volume name > 255. 
{ srcVol: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", srcPath: "success-file", ioErrCnt: 0, expectedErr: errInvalidArgument, }, - // Test case - 5. - // Test case with non-existent volume. + // TestPosix case - 5. + // TestPosix case with non-existent volume. { srcVol: "non-existent-vol", srcPath: "success-file", ioErrCnt: 0, expectedErr: errVolumeNotFound, }, - // Test case - 6. - // Test case with src path segment > 255. + // TestPosix case - 6. + // TestPosix case with src path segment > 255. { srcVol: "success-vol", srcPath: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", @@ -765,11 +765,11 @@ func TestDeleteFile(t *testing.T) { t.Errorf("Expected the StorageAPI to be of type *posix") } if err = posixStorage.DeleteFile(testCase.srcVol, testCase.srcPath); err != testCase.expectedErr { - t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Errorf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } - // Test for permission denied. + // TestPosix for permission denied. if runtime.GOOS != "windows" { // Initialize posix storage layer for permission denied error. posixStorage, err = newPosix("/usr") @@ -782,7 +782,7 @@ func TestDeleteFile(t *testing.T) { } } - // Test for delete on an removed disk. + // TestPosix for delete on an removed disk. // should fail with disk not found. 
err = posixDeletedStorage.DeleteFile("del-vol", "my-file") if err != errDiskNotFound { @@ -790,8 +790,8 @@ func TestDeleteFile(t *testing.T) { } } -// TestReadFile - Tests posix.ReadFile with wide range of cases and asserts the result and error response. -func TestReadFile(t *testing.T) { +// TestPosixReadFile - TestPosixs posix.ReadFile with wide range of cases and asserts the result and error response. +func TestPosixReadFile(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -984,7 +984,7 @@ func TestReadFile(t *testing.T) { } } - // Test for permission denied. + // TestPosix for permission denied. if runtime.GOOS == "linux" { // Initialize posix storage layer for permission denied error. posixStorage, err = newPosix("/") @@ -1000,7 +1000,7 @@ func TestReadFile(t *testing.T) { } } - // Testing for faulty disk. + // TestPosixing for faulty disk. // setting ioErrCnt to 6. // should fail with errFaultyDisk. if posixType, ok := posixStorage.(*posix); ok { @@ -1017,8 +1017,8 @@ func TestReadFile(t *testing.T) { } } -// Test posix.AppendFile() -func TestAppendFile(t *testing.T) { +// TestPosix posix.AppendFile() +func TestPosixAppendFile(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -1042,9 +1042,9 @@ func TestAppendFile(t *testing.T) { }{ {"myobject", nil}, {"path/to/my/object", nil}, - // Test to append to previously created file. + // TestPosix to append to previously created file. {"myobject", nil}, - // Test to use same path of previously created file. + // TestPosix to use same path of previously created file. {"path/to/my/testobject", nil}, // One path segment length is 255 chars long. 
{"path/to/my/object000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", nil}, @@ -1074,7 +1074,7 @@ func TestAppendFile(t *testing.T) { } } - // Test for permission denied. + // TestPosix for permission denied. if runtime.GOOS != "windows" { // Initialize posix storage layer for permission denied error. posixStorage, err = newPosix("/usr") @@ -1086,14 +1086,14 @@ func TestAppendFile(t *testing.T) { t.Errorf("expected: Permission error, got: %s", err) } } - // Test case with invalid volume name. + // TestPosix case with invalid volume name. // A valid volume name should be atleast of size 3. err = posixStorage.AppendFile("bn", "yes", []byte("hello, world")) if err != errInvalidArgument { t.Fatalf("expected: \"Invalid argument error\", got: \"%s\"", err) } - // Test case with IO error count > max limit. + // TestPosix case with IO error count > max limit. // setting ioErrCnt to 6. // should fail with errFaultyDisk. @@ -1109,8 +1109,8 @@ func TestAppendFile(t *testing.T) { } } -// Test posix.PrepareFile() -func TestPrepareFile(t *testing.T) { +// TestPosix posix.PrepareFile() +func TestPosixPrepareFile(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -1133,9 +1133,9 @@ func TestPrepareFile(t *testing.T) { }{ {"myobject", nil}, {"path/to/my/object", nil}, - // Test to append to previously created file. + // TestPosix to append to previously created file. {"myobject", nil}, - // Test to use same path of previously created file. + // TestPosix to use same path of previously created file. {"path/to/my/testobject", nil}, {"object-as-dir", errIsNotRegular}, // path segment uses previously uploaded object. @@ -1161,7 +1161,7 @@ func TestPrepareFile(t *testing.T) { } } - // Test for permission denied. 
+ // TestPosix for permission denied. if runtime.GOOS != "windows" { // Initialize posix storage layer for permission denied error. posixStorage, err = newPosix("/usr") @@ -1174,20 +1174,20 @@ func TestPrepareFile(t *testing.T) { } } - // Test case with invalid file size which should be strictly positive + // TestPosix case with invalid file size which should be strictly positive err = posixStorage.PrepareFile("bn", "yes", -3) if err != errInvalidArgument { t.Fatalf("should fail: %v", err) } - // Test case with invalid volume name. + // TestPosix case with invalid volume name. // A valid volume name should be atleast of size 3. err = posixStorage.PrepareFile("bn", "yes", 16) if err != errInvalidArgument { t.Fatalf("expected: \"Invalid argument error\", got: \"%s\"", err) } - // Test case with IO error count > max limit. + // TestPosix case with IO error count > max limit. // setting ioErrCnt to 6. // should fail with errFaultyDisk. @@ -1203,8 +1203,8 @@ func TestPrepareFile(t *testing.T) { } } -// Test posix.RenameFile() -func TestRenameFile(t *testing.T) { +// TestPosix posix.RenameFile() +func TestPosixRenameFile(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -1250,7 +1250,7 @@ func TestRenameFile(t *testing.T) { ioErrCnt int expectedErr error }{ - // Test case - 1. + // TestPosix case - 1. { srcVol: "src-vol", destVol: "dest-vol", @@ -1259,7 +1259,7 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: nil, }, - // Test case - 2. + // TestPosix case - 2. { srcVol: "src-vol", destVol: "dest-vol", @@ -1268,8 +1268,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: nil, }, - // Test case - 3. - // Test to overwrite destination file. + // TestPosix case - 3. + // TestPosix to overwrite destination file. { srcVol: "src-vol", destVol: "dest-vol", @@ -1278,8 +1278,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: nil, }, - // Test case - 4. 
- // Test case with io error count set to 1. + // TestPosix case - 4. + // TestPosix case with io error count set to 1. // expected not to fail. { srcVol: "src-vol", @@ -1289,8 +1289,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 1, expectedErr: nil, }, - // Test case - 5. - // Test case with io error count set to maximum allowed count. + // TestPosix case - 5. + // TestPosix case with io error count set to maximum allowed count. // expected not to fail. { srcVol: "src-vol", @@ -1300,8 +1300,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 5, expectedErr: nil, }, - // Test case - 6. - // Test case with non-existent source file. + // TestPosix case - 6. + // TestPosix case with non-existent source file. { srcVol: "src-vol", destVol: "dest-vol", @@ -1310,8 +1310,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errFileNotFound, }, - // Test case - 7. - // Test to check failure of source and destination are not same type. + // TestPosix case - 7. + // TestPosix to check failure of source and destination are not same type. { srcVol: "src-vol", destVol: "dest-vol", @@ -1320,8 +1320,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errFileAccessDenied, }, - // Test case - 8. - // Test to check failure of destination directory exists. + // TestPosix case - 8. + // TestPosix to check failure of destination directory exists. { srcVol: "src-vol", destVol: "dest-vol", @@ -1330,8 +1330,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errFileAccessDenied, }, - // Test case - 9. - // Test case with io error count is greater than maxAllowedIOError. + // TestPosix case - 9. + // TestPosix case with io error count is greater than maxAllowedIOError. { srcVol: "src-vol", destVol: "dest-vol", @@ -1340,8 +1340,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 6, expectedErr: errFaultyDisk, }, - // Test case - 10. - // Test case with source being a file and destination being a directory. + // TestPosix case - 10. 
+ // TestPosix case with source being a file and destination being a directory. // Either both have to be files or directories. // Expecting to fail with `errFileAccessDenied`. { @@ -1352,8 +1352,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errFileAccessDenied, }, - // Test case - 11. - // Test case with non-existent source volume. + // TestPosix case - 11. + // TestPosix case with non-existent source volume. // Expecting to fail with `errVolumeNotFound`. { srcVol: "src-vol-non-existent", @@ -1363,8 +1363,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errVolumeNotFound, }, - // Test case - 12. - // Test case with non-existent destination volume. + // TestPosix case - 12. + // TestPosix case with non-existent destination volume. // Expecting to fail with `errVolumeNotFound`. { srcVol: "src-vol", @@ -1374,8 +1374,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errVolumeNotFound, }, - // Test case - 13. - // Test case with invalid src volume name. Length should be atleast 3. + // TestPosix case - 13. + // TestPosix case with invalid src volume name. Length should be atleast 3. // Expecting to fail with `errInvalidArgument`. { srcVol: "ab", @@ -1385,8 +1385,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errInvalidArgument, }, - // Test case - 14. - // Test case with invalid destination volume name. Length should be atleast 3. + // TestPosix case - 14. + // TestPosix case with invalid destination volume name. Length should be atleast 3. // Expecting to fail with `errInvalidArgument`. { srcVol: "abcd", @@ -1396,8 +1396,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errInvalidArgument, }, - // Test case - 15. - // Test case with invalid destination volume name. Length should be atleast 3. + // TestPosix case - 15. + // TestPosix case with invalid destination volume name. Length should be atleast 3. // Expecting to fail with `errInvalidArgument`. 
{ srcVol: "abcd", @@ -1407,8 +1407,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errInvalidArgument, }, - // Test case - 16. - // Test case with the parent of the destination being a file. + // TestPosix case - 16. + // TestPosix case with the parent of the destination being a file. // expected to fail with `errFileAccessDenied`. { srcVol: "src-vol", @@ -1418,8 +1418,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errFileAccessDenied, }, - // Test case - 17. - // Test case with segment of source file name more than 255. + // TestPosix case - 17. + // TestPosix case with segment of source file name more than 255. // expected not to fail. { srcVol: "src-vol", @@ -1429,8 +1429,8 @@ func TestRenameFile(t *testing.T) { ioErrCnt: 0, expectedErr: errFileNameTooLong, }, - // Test case - 18. - // Test case with segment of destination file name more than 255. + // TestPosix case - 18. + // TestPosix case with segment of destination file name more than 255. // expected not to fail. { srcVol: "src-vol", @@ -1452,13 +1452,13 @@ func TestRenameFile(t *testing.T) { } if err := posixStorage.RenameFile(testCase.srcVol, testCase.srcPath, testCase.destVol, testCase.destPath); err != testCase.expectedErr { - t.Fatalf("Test %d: Expected the error to be : \"%v\", got: \"%v\".", i+1, testCase.expectedErr, err) + t.Fatalf("TestPosix %d: Expected the error to be : \"%v\", got: \"%v\".", i+1, testCase.expectedErr, err) } } } -// Test posix.StatFile() -func TestStatFile(t *testing.T) { +// TestPosix posix.StatFile() +func TestPosixStatFile(t *testing.T) { // create posix test setup posixStorage, path, err := newPosixTestSetup() if err != nil { @@ -1485,56 +1485,56 @@ func TestStatFile(t *testing.T) { ioErrCnt int expectedErr error }{ - // Test case - 1. - // Test case with valid inputs, expected to pass. + // TestPosix case - 1. + // TestPosix case with valid inputs, expected to pass. 
{ srcVol: "success-vol", srcPath: "success-file", ioErrCnt: 0, expectedErr: nil, }, - // Test case - 2. - // Test case with valid inputs, expected to pass. + // TestPosix case - 2. + // TestPosix case with valid inputs, expected to pass. { srcVol: "success-vol", srcPath: "path/to/success-file", ioErrCnt: 0, expectedErr: nil, }, - // Test case - 3. - // Test case with non-existent file. + // TestPosix case - 3. + // TestPosix case with non-existent file. { srcVol: "success-vol", srcPath: "nonexistent-file", ioErrCnt: 0, expectedErr: errFileNotFound, }, - // Test case - 4. - // Test case with non-existent file path. + // TestPosix case - 4. + // TestPosix case with non-existent file path. { srcVol: "success-vol", srcPath: "path/2/success-file", ioErrCnt: 0, expectedErr: errFileNotFound, }, - // Test case - 5. - // Test case with path being a directory. + // TestPosix case - 5. + // TestPosix case with path being a directory. { srcVol: "success-vol", srcPath: "path", ioErrCnt: 0, expectedErr: errFileNotFound, }, - // Test case - 6. - // Test case with io error count > max limit. + // TestPosix case - 6. + // TestPosix case with io error count > max limit. { srcVol: "success-vol", srcPath: "success-file", ioErrCnt: 6, expectedErr: errFaultyDisk, }, - // Test case - 7. - // Test case with non existent volume. + // TestPosix case - 7. + // TestPosix case with non existent volume. 
{ srcVol: "non-existent-vol", srcPath: "success-file", @@ -1552,7 +1552,7 @@ func TestStatFile(t *testing.T) { t.Errorf("Expected the StorageAPI to be of type *posix") } if _, err := posixStorage.StatFile(testCase.srcVol, testCase.srcPath); err != testCase.expectedErr { - t.Fatalf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } } diff --git a/cmd/prepare-storage.go b/cmd/prepare-storage.go index d922db647..36bb20a1e 100644 --- a/cmd/prepare-storage.go +++ b/cmd/prepare-storage.go @@ -70,7 +70,8 @@ import ( type InitActions int const ( - // FormatDisks - see above table for disk states where it is applicable. + // FormatDisks - see above table for disk states where it + // is applicable. FormatDisks InitActions = iota // WaitForHeal - Wait for disks to heal. @@ -82,10 +83,12 @@ const ( // WaitForAll - Wait for all disks to be online. WaitForAll - // WaitForFormatting - Wait for formatting to be triggered from the '1st' server in the cluster. + // WaitForFormatting - Wait for formatting to be triggered + // from the '1st' server in the cluster. WaitForFormatting - // WaitForConfig - Wait for all servers to have the same config including (credentials, version and time). + // WaitForConfig - Wait for all servers to have the same config + // including (credentials, version and time). WaitForConfig // InitObjectLayer - Initialize object layer. @@ -96,8 +99,8 @@ const ( Abort ) -// Quick error to actions converts looking for specific errors which need to -// be returned quickly and server should wait instead. +// Quick error to actions converts looking for specific errors +// which need to be returned quickly and server should wait instead. 
func quickErrToActions(errMap map[error]int) InitActions { var action InitActions switch { @@ -187,7 +190,7 @@ func printRetryMsg(sErrs []error, storageDisks []StorageAPI) { // Implements a jitter backoff loop for formatting all disks during // initialization of the server. -func retryFormattingDisks(firstDisk bool, endpoints []*url.URL, storageDisks []StorageAPI) error { +func retryFormattingXLDisks(firstDisk bool, endpoints []*url.URL, storageDisks []StorageAPI) error { if len(endpoints) == 0 { return errInvalidArgument } @@ -220,17 +223,6 @@ func retryFormattingDisks(firstDisk bool, endpoints []*url.URL, storageDisks []S // for disks not being available. printRetryMsg(sErrs, storageDisks) } - if len(formatConfigs) == 1 { - err := genericFormatCheckFS(formatConfigs[0], sErrs[0]) - if err != nil { - // For an new directory or existing data. - if err == errUnformattedDisk || err == errCorruptedFormat { - return initFormatFS(storageDisks[0]) - } - return err - } - return nil - } // Check if this is a XL or distributed XL, anything > 1 is considered XL backend. // Pre-emptively check if one of the formatted disks // is invalid. This function returns success for the // most part unless one of the formats is not consistent @@ -239,6 +231,7 @@ func retryFormattingDisks(firstDisk bool, endpoints []*url.URL, storageDisks []S if err := checkFormatXLValues(formatConfigs); err != nil { return err } + // Check if this is a XL or distributed XL, anything > 1 is considered XL backend. switch prepForInitXL(firstDisk, sErrs, len(storageDisks)) { case Abort: return errCorruptedFormat @@ -300,7 +293,7 @@ func initStorageDisks(endpoints []*url.URL) ([]StorageAPI, error) { } // Format disks before initialization object layer. 
-func waitForFormatDisks(firstDisk bool, endpoints []*url.URL, storageDisks []StorageAPI) (formattedDisks []StorageAPI, err error) { +func waitForFormatXLDisks(firstDisk bool, endpoints []*url.URL, storageDisks []StorageAPI) (formattedDisks []StorageAPI, err error) { if len(endpoints) == 0 { return nil, errInvalidArgument } @@ -327,7 +320,7 @@ func waitForFormatDisks(firstDisk bool, endpoints []*url.URL, storageDisks []Sto // Start retry loop retrying until disks are formatted properly, until we have reached // a conditional quorum of formatted disks. - err = retryFormattingDisks(firstDisk, endpoints, retryDisks) + err = retryFormattingXLDisks(firstDisk, endpoints, retryDisks) if err != nil { return nil, err } diff --git a/cmd/routers.go b/cmd/routers.go index 837180bee..dfee5e590 100644 --- a/cmd/routers.go +++ b/cmd/routers.go @@ -28,53 +28,6 @@ func newObjectLayerFn() ObjectLayer { return globalObjectAPI } -// newObjectLayer - initialize any object layer depending on the number of disks. -func newObjectLayer(storageDisks []StorageAPI) (ObjectLayer, error) { - var objAPI ObjectLayer - var err error - if len(storageDisks) == 1 { - // Initialize FS object layer. - objAPI, err = newFSObjects(storageDisks[0]) - } else { - // Initialize XL object layer. - objAPI, err = newXLObjects(storageDisks) - } - if err != nil { - return nil, err - } - - // The following actions are performed here, so that any - // requests coming in early in the bootup sequence don't fail - // unexpectedly - e.g. if initEventNotifier was initialized - // after this function completes, an event could be generated - // before the notification system is ready, causing event - // drops or crashes. 
- - // Migrate bucket policy from configDir to .minio.sys/buckets/ - err = migrateBucketPolicyConfig(objAPI) - if err != nil { - errorIf(err, "Unable to migrate bucket policy from config directory") - return nil, err - } - - err = cleanupOldBucketPolicyConfigs() - if err != nil { - errorIf(err, "Unable to clean up bucket policy from config directory.") - return nil, err - } - - // Initialize and load bucket policies. - err = initBucketPolicies(objAPI) - fatalIf(err, "Unable to load all bucket policies.") - - // Initialize a new event notifier. - err = initEventNotifier(objAPI) - fatalIf(err, "Unable to initialize event notification.") - - // Success. - return objAPI, nil -} - // Composed function registering routers for only distributed XL setup. func registerDistXLRouters(mux *router.Router, srvCmdConfig serverCmdConfig) error { // Register storage rpc router only if its a distributed setup. diff --git a/cmd/server-main.go b/cmd/server-main.go index b2a6b6d61..ea0a373dd 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -84,9 +84,8 @@ EXAMPLES: } type serverCmdConfig struct { - serverAddr string - endpoints []*url.URL - storageDisks []StorageAPI + serverAddr string + endpoints []*url.URL } // Parse an array of end-points (from the command line) @@ -248,6 +247,8 @@ func checkServerSyntax(c *cli.Context) { // Verify syntax for all the XL disks. disks := c.Args() + + // Parse disks check if they comply with expected URI style. endpoints, err := parseStorageEndpoints(disks) fatalIf(err, "Unable to parse storage endpoints %s", strings.Join(disks, " ")) @@ -262,7 +263,7 @@ func checkServerSyntax(c *cli.Context) { if len(endpoints) > 1 { // Validate if we have sufficient disks for XL setup. err = checkSufficientDisks(endpoints) - fatalIf(err, "Invalid number of disks supplied.") + fatalIf(err, "Insufficient number of disks.") } else { // Validate if we have invalid disk for FS setup. 
if endpoints[0].Host != "" && endpoints[0].Scheme != "" { @@ -360,7 +361,7 @@ func serverMain(c *cli.Context) { // Initialization routine, such as config loading, enable logging, .. minioInit(c) - // Check for minio updates from dl.minio.io + // Check for new updates from dl.minio.io. checkUpdate() // Server address. @@ -371,10 +372,13 @@ func serverMain(c *cli.Context) { fatalIf(err, "Unable to extract host and port %s", serverAddr) // Check server syntax and exit in case of errors. - // Done after globalMinioHost and globalMinioPort is set as parseStorageEndpoints() - // depends on it. + // Done after globalMinioHost and globalMinioPort is set + // as parseStorageEndpoints() depends on it. checkServerSyntax(c) + // Initialize server config. + initServerConfig(c) + // Disks to be used in server init. endpoints, err := parseStorageEndpoints(c.Args()) fatalIf(err, "Unable to parse storage endpoints %s", c.Args()) @@ -391,39 +395,32 @@ func serverMain(c *cli.Context) { // on all nodes. sort.Sort(byHostPath(endpoints)) - storageDisks, err := initStorageDisks(endpoints) - fatalIf(err, "Unable to initialize storage disk(s).") - - // Cleanup objects that weren't successfully written into the namespace. - fatalIf(houseKeeping(storageDisks), "Unable to purge temporary files.") - - // Initialize server config. - initServerConfig(c) - - // First disk argument check if it is local. - firstDisk := isLocalStorage(endpoints[0]) + // Configure server. + srvConfig := serverCmdConfig{ + serverAddr: serverAddr, + endpoints: endpoints, + } // Check if endpoints are part of distributed setup. globalIsDistXL = isDistributedSetup(endpoints) - // Configure server. - srvConfig := serverCmdConfig{ - serverAddr: serverAddr, - endpoints: endpoints, - storageDisks: storageDisks, + // Set nodes for dsync for distributed setup. + if globalIsDistXL { + fatalIf(initDsyncNodes(endpoints), "Unable to initialize distributed locking clients") } + // Initialize name space lock. 
+ initNSLock(globalIsDistXL) + // Configure server. handler, err := configureServerHandler(srvConfig) fatalIf(err, "Unable to configure one of server's RPC services.") - // Set nodes for dsync for distributed setup. - if globalIsDistXL { - fatalIf(initDsyncNodes(endpoints), "Unable to initialize distributed locking") - } + // Initialize S3 Peers inter-node communication only in distributed setup. + initGlobalS3Peers(endpoints) - // Initialize name space lock. - initNSLock(globalIsDistXL) + // Initialize Admin Peers inter-node communication only in distributed setup. + initGlobalAdminPeers(endpoints) // Initialize a new HTTP server. apiServer := NewServerMux(serverAddr, handler) @@ -438,12 +435,6 @@ func serverMain(c *cli.Context) { // Set the global API endpoints value. globalAPIEndpoints = apiEndPoints - // Initialize S3 Peers inter-node communication - initGlobalS3Peers(endpoints) - - // Initialize Admin Peers inter-node communication - initGlobalAdminPeers(endpoints) - // Start server, automatically configures TLS if certs are available. go func() { cert, key := "", "" @@ -453,13 +444,8 @@ func serverMain(c *cli.Context) { fatalIf(apiServer.ListenAndServe(cert, key), "Failed to start minio server.") }() - // Wait for formatting of disks. - formattedDisks, err := waitForFormatDisks(firstDisk, endpoints, storageDisks) - fatalIf(err, "formatting storage disks failed") - - // Once formatted, initialize object layer. - newObject, err := newObjectLayer(formattedDisks) - fatalIf(err, "intializing object layer failed") + newObject, err := newObjectLayer(srvConfig) + fatalIf(err, "Initializing object layer failed") globalObjLayerMutex.Lock() globalObjectAPI = newObject @@ -471,3 +457,57 @@ func serverMain(c *cli.Context) { // Waits on the server. <-globalServiceDoneCh } + +// Initialize object layer with the supplied disks, objectLayer is nil upon any error. 
+func newObjectLayer(srvCmdCfg serverCmdConfig) (newObject ObjectLayer, err error) { + // For FS only, directly use the disk. + isFS := len(srvCmdCfg.endpoints) == 1 + if isFS { + // Unescape is needed for some UNC paths on windows + // which are of this form \\127.0.0.1\\export\test. + var fsPath string + fsPath, err = url.QueryUnescape(srvCmdCfg.endpoints[0].String()) + if err != nil { + return nil, err + } + + // Initialize new FS object layer. + newObject, err = newFSObjectLayer(fsPath) + if err != nil { + return nil, err + } + + // FS initialized, return. + return newObject, nil + } + + // First disk argument check if it is local. + firstDisk := isLocalStorage(srvCmdCfg.endpoints[0]) + + // Initialize storage disks. + storageDisks, err := initStorageDisks(srvCmdCfg.endpoints) + if err != nil { + return nil, err + } + + // Wait for formatting disks for XL backend. + var formattedDisks []StorageAPI + formattedDisks, err = waitForFormatXLDisks(firstDisk, srvCmdCfg.endpoints, storageDisks) + if err != nil { + return nil, err + } + + // Cleanup objects that weren't successfully written into the namespace. + if err = houseKeeping(storageDisks); err != nil { + return nil, err + } + + // Once XL formatted, initialize object layer. + newObject, err = newXLObjectLayer(formattedDisks) + if err != nil { + return nil, err + } + + // XL initialized, return. + return newObject, nil +} diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index 0586c8113..627dd7065 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -21,6 +21,7 @@ import ( "flag" "net/http" "os" + "reflect" "runtime" "testing" @@ -222,6 +223,63 @@ func TestCheckSufficientDisks(t *testing.T) { } } +// Tests initializing new object layer. +func TestNewObjectLayer(t *testing.T) { + // Tests for FS object layer. 
+ nDisks := 1 + disks, err := getRandomDisks(nDisks) + if err != nil { + t.Fatal("Failed to create disks for the backend") + } + defer removeRoots(disks) + + endpoints, err := parseStorageEndpoints(disks) + if err != nil { + t.Fatal("Unexpected parse error", err) + } + + obj, err := newObjectLayer(serverCmdConfig{ + serverAddr: ":9000", + endpoints: endpoints, + }) + if err != nil { + t.Fatal("Unexpected object layer initialization error", err) + } + _, ok := obj.(*fsObjects) + if !ok { + t.Fatal("Unexpected object layer detected", reflect.TypeOf(obj)) + } + + // Tests for XL object layer initialization. + + // Create temporary backend for the test server. + nDisks = 16 + disks, err = getRandomDisks(nDisks) + if err != nil { + t.Fatal("Failed to create disks for the backend") + } + defer removeRoots(disks) + + endpoints, err = parseStorageEndpoints(disks) + if err != nil { + t.Fatal("Unexpected parse error", err) + } + + obj, err = newObjectLayer(serverCmdConfig{ + serverAddr: ":9000", + endpoints: endpoints, + }) + if err != nil { + t.Fatal("Unexpected object layer initialization error", err) + } + + _, ok = obj.(*xlObjects) + if !ok { + t.Fatal("Unexpected object layer detected", reflect.TypeOf(obj)) + } +} + +// Tests parsing various types of input endpoints and paths. func TestParseStorageEndpoints(t *testing.T) { testCases := []struct { globalMinioHost string diff --git a/cmd/server-mux_test.go b/cmd/server-mux_test.go index 0a4510b78..7d0555af4 100644 --- a/cmd/server-mux_test.go +++ b/cmd/server-mux_test.go @@ -109,18 +109,20 @@ func dial(addr string) error { // Tests initializing listeners. func TestInitListeners(t *testing.T) { + portTest1 := getFreePort() + portTest2 := getFreePort() testCases := []struct { serverAddr string shouldPass bool }{ // Test 1 with ip and port. { - serverAddr: "127.0.0.1:" + getFreePort(), + serverAddr: "127.0.0.1:" + portTest1, shouldPass: true, }, // Test 2 only port. 
{ - serverAddr: ":" + getFreePort(), + serverAddr: ":" + portTest2, shouldPass: true, }, // Test 3 with no port error. diff --git a/cmd/server_test.go b/cmd/server_test.go index 04af7e129..9709f3ba6 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -1145,8 +1145,8 @@ func (s *TestSuiteCommon) TestSHA256Mismatch(c *C) { } } -// TestNotBeAbleToCreateObjectInNonexistentBucket - Validates the error response -// on an attempt to upload an object into a non-existent bucket. +// TestPutObjectLongName - Validates the error response +// on an attempt to upload an object with long name. func (s *TestSuiteCommon) TestPutObjectLongName(c *C) { // generate a random bucket name. bucketName := getRandomBucketName() diff --git a/cmd/signature-v2_test.go b/cmd/signature-v2_test.go index 4983eb183..4b0462a11 100644 --- a/cmd/signature-v2_test.go +++ b/cmd/signature-v2_test.go @@ -80,7 +80,7 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) { // (5) Should error when the signature does not match. { queryParams: map[string]string{ - "Expires": fmt.Sprintf("%d", now.Unix()), + "Expires": fmt.Sprintf("%d", now.Unix()+60), "Signature": "zOM2YrY/yAQe15VWmT78OlBrK6g=", "AWSAccessKeyId": serverConfig.GetCredential().AccessKey, }, diff --git a/cmd/signature-v4-utils.go b/cmd/signature-v4-utils.go index f2a6cf49b..bf956636e 100644 --- a/cmd/signature-v4-utils.go +++ b/cmd/signature-v4-utils.go @@ -106,13 +106,6 @@ func getURLEncodedName(name string) string { return encodedName } -func findHost(signedHeaders []string) APIErrorCode { - if contains(signedHeaders, "host") { - return ErrNone - } - return ErrUnsignedHeaders -} - // extractSignedHeaders extract signed headers from Authorization header func extractSignedHeaders(signedHeaders []string, reqHeaders http.Header) (http.Header, APIErrorCode) { // find whether "host" is part of list of signed headers. 
diff --git a/cmd/signature-v4-utils_test.go b/cmd/signature-v4-utils_test.go index 9a1928841..17004862c 100644 --- a/cmd/signature-v4-utils_test.go +++ b/cmd/signature-v4-utils_test.go @@ -194,25 +194,6 @@ func TestExtractSignedHeaders(t *testing.T) { } } -// TestFindHost - tests the logic to find whether "host" is part of signed headers. -func TestFindHost(t *testing.T) { - // doesn't contain "host". - signedHeaders := []string{"x-amz-content-sha256", "x-amz-date"} - errCode := findHost(signedHeaders) - // expected to error out with code ErrUnsignedHeaders . - if errCode != ErrUnsignedHeaders { - t.Fatalf("Expected the APIErrorCode to be %d, but got %d", ErrUnsignedHeaders, errCode) - } - - // adding "host". - signedHeaders = append(signedHeaders, "host") - // epxected to pass. - errCode = findHost(signedHeaders) - if errCode != ErrNone { - t.Fatalf("Expected the APIErrorCode to be %d, but got %d", ErrNone, errCode) - } -} - // TestSignV4TrimAll - tests the logic of TrimAll() function func TestSignV4TrimAll(t *testing.T) { testCases := []struct { diff --git a/cmd/storage-rpc-client_test.go b/cmd/storage-rpc-client_test.go index 56970ca79..37965075b 100644 --- a/cmd/storage-rpc-client_test.go +++ b/cmd/storage-rpc-client_test.go @@ -351,6 +351,9 @@ func (s *TestRPCStorageSuite) testRPCStorageListDir(t *testing.T) { } } dirs, err := storageDisk.ListDir("myvol", "") + if err != nil { + t.Error(err) + } if len(dirs) != dirCount { t.Errorf("Expected %d directories but found only %d", dirCount, len(dirs)) } @@ -361,6 +364,9 @@ func (s *TestRPCStorageSuite) testRPCStorageListDir(t *testing.T) { } } dirs, err = storageDisk.ListDir("myvol", "") + if err != nil { + t.Error(err) + } if len(dirs) != 0 { t.Errorf("Expected no directories but found %d", dirCount) } @@ -370,6 +376,9 @@ func (s *TestRPCStorageSuite) testRPCStorageListDir(t *testing.T) { t.Error("Unable to initiate DeleteVol", err) } vols, err := storageDisk.ListVols() + if err != nil { + t.Error(err) + } if 
len(vols) != 0 { t.Errorf("Expected no volumes but found %d", dirCount) } diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index cce866721..cfdc9ad86 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -70,7 +70,8 @@ func init() { } func prepareFS() (ObjectLayer, string, error) { - fsDirs, err := getRandomDisks(1) + nDisks := 1 + fsDirs, err := getRandomDisks(nDisks) if err != nil { return nil, "", err } @@ -78,12 +79,15 @@ func prepareFS() (ObjectLayer, string, error) { if err != nil { return nil, "", err } - obj, _, err := initObjectLayer(endpoints) + fsPath, err := url.QueryUnescape(endpoints[0].String()) if err != nil { - removeRoots(fsDirs) return nil, "", err } - return obj, fsDirs[0], nil + obj, err := newFSObjectLayer(fsPath) + if err != nil { + return nil, "", err + } + return obj, endpoints[0].Path, nil } func prepareXL() (ObjectLayer, []string, error) { @@ -104,6 +108,17 @@ func prepareXL() (ObjectLayer, []string, error) { return obj, fsDirs, nil } +// Initialize FS objects. +func initFSObjects(disk string, t *testing.T) (obj ObjectLayer) { + newTestConfig("us-east-1") + var err error + obj, err = newFSObjectLayer(disk) + if err != nil { + t.Fatal(err) + } + return obj +} + // TestErrHandler - Golang Testing.T and Testing.B, and gocheck.C satisfy this interface. // This makes it easy to run the TestServer from any of the tests. // Using this interface, functionalities to be used in tests can be made generalized, and can be integrated in benchmarks/unit tests/go check suite tests. @@ -118,6 +133,7 @@ type TestErrHandler interface { const ( // FSTestStr is the string which is used as notation for Single node ObjectLayer in the unit tests. FSTestStr string = "FS" + // XLTestStr is the string which is used as notation for XL ObjectLayer in the unit tests. 
XLTestStr string = "XL" ) @@ -204,15 +220,15 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer { testServer.AccessKey = credentials.AccessKey testServer.SecretKey = credentials.SecretKey - objLayer, storageDisks, err := initObjectLayer(testServer.Disks) + objLayer, _, err := initObjectLayer(testServer.Disks) if err != nil { t.Fatalf("Failed obtaining Temp Backend: %s", err) } srvCmdCfg := serverCmdConfig{ - endpoints: testServer.Disks, - storageDisks: storageDisks, + endpoints: testServer.Disks, } + httpHandler, err := configureServerHandler( srvCmdCfg, ) @@ -338,7 +354,7 @@ func initTestStorageRPCEndPoint(srvCmdConfig serverCmdConfig) http.Handler { return muxRouter } -// StartTestStorageRPCServer - Creates a temp XL/FS backend and initializes storage RPC end points, +// StartTestStorageRPCServer - Creates a temp XL backend and initializes storage RPC end points, // then starts a test server with those storage RPC end points registered. func StartTestStorageRPCServer(t TestErrHandler, instanceType string, diskN int) TestServer { // create temporary backend for the test server. @@ -402,7 +418,7 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer { testRPCServer.SecretKey = credentials.SecretKey // create temporary backend for the test server. 
- objLayer, storageDisks, err := initObjectLayer(endpoints) + objLayer, _, err := initObjectLayer(endpoints) if err != nil { t.Fatalf("Failed obtaining Temp Backend: %s", err) } @@ -413,8 +429,7 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer { globalObjLayerMutex.Unlock() srvCfg := serverCmdConfig{ - endpoints: endpoints, - storageDisks: storageDisks, + endpoints: endpoints, } mux := router.NewRouter() @@ -1620,12 +1635,12 @@ func initObjectLayer(endpoints []*url.URL) (ObjectLayer, []StorageAPI, error) { return nil, nil, err } - formattedDisks, err := waitForFormatDisks(true, endpoints, storageDisks) + formattedDisks, err := waitForFormatXLDisks(true, endpoints, storageDisks) if err != nil { return nil, nil, err } - objLayer, err := newObjectLayer(formattedDisks) + objLayer, err := newXLObjectLayer(formattedDisks) if err != nil { return nil, nil, err } @@ -1722,7 +1737,7 @@ func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (bucketName string, // failed to create newbucket, return err. return "", nil, err } - // Register the API end points with XL/FS object layer. + // Register the API end points with XL object layer. // Registering only the GetObject handler. apiRouter = initTestAPIEndPoints(obj, endpoints) return bucketName, apiRouter, nil @@ -1928,7 +1943,6 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [ if err != nil { t.Fatalf("Initialzation of API handler tests failed: %s", err) } - credentials = serverConfig.GetCredential() // Executing the object layer tests for XL. objAPITest(objLayer, XLTestStr, bucketXL, xlAPIRouter, credentials, t) // clean up the temporary test backend. @@ -2118,7 +2132,7 @@ func registerAPIFunctions(muxRouter *router.Router, objLayer ObjectLayer, apiFun registerBucketLevelFunc(bucketRouter, api, apiFunctions...) 
} -// Takes in XL/FS object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler. +// Takes in XL object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler. // Need isolated registration of API end points while writing unit tests for end points. // All the API end points are registered only for the default case. func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Handler { diff --git a/cmd/utils.go b/cmd/utils.go index c62aed21b..a026d6712 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -194,29 +194,6 @@ func contains(stringList []string, element string) bool { return false } -// Contains endpoint returns true if endpoint found in the list of input endpoints. -func containsEndpoint(endpoints []*url.URL, endpoint *url.URL) bool { - for _, ep := range endpoints { - if *ep == *endpoint { - return true - } - } - return false -} - -// urlPathSplit - split url path into bucket and object components. -func urlPathSplit(urlPath string) (bucketName, prefixName string) { - if urlPath == "" { - return urlPath, "" - } - urlPath = strings.TrimPrefix(urlPath, "/") - i := strings.Index(urlPath, "/") - if i != -1 { - return urlPath[:i], urlPath[i+1:] - } - return urlPath, "" -} - // Starts a profiler returns nil if profiler is not enabled, caller needs to handle this. func startProfiler(profiler string) interface { Stop() diff --git a/cmd/utils_test.go b/cmd/utils_test.go index a112b1d55..49d9f65eb 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -126,48 +126,6 @@ func TestMaxObjectSize(t *testing.T) { } } -// Test urlPathSplit. 
-func TestURLPathSplit(t *testing.T) { - type test struct { - urlPath string - bucketName string - prefixName string - } - - testCases := []test{ - { - urlPath: "/b/c/", - bucketName: "b", - prefixName: "c/", - }, - { - urlPath: "c/aa", - bucketName: "c", - prefixName: "aa", - }, - { - urlPath: "", - bucketName: "", - prefixName: "", - }, - { - urlPath: "/b", - bucketName: "b", - prefixName: "", - }, - } - - for i, testCase := range testCases { - bucketName, prefixName := urlPathSplit(testCase.urlPath) - if bucketName != testCase.bucketName { - t.Errorf("Tets %d: Expected %s, %s", i+1, testCase.bucketName, bucketName) - } - if prefixName != testCase.prefixName { - t.Errorf("Tets %d: Expected %s, %s", i+1, testCase.bucketName, bucketName) - } - } -} - // Tests minimum allowed part size. func TestMinAllowedPartSize(t *testing.T) { sizes := []struct { diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index 5031cbee6..f3c0ff026 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -461,7 +461,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa verifyReply(reply) // Unauthenticated ListObjects should fail. - err, reply = test("") + err, _ = test("") if err == nil { t.Fatalf("Expected error `%s`", err) } @@ -870,7 +870,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl } // Unauthenticated download should fail. - code, bodyContent = test("") + code, _ = test("") if code != http.StatusForbidden { t.Fatalf("Expected the response status to be 403, but instead found `%d`", code) } diff --git a/cmd/xl-v1-healing.go b/cmd/xl-v1-healing.go index d542d3d97..f14b6baf4 100644 --- a/cmd/xl-v1-healing.go +++ b/cmd/xl-v1-healing.go @@ -137,7 +137,7 @@ func healBucketMetadata(storageDisks []StorageAPI, bucket string, readQuorum int } // Heal `policy.json` for missing entries, ignores if `policy.json` is not found. 
- policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON) + policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig) if err := healBucketMetaFn(policyPath); err != nil { return err } diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go index 702af34ca..d22c24ab4 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/xl-v1-object.go @@ -505,9 +505,9 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io. onlineDisks := getOrderedDisks(xlMeta.Erasure.Distribution, xl.storageDisks) - // Delete temporary object in the event of failure. If - // PutObject succeeded there would be no temporary object to - // delete. + // Delete temporary object in the event of failure. + // If PutObject succeeded there would be no temporary + // object to delete. defer xl.deleteObject(minioMetaTmpBucket, tempObj) if size > 0 { diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go index b3abfb615..d9ad936e7 100644 --- a/cmd/xl-v1.go +++ b/cmd/xl-v1.go @@ -76,6 +76,24 @@ type xlObjects struct { // list of all errors that can be ignored in tree walk operation in XL var xlTreeWalkIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errVolumeNotFound, errFileNotFound) +// newXLObjectLayer - initialize any object layer depending on the number of disks. +func newXLObjectLayer(storageDisks []StorageAPI) (ObjectLayer, error) { + // Initialize XL object layer. + objAPI, err := newXLObjects(storageDisks) + fatalIf(err, "Unable to initialize XL object layer.") + + // Initialize and load bucket policies. + err = initBucketPolicies(objAPI) + fatalIf(err, "Unable to load all bucket policies.") + + // Initialize a new event notifier. + err = initEventNotifier(objAPI) + fatalIf(err, "Unable to initialize event notification.") + + // Success. + return objAPI, nil +} + // newXLObjects - initialize new xl object layer. 
func newXLObjects(storageDisks []StorageAPI) (ObjectLayer, error) { if storageDisks == nil { diff --git a/cmd/xl-v1_test.go b/cmd/xl-v1_test.go index a098dae7e..c7bed4f9a 100644 --- a/cmd/xl-v1_test.go +++ b/cmd/xl-v1_test.go @@ -132,7 +132,7 @@ func TestNewXL(t *testing.T) { var erasureDisks []string for i := 0; i < nDisks; i++ { // Do not attempt to create this path, the test validates - // so that newFSObjects initializes non existing paths + // so that newXLObjects initializes non existing paths // and successfully returns initialized object layer. disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) erasureDisks = append(erasureDisks, disk) @@ -155,18 +155,18 @@ func TestNewXL(t *testing.T) { t.Fatal("Unexpected error: ", err) } - _, err = waitForFormatDisks(true, endpoints, nil) + _, err = waitForFormatXLDisks(true, endpoints, nil) if err != errInvalidArgument { t.Fatalf("Expecting error, got %s", err) } - _, err = waitForFormatDisks(true, nil, storageDisks) + _, err = waitForFormatXLDisks(true, nil, storageDisks) if err != errInvalidArgument { t.Fatalf("Expecting error, got %s", err) } // Initializes all erasure disks - formattedDisks, err := waitForFormatDisks(true, endpoints, storageDisks) + formattedDisks, err := waitForFormatXLDisks(true, endpoints, storageDisks) if err != nil { t.Fatalf("Unable to format disks for erasure, %s", err) } diff --git a/docs/minio-limitations.md b/docs/minio-limitations.md index 4af0c6074..539e6ef9f 100644 --- a/docs/minio-limitations.md +++ b/docs/minio-limitations.md @@ -22,12 +22,12 @@ We found the following APIs to be redundant or less useful outside of AWS. If yo |:---|:---| |Maximum number of buckets| no-limit| |Maximum number of objects per bucket| no-limit| -|Maximum object size| 5 TB| +|Maximum object size| 5 TiB| |Minimum object size| 0 B| -|Maximum object size per PUT operation| 5 GB| +|Maximum object size per PUT operation| 5 GiB| |Maximum number of parts per upload| 10,000| -|Part size|5 MB to 5 GB. 
Last part can be 0 B to 5 GB| -|Maximum number of parts returned per list parts request| 1000| +|Part size|5 MiB to 5 GiB. Last part can be 0 B to 5 GiB| +|Maximum number of parts returned per list parts request| 1000| |Maximum number of objects returned per list objects request| 1000| |Maximum number of multipart uploads returned per list multipart uploads request| 1000| diff --git a/docs/shared-backend/DESIGN.md b/docs/shared-backend/DESIGN.md new file mode 100644 index 000000000..cf7d2689f --- /dev/null +++ b/docs/shared-backend/DESIGN.md @@ -0,0 +1,137 @@ +Introduction [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) +------------ + +This feature allows Minio to serve a shared NAS drive across multiple Minio instances. There are no special configuration changes required to enable this feature. Access to files stored on NAS volume are locked and synchronized by default. + +Motivation +---------- + +Since Minio instances serve the purpose of a single tenant there is an increasing requirement where users want to run multiple Minio instances on a same backend which is managed by an existing NAS (NFS, GlusterFS, Other distributed filesystems) rather than a local disk. This feature is implemented also with minimal disruption in mind for the user and overall UI. + +Restrictions +------------ + +* A PutObject() is blocked and waits if another GetObject() is in progress. +* A CompleteMultipartUpload() is blocked and waits if another PutObject() or GetObject() is in progress. +* Cannot run FS mode as a remote disk RPC. + +## How To Run? + +Running Minio instances on shared backend is no different than running on a stand-alone disk. There are no special configuration changes required to enable this feature. Access to files stored on NAS volume are locked and synchronized by default. 
Following examples will clarify this further for each operating system of your choice: + +### Ubuntu 16.04 LTS + +Example 1: Start Minio instance on a shared backend mounted and available at `/mnt/nfs`. + +On linux server1 +```shell +minio server /mnt/nfs +``` + +On linux server2 +```shell +minio server /mnt/nfs +``` + +### Windows 2012 Server + +Example 1: Start Minio instance on a shared backend mounted and available at `\\remote-server\cifs`. + +On windows server1 +```cmd +minio.exe server \\remote-server\cifs\export +``` + +On windows server2 +```cmd +minio.exe server \\remote-server\cifs\export +``` + +Alternatively if `\\remote-server\cifs` is mounted as `D:\` drive. + +On windows server1 +```cmd +minio.exe server D:\export +``` + +On windows server2 +```cmd +minio.exe server D:\export +``` + +Architecture +------------------ + +## POSIX/Win32 Locks + +### Lock process + +With in the same Minio instance locking is handled by existing in-memory namespace locks (**sync.RWMutex** et. al). To synchronize locks between many Minio instances we leverage POSIX `fcntl()` locks on Unixes and on Windows `LockFileEx()` Win32 API. Requesting write lock block if there are any read locks held by neighboring Minio instance on the same path. So does the read lock if there are any active write locks in-progress. + +### Unlock process + +Unlocking happens on filesystems locks by just closing the file descriptor (fd) which was initially requested for lock operation. Closing the fd tells the kernel to relinquish all the locks held on the path by the current process. This gets trickier when there are many readers on the same path by the same process, it would mean that closing an fd relinquishes locks for all concurrent readers as well. To properly manage this situation a simple fd reference count is implemented, the same fd is shared between many readers. 
When readers start closing on the fd we start reducing the reference count, once reference count has reached zero we can be sure that there are no more readers active. So we proceed and close the underlying file descriptor which would relinquish the read lock held on the path. + +This doesn't apply for the writes because there is always one writer and many readers for any unique object. + +## Handling Concurrency. + +An example here shows how the contention is handled with GetObject(). + +GetObject() holds a read lock on `fs.json`. +```go + + fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile) + rlk, err := fs.rwPool.Open(fsMetaPath) + if err != nil { + return toObjectErr(traceError(err), bucket, object) + } + defer rlk.Close() + +... you can perform other operations here ... + + _, err = io.CopyBuffer(writer, reader, buf) + +... after successful copy operation unlocks the read lock ... + +``` + +A concurrent PutObject is requested on the same object, PutObject() attempts a write lock on `fs.json`. + +```go + fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile) + wlk, err := fs.rwPool.Create(fsMetaPath) + if err != nil { + return ObjectInfo{}, toObjectErr(err, bucket, object) + } + // This close will allow for locks to be synchronized on `fs.json`. + defer wlk.Close() +``` + +Now from the above snippet the following code one can notice that until the GetObject() returns writing to the client. Following portion of the code will block. + +```go + wlk, err := fs.rwPool.Create(fsMetaPath) +``` + +This restriction is needed so that corrupted data is not returned to the client in between I/O. The logic works vice-versa as well an on-going PutObject(), GetObject() would wait for the PutObject() to complete. 
+ +### Caveats (concurrency) + +Consider for example 3 servers sharing the same backend + +On minio1 + +- DeleteObject(object1) --> lock acquired on `fs.json` while object1 is being deleted. + +On minio2 + +- PutObject(object1) --> lock waiting until DeleteObject finishes. + +On minio3 + +- PutObject(object1) --> (concurrent request during PutObject minio2 checking if `fs.json` exists) + +Once lock is acquired the minio2 validates if the file really exists to avoid obtaining lock on an fd which is already deleted. But this situation calls for a race with a third server which is also attempting to write the same file before the minio2 can validate if the file exists. It might be potentially possible `fs.json` is created so the lock acquired by minio2 might be invalid and can lead to a potential inconsistency. + +This is a known problem and cannot be solved by POSIX fcntl locks. These are considered to be the limits of shared filesystem. \ No newline at end of file diff --git a/docs/shared-backend/README.md b/docs/shared-backend/README.md new file mode 100644 index 000000000..26dc64f84 --- /dev/null +++ b/docs/shared-backend/README.md @@ -0,0 +1,92 @@ +# Shared Backend Minio Quickstart Guide [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![codecov](https://codecov.io/gh/minio/minio/branch/master/graph/badge.svg)](https://codecov.io/gh/minio/minio) + +Minio now supports shared backend across multiple instances. This solves certain specific use cases. + +## Use Cases + +- Minio on NAS +- Minio on Distributed Filesystems +- Multi-user Shared Backend. + +## Why Minio On Shared Backend? + +This feature allows Minio to serve a shared NAS drive across multiple Minio instances. 
There are no special configuration changes required to enable this feature. Access to files stored on NAS volume are locked and synchronized by default. + +# Get started + +If you're aware of stand-alone Minio set up, the installation and running remains the same. + +## 1. Prerequisites + +Install Minio - [Minio Quickstart Guide](https://docs.minio.io/docs/minio). + +## 2. Run Minio On Shared Backend + +Below examples will clarify further for each operating system of your choice: + +### Ubuntu 16.04 LTS + +Run the following commands on all the object storage gateway servers where your NAS volume is accessible. By explicitly passing access and secret keys through the environment variable you make sure that all the gateway servers share the same key across. + +Example 1: Start Minio instance on a shared backend mounted and available at `/mnt/nfs`. + +On linux server1 +```sh +minio server /mnt/nfs +``` + +On linux server2 +```sh +minio server /mnt/nfs +``` + +### Windows 2012 Server + +Run the following commands on all the object storage gateway servers where your NAS volume is accessible. By explicitly passing access and secret keys through the environment variable you make sure that all the gateway servers share the same key across. + +Example 1: Start Minio instance on a shared backend mounted and available at `\\remote-server\smb`. + +On windows server1 +```cmd +set MINIO_ACCESS_KEY=my-username +set MINIO_SECRET_KEY=my-password +minio.exe server \\remote-server\smb\export +``` + +On windows server2 +```cmd +set MINIO_ACCESS_KEY=my-username +set MINIO_SECRET_KEY=my-password +minio.exe server \\remote-server\smb\export +``` + +Alternatively if `\\remote-server\smb` is mounted as `M:\` drive. 
+ +On windows server1 +```cmd +set MINIO_ACCESS_KEY=my-username +set MINIO_SECRET_KEY=my-password +net use m: \\remote-server\smb\export /P:Yes +minio.exe server M:\export +``` + +On windows server2 +```cmd +set MINIO_ACCESS_KEY=my-username +set MINIO_SECRET_KEY=my-password +net use m: \\remote-server\smb\export /P:Yes +minio.exe server M:\export +``` + +## 3. Test your setup + +To test this setup, access the Minio server via browser or [`mc`](https://docs.minio.io/docs/minio-client-quickstart-guide). You’ll see the uploaded files are accessible from the node2 endpoint as well. + +## Explore Further +- [Use `mc` with Minio Server](https://docs.minio.io/docs/minio-client-quickstart-guide) +- [Use `aws-cli` with Minio Server](https://docs.minio.io/docs/aws-cli-with-minio) +- [Use `s3cmd` with Minio Server](https://docs.minio.io/docs/s3cmd-with-minio) +- [Use `minio-go` SDK with Minio Server](https://docs.minio.io/docs/golang-client-quickstart-guide) +- [The Minio documentation website](https://docs.minio.io) + + diff --git a/pkg/lock/lock.go b/pkg/lock/lock.go new file mode 100644 index 000000000..a72750fdc --- /dev/null +++ b/pkg/lock/lock.go @@ -0,0 +1,102 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package lock - implements filesystem locking wrappers around an +// open file descriptor. 
+package lock + +import ( + "os" + "sync" +) + +// RLockedFile represents a read locked file, implements a special +// closer which only closes the associated *os.File when the ref count. +// has reached zero, i.e when all the readers have given up their locks. +type RLockedFile struct { + *LockedFile + mutex sync.Mutex + refs int // Holds read lock refs. +} + +// IsClosed - Check if the rlocked file is already closed. +func (r *RLockedFile) IsClosed() bool { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.refs == 0 +} + +// IncLockRef - is used by called to indicate lock refs. +func (r *RLockedFile) IncLockRef() { + r.mutex.Lock() + r.refs++ + r.mutex.Unlock() +} + +// Close - this closer implements a special closer +// closes the underlying fd only when the refs +// reach zero. +func (r *RLockedFile) Close() (err error) { + r.mutex.Lock() + defer r.mutex.Unlock() + + if r.refs == 0 { + return os.ErrInvalid + } + + r.refs-- + if r.refs == 0 { + err = r.File.Close() + } + + return err +} + +// Provides a new initialized read locked struct from *os.File +func newRLockedFile(lkFile *LockedFile) (*RLockedFile, error) { + if lkFile == nil { + return nil, os.ErrInvalid + } + + return &RLockedFile{ + LockedFile: lkFile, + refs: 1, + }, nil +} + +// RLockedOpenFile - returns a wrapped read locked file, if the file +// doesn't exist at path returns an error. +func RLockedOpenFile(path string) (*RLockedFile, error) { + lkFile, err := LockedOpenFile(path, os.O_RDONLY, 0666) + if err != nil { + return nil, err + } + + return newRLockedFile(lkFile) + +} + +// LockedFile represents a locked file, implements a helper +// method Size(), represents the size of the underlying object. +type LockedFile struct { + *os.File + size int64 +} + +// Size - size of the underlying locked file. 
+func (l *LockedFile) Size() int64 { + return l.size +} diff --git a/pkg/lock/lock_nix.go b/pkg/lock/lock_nix.go new file mode 100644 index 000000000..14263335d --- /dev/null +++ b/pkg/lock/lock_nix.go @@ -0,0 +1,75 @@ +// +build !windows,!plan9,!solaris + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package lock + +import ( + "fmt" + "os" + "syscall" +) + +// LockedOpenFile - initializes a new lock and protects +// the file from concurrent access across mount points. +// This implementation doesn't support all the open +// flags and shouldn't be considered as replacement +// for os.OpenFile(). 
+func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lockType int + switch flag { + case syscall.O_RDONLY: + lockType = syscall.LOCK_SH + case syscall.O_WRONLY: + fallthrough + case syscall.O_RDWR: + fallthrough + case syscall.O_WRONLY | syscall.O_CREAT: + fallthrough + case syscall.O_RDWR | syscall.O_CREAT: + lockType = syscall.LOCK_EX + default: + return nil, fmt.Errorf("Unsupported flag (%d)", flag) + } + + f, err := os.OpenFile(path, flag|syscall.O_SYNC, perm) + if err != nil { + return nil, err + } + + if err = syscall.Flock(int(f.Fd()), lockType); err != nil { + f.Close() + return nil, err + } + + st, err := os.Stat(path) + if err != nil { + f.Close() + return nil, err + } + + if st.IsDir() { + f.Close() + return nil, &os.PathError{ + Op: "open", + Path: path, + Err: syscall.EISDIR, + } + } + + return &LockedFile{File: f, size: st.Size()}, nil +} diff --git a/pkg/lock/lock_test.go b/pkg/lock/lock_test.go new file mode 100644 index 000000000..4c3547417 --- /dev/null +++ b/pkg/lock/lock_test.go @@ -0,0 +1,192 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package lock + +import ( + "io/ioutil" + "os" + "testing" + "time" +) + +// Test lock fails. 
+func TestLockFail(t *testing.T) { + f, err := ioutil.TempFile("", "lock") + if err != nil { + t.Fatal(err) + } + f.Close() + defer func() { + err = os.Remove(f.Name()) + if err != nil { + t.Fatal(err) + } + }() + + _, err = LockedOpenFile(f.Name(), os.O_APPEND, 0600) + if err == nil { + t.Fatal("Should fail here") + } +} + +// Tests lock directory fail. +func TestLockDirFail(t *testing.T) { + d, err := ioutil.TempDir("", "lockDir") + if err != nil { + t.Fatal(err) + } + defer func() { + err = os.Remove(d) + if err != nil { + t.Fatal(err) + } + }() + + _, err = LockedOpenFile(d, os.O_APPEND, 0600) + if err == nil { + t.Fatal("Should fail here") + } +} + +// Tests rwlock methods. +func TestRWLockedFile(t *testing.T) { + f, err := ioutil.TempFile("", "lock") + if err != nil { + t.Fatal(err) + } + f.Close() + defer func() { + err = os.Remove(f.Name()) + if err != nil { + t.Fatal(err) + } + }() + + rlk, err := RLockedOpenFile(f.Name()) + if err != nil { + t.Fatal(err) + } + if rlk.Size() != 0 { + t.Fatal("File size should be zero", rlk.Size()) + } + isClosed := rlk.IsClosed() + if isClosed { + t.Fatal("File ref count shouldn't be zero") + } + + // Increase reference count to 2. + rlk.IncLockRef() + + isClosed = rlk.IsClosed() + if isClosed { + t.Fatal("File ref count shouldn't be zero") + } + + // Decrease reference count by 1. + if err = rlk.Close(); err != nil { + t.Fatal(err) + } + + isClosed = rlk.IsClosed() + if isClosed { + t.Fatal("File ref count shouldn't be zero") + } + + // Decrease reference count by 1. + if err = rlk.Close(); err != nil { + t.Fatal(err) + } + + // Now file should be closed. + isClosed = rlk.IsClosed() + if !isClosed { + t.Fatal("File ref count should be zero") + } + + // Closing a file again should result in invalid argument. + if err = rlk.Close(); err != os.ErrInvalid { + t.Fatal(err) + } + + _, err = newRLockedFile(nil) + if err != os.ErrInvalid { + t.Fatal("Unexpected error", err) + } +} + +// Tests lock and unlock semantics. 
+func TestLockAndUnlock(t *testing.T) { + f, err := ioutil.TempFile("", "lock") + if err != nil { + t.Fatal(err) + } + f.Close() + defer func() { + err = os.Remove(f.Name()) + if err != nil { + t.Fatal(err) + } + }() + + // lock the file + l, err := LockedOpenFile(f.Name(), os.O_WRONLY, 0600) + if err != nil { + t.Fatal(err) + } + + // unlock the file + if err = l.Close(); err != nil { + t.Fatal(err) + } + + // try lock the unlocked file + dupl, err := LockedOpenFile(f.Name(), os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + t.Errorf("err = %v, want %v", err, nil) + } + + // blocking on locked file + locked := make(chan struct{}, 1) + go func() { + bl, blerr := LockedOpenFile(f.Name(), os.O_WRONLY, 0600) + if blerr != nil { + t.Fatal(blerr) + } + locked <- struct{}{} + if blerr = bl.Close(); blerr != nil { + t.Fatal(blerr) + } + }() + + select { + case <-locked: + t.Error("unexpected unblocking") + case <-time.After(100 * time.Millisecond): + } + + // unlock + if err = dupl.Close(); err != nil { + t.Fatal(err) + } + + // the previously blocked routine should be unblocked + select { + case <-locked: + case <-time.After(1 * time.Second): + t.Error("unexpected blocking") + } +} diff --git a/pkg/lock/lock_windows.go b/pkg/lock/lock_windows.go new file mode 100644 index 000000000..dcbb7ed33 --- /dev/null +++ b/pkg/lock/lock_windows.go @@ -0,0 +1,172 @@ +// +build windows + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package lock + +import ( + "errors" + "os" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + + errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.") +) + +const ( + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +// LockedOpenFile - initializes a new lock and protects +// the file from concurrent access. +func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + + if err = lockFile(syscall.Handle(f.Fd()), 0); err != nil { + f.Close() + return nil, err + } + + st, err := os.Stat(path) + if err != nil { + f.Close() + return nil, err + } + + if st.IsDir() { + f.Close() + return nil, &os.PathError{ + Op: "open", + Path: path, + Err: syscall.EISDIR, + } + } + + return &LockedFile{File: f, size: st.Size()}, nil +} + +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +// perm param is ignored, on windows file perms/NT acls +// are not octet combinations. Providing access to NT +// acls is out of scope here. 
+func open(path string, flag int, perm os.FileMode) (*os.File, error) { + if path == "" { + return nil, syscall.ERROR_FILE_NOT_FOUND + } + + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return nil, err + } + + var access uint32 + switch flag { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + case syscall.O_RDWR | syscall.O_CREAT: + access = syscall.GENERIC_ALL + case syscall.O_WRONLY | syscall.O_CREAT: + access = syscall.GENERIC_ALL + } + + if flag&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + + var sa *syscall.SecurityAttributes + if flag&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + + var createflag uint32 + switch { + case flag&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createflag = syscall.CREATE_NEW + case flag&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createflag = syscall.CREATE_ALWAYS + case flag&syscall.O_CREAT == syscall.O_CREAT: + createflag = syscall.OPEN_ALWAYS + case flag&syscall.O_TRUNC == syscall.O_TRUNC: + createflag = syscall.TRUNCATE_EXISTING + default: + createflag = syscall.OPEN_EXISTING + } + + shareflag := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) + accessAttr := uint32(syscall.FILE_ATTRIBUTE_NORMAL | 0x80000000) + + fd, err := syscall.CreateFile(pathp, access, shareflag, sa, createflag, accessAttr, 0) + if err != nil { + return nil, err + } + + return os.NewFile(uintptr(fd), path), nil +} + +func lockFile(fd syscall.Handle, flags uint32) error { + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + var flag uint32 = 2 // Lockfile exlusive. 
+ flag |= flags + + if fd == syscall.InvalidHandle { + return nil + } + + err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err.Error() == errLocked.Error() { + return errors.New("lock already acquired") + } else if err != errLockViolation { + return err + } + + return nil +} + +func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + var reserved = uint32(0) + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/pkg/quick/quick.go b/pkg/quick/quick.go index 82a8b66b0..15094e522 100644 --- a/pkg/quick/quick.go +++ b/pkg/quick/quick.go @@ -298,7 +298,7 @@ func (d config) Diff(c Config) ([]structs.Field, error) { currFields := structs.Fields(d.Data()) newFields := structs.Fields(c.Data()) - found := false + var found bool for _, currField := range currFields { found = false for _, newField := range newFields { @@ -324,7 +324,7 @@ func (d config) DeepDiff(c Config) ([]structs.Field, error) { currFields := structs.Fields(d.Data()) newFields := structs.Fields(c.Data()) - found := false + var found bool for _, currField := range currFields { found = false for _, newField := range newFields { From 98a6a2bcab10dc1c8884f80e1938b00b86f1bcbe Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 16 Jan 2017 19:23:43 -0800 Subject: [PATCH 073/100] obj: Return objectInfo for CompleteMultipartUpload(). (#3587) This patch avoids doing GetObjectInfo() in similar way how we did for PutOject(). 
--- cmd/fs-v1-multipart.go | 57 +++++++++++++++++--------------- cmd/object-api-interface.go | 2 +- cmd/object-api-multipart_test.go | 2 +- cmd/object-handlers.go | 14 ++------ cmd/object_api_suite_test.go | 4 +-- cmd/xl-v1-multipart.go | 46 ++++++++++++++++---------- cmd/xl-v1-object.go | 2 ++ 7 files changed, 69 insertions(+), 58 deletions(-) diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go index bea98d0f8..a1079f795 100644 --- a/cmd/fs-v1-multipart.go +++ b/cmd/fs-v1-multipart.go @@ -585,19 +585,19 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM // md5sums of all the parts. // // Implements S3 compatible Complete multipart API. -func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) { +func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, error) { if err := checkCompleteMultipartArgs(bucket, object, fs); err != nil { - return "", err + return ObjectInfo{}, err } if _, err := fs.statBucketDir(bucket); err != nil { - return "", toObjectErr(err, bucket) + return ObjectInfo{}, toObjectErr(err, bucket) } // Calculate s3 compatible md5sum for complete multipart. s3MD5, err := getCompleteMultipartMD5(parts) if err != nil { - return "", err + return ObjectInfo{}, err } uploadIDPath := pathJoin(bucket, object, uploadID) @@ -612,9 +612,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload rlk, err := fs.rwPool.Open(fsMetaPathMultipart) if err != nil { if err == errFileNotFound || err == errFileAccessDenied { - return "", traceError(InvalidUploadID{UploadID: uploadID}) + return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) } - return "", toObjectErr(traceError(err), bucket, object) + return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) } // Disallow any parallel abort or complete multipart operations. 
@@ -622,9 +622,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload if err != nil { fs.rwPool.Close(fsMetaPathMultipart) if err == errFileNotFound || err == errFileAccessDenied { - return "", traceError(InvalidUploadID{UploadID: uploadID}) + return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) } - return "", toObjectErr(traceError(err), bucket, object) + return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) } defer rwlk.Close() @@ -633,7 +633,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload _, err = fsMeta.ReadFrom(io.NewSectionReader(rlk, 0, rlk.Size())) if err != nil { fs.rwPool.Close(fsMetaPathMultipart) - return "", toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart) + return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart) } // Wait for any competing PutObject() operation on bucket/object, since same namespace @@ -642,10 +642,12 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload metaFile, err := fs.rwPool.Create(fsMetaPath) if err != nil { fs.rwPool.Close(fsMetaPathMultipart) - return "", toObjectErr(traceError(err), bucket, object) + return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) } defer metaFile.Close() + fsNSObjPath := pathJoin(fs.fsPath, bucket, object) + // This lock is held during rename of the appended tmp file to the actual // location so that any competing GetObject/PutObject/DeleteObject do not race. appendFallback := true // In case background-append did not append the required parts. 
@@ -655,10 +657,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload if err == nil { appendFallback = false fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID) - fsNSObjPath := pathJoin(fs.fsPath, bucket, object) if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil { fs.rwPool.Close(fsMetaPathMultipart) - return "", toObjectErr(err, minioMetaTmpBucket, uploadID) + return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, uploadID) } } } @@ -681,18 +682,18 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload partIdx := fsMeta.ObjectPartIndex(part.PartNumber) if partIdx == -1 { fs.rwPool.Close(fsMetaPathMultipart) - return "", traceError(InvalidPart{}) + return ObjectInfo{}, traceError(InvalidPart{}) } if fsMeta.Parts[partIdx].ETag != part.ETag { fs.rwPool.Close(fsMetaPathMultipart) - return "", traceError(BadDigest{}) + return ObjectInfo{}, traceError(BadDigest{}) } // All parts except the last part has to be atleast 5MB. 
if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) { fs.rwPool.Close(fsMetaPathMultipart) - return "", traceError(PartTooSmall{ + return ObjectInfo{}, traceError(PartTooSmall{ PartNumber: part.PartNumber, PartSize: fsMeta.Parts[partIdx].Size, PartETag: part.ETag, @@ -709,9 +710,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload if err != nil { fs.rwPool.Close(fsMetaPathMultipart) if err == errFileNotFound { - return "", traceError(InvalidPart{}) + return ObjectInfo{}, traceError(InvalidPart{}) } - return "", toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix) + return ObjectInfo{}, toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix) } // No need to hold a lock, this is a unique file and will be only written @@ -721,7 +722,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload if err != nil { reader.Close() fs.rwPool.Close(fsMetaPathMultipart) - return "", toObjectErr(traceError(err), bucket, object) + return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) } _, err = io.CopyBuffer(wfile, reader, buf) @@ -729,17 +730,16 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload wfile.Close() reader.Close() fs.rwPool.Close(fsMetaPathMultipart) - return "", toObjectErr(traceError(err), bucket, object) + return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) } wfile.Close() reader.Close() } - fsNSObjPath := pathJoin(fs.fsPath, bucket, object) if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil { fs.rwPool.Close(fsMetaPathMultipart) - return "", toObjectErr(err, minioMetaTmpBucket, uploadID) + return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, uploadID) } } @@ -755,7 +755,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload // Write all the set metadata. 
if _, err = fsMeta.WriteTo(metaFile); err != nil { fs.rwPool.Close(fsMetaPathMultipart) - return "", toObjectErr(err, bucket, object) + return ObjectInfo{}, toObjectErr(err, bucket, object) } // Close lock held on bucket/object/uploadid/fs.json, @@ -767,16 +767,21 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload multipartObjectDir := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object) multipartUploadIDDir := pathJoin(multipartObjectDir, uploadID) if err = fsRemoveUploadIDPath(multipartObjectDir, multipartUploadIDDir); err != nil { - return "", toObjectErr(traceError(err), bucket, object) + return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) } // Remove entry from `uploads.json`. if err = fs.removeUploadID(bucket, object, uploadID, rwlk); err != nil { - return "", toObjectErr(err, minioMetaMultipartBucket, pathutil.Join(bucket, object)) + return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, pathutil.Join(bucket, object)) } - // Return md5sum. - return s3MD5, nil + fi, err := fsStatFile(fsNSObjPath) + if err != nil { + return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) + } + + // Return object info. 
+ return fsMeta.ToObjectInfo(bucket, object, fi), nil } // AbortMultipartUpload - aborts an ongoing multipart operation diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index 04a88061b..b0a04510c 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -44,7 +44,7 @@ type ObjectLayer interface { PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (md5 string, err error) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) AbortMultipartUpload(bucket, object, uploadID string) error - CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (md5 string, err error) + CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error) // Healing operations. HealBucket(bucket string) error diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index f15cad520..abb5f8841 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -1930,7 +1930,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T if actualErr == nil && testCase.shouldPass { // Asserting IsTruncated. - if actualResult != testCase.expectedS3MD5 { + if actualResult.MD5Sum != testCase.expectedS3MD5 { t.Errorf("Test %d: %s: Expected the result to be \"%v\", but found it to \"%v\"", i+1, instanceType, testCase.expectedS3MD5, actualResult) } } diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index e8490c138..3c9100ec7 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -733,7 +733,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite // Get upload id. 
uploadID, _, _, _ := getObjectResources(r.URL.Query()) - var md5Sum string completeMultipartBytes, err := ioutil.ReadAll(r.Body) if err != nil { errorIf(err, "Unable to complete multipart upload.") @@ -768,7 +767,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite destLock.Lock() defer destLock.Unlock() - md5Sum, err = objectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts) + objInfo, err := objectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts) if err != nil { errorIf(err, "Unable to complete multipart upload.") err = errorCause(err) @@ -786,7 +785,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite // Get object location. location := getLocation(r) // Generate complete multipart response. - response := generateCompleteMultpartUploadResponse(bucket, object, location, md5Sum) + response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo.MD5Sum) encodedSuccessResponse := encodeResponse(response) if err != nil { errorIf(err, "Unable to parse CompleteMultipartUpload response") @@ -795,18 +794,11 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite } // Set etag. - w.Header().Set("ETag", "\""+md5Sum+"\"") + w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"") // Write success response. writeSuccessResponseXML(w, encodedSuccessResponse) - // Fetch object info for notifications. - objInfo, err := objectAPI.GetObjectInfo(bucket, object) - if err != nil { - errorIf(err, "Unable to fetch object info for \"%s\"", path.Join(bucket, object)) - return - } - // Notify object created event. 
eventNotify(eventData{ Type: ObjectCreatedCompleteMultipartUpload, diff --git a/cmd/object_api_suite_test.go b/cmd/object_api_suite_test.go index 5166f65c4..88ad87964 100644 --- a/cmd/object_api_suite_test.go +++ b/cmd/object_api_suite_test.go @@ -116,11 +116,11 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, c TestErr } completedParts.Parts = append(completedParts.Parts, completePart{PartNumber: i, ETag: calculatedMD5sum}) } - md5Sum, err := obj.CompleteMultipartUpload("bucket", "key", uploadID, completedParts.Parts) + objInfo, err := obj.CompleteMultipartUpload("bucket", "key", uploadID, completedParts.Parts) if err != nil { c.Fatalf("%s: %s", instanceType, err) } - if md5Sum != "7d364cb728ce42a74a96d22949beefb2-10" { + if objInfo.MD5Sum != "7d364cb728ce42a74a96d22949beefb2-10" { c.Errorf("Md5 mismtch") } } diff --git a/cmd/xl-v1-multipart.go b/cmd/xl-v1-multipart.go index 1ce509639..58f421121 100644 --- a/cmd/xl-v1-multipart.go +++ b/cmd/xl-v1-multipart.go @@ -575,9 +575,9 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM // md5sums of all the parts. // // Implements S3 compatible Complete multipart API. -func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) { +func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, error) { if err := checkCompleteMultipartArgs(bucket, object, xl); err != nil { - return "", err + return ObjectInfo{}, err } // Hold lock so that @@ -592,12 +592,12 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload defer uploadIDLock.Unlock() if !xl.isUploadIDExists(bucket, object, uploadID) { - return "", traceError(InvalidUploadID{UploadID: uploadID}) + return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) } // Calculate s3 compatible md5sum for complete multipart. 
s3MD5, err := getCompleteMultipartMD5(parts) if err != nil { - return "", err + return ObjectInfo{}, err } uploadIDPath := pathJoin(bucket, object, uploadID) @@ -606,7 +606,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload partsMetadata, errs := readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket, uploadIDPath) // Do we have writeQuorum?. if !isDiskQuorum(errs, xl.writeQuorum) { - return "", toObjectErr(traceError(errXLWriteQuorum), bucket, object) + return ObjectInfo{}, toObjectErr(traceError(errXLWriteQuorum), bucket, object) } onlineDisks, modTime := listOnlineDisks(xl.storageDisks, partsMetadata, errs) @@ -617,7 +617,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload // Pick one from the first valid metadata. xlMeta, err := pickValidXLMeta(partsMetadata, modTime) if err != nil { - return "", err + return ObjectInfo{}, err } // Order online disks in accordance with distribution order. @@ -637,17 +637,17 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber) // All parts should have same part number. if partIdx == -1 { - return "", traceError(InvalidPart{}) + return ObjectInfo{}, traceError(InvalidPart{}) } // All parts should have same ETag as previously generated. if currentXLMeta.Parts[partIdx].ETag != part.ETag { - return "", traceError(BadDigest{}) + return ObjectInfo{}, traceError(BadDigest{}) } // All parts except the last part has to be atleast 5MB. if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].Size) { - return "", traceError(PartTooSmall{ + return ObjectInfo{}, traceError(PartTooSmall{ PartNumber: part.PartNumber, PartSize: currentXLMeta.Parts[partIdx].Size, PartETag: part.ETag, @@ -675,7 +675,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload // Check if an object is present as one of the parent dir. 
if xl.parentDirIsObject(bucket, path.Dir(object)) { - return "", toObjectErr(traceError(errFileAccessDenied), bucket, object) + return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object) } // Save the final object size and modtime. @@ -697,11 +697,11 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload // Write unique `xl.json` for each disk. if err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, xl.writeQuorum); err != nil { - return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath) + return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath) } rErr := commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum) if rErr != nil { - return "", toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath) + return ObjectInfo{}, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath) } defer func() { @@ -725,7 +725,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload // regardless of `xl.json` status and rolled back in case of errors. err = renameObject(xl.storageDisks, bucket, object, minioMetaTmpBucket, uniqueID, xl.writeQuorum) if err != nil { - return "", toObjectErr(err, bucket, object) + return ObjectInfo{}, toObjectErr(err, bucket, object) } } @@ -744,7 +744,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload // Rename the multipart object to final location. if err = renameObject(onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, xl.writeQuorum); err != nil { - return "", toObjectErr(err, bucket, object) + return ObjectInfo{}, toObjectErr(err, bucket, object) } // Delete the previously successfully renamed object. 
@@ -760,11 +760,23 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload // remove entry from uploads.json with quorum if err = xl.removeUploadID(bucket, object, uploadID); err != nil { - return "", toObjectErr(err, minioMetaMultipartBucket, path.Join(bucket, object)) + return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, path.Join(bucket, object)) } - // Return md5sum. - return s3MD5, nil + objInfo := ObjectInfo{ + IsDir: false, + Bucket: bucket, + Name: object, + Size: xlMeta.Stat.Size, + ModTime: xlMeta.Stat.ModTime, + MD5Sum: xlMeta.Meta["md5Sum"], + ContentType: xlMeta.Meta["content-type"], + ContentEncoding: xlMeta.Meta["content-encoding"], + UserDefined: xlMeta.Meta, + } + + // Success, return object info. + return objInfo, nil } // abortMultipartUpload - wrapper for purging an ongoing multipart diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go index d22c24ab4..8fa41ca77 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/xl-v1-object.go @@ -638,6 +638,8 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io. ContentEncoding: xlMeta.Meta["content-encoding"], UserDefined: xlMeta.Meta, } + + // Success, return object info. return objInfo, nil } From c194b9f5f1d49559c8dde8a97be6529327e71acc Mon Sep 17 00:00:00 2001 From: Krishnan Parthasarathi Date: Tue, 17 Jan 2017 23:32:58 +0530 Subject: [PATCH 074/100] Implement mgmt REST APIs for heal subcommands (#3533) The heal APIs supported in this change are, - listing of objects to be healed. - healing a bucket. - healing an object. 
--- cmd/admin-handlers.go | 210 ++++++++++- cmd/admin-handlers_test.go | 548 ++++++++++++++++++++++++++--- cmd/admin-router.go | 10 +- cmd/api-response.go | 3 + cmd/object-api-datatypes.go | 18 +- cmd/object-api-utils.go | 12 +- cmd/object-api-utils_test.go | 37 ++ cmd/xl-v1-healing-common.go | 68 +++- cmd/xl-v1-healing-common_test.go | 4 +- cmd/xl-v1-healing.go | 4 +- cmd/xl-v1-list-objects-heal.go | 23 +- cmd/xl-v1-utils.go | 2 +- pkg/madmin/API.md | 147 ++++++-- pkg/madmin/examples/heal-bucket.go | 58 +++ pkg/madmin/examples/heal-list.go | 76 ++++ pkg/madmin/examples/heal-object.go | 57 +++ pkg/madmin/heal-commands.go | 308 ++++++++++++++++ 17 files changed, 1482 insertions(+), 103 deletions(-) create mode 100644 pkg/madmin/examples/heal-bucket.go create mode 100644 pkg/madmin/examples/heal-list.go create mode 100644 pkg/madmin/examples/heal-object.go create mode 100644 pkg/madmin/heal-commands.go diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 6e5497b7e..cebfcd3b0 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ import ( "encoding/json" "net/http" "net/url" + "strconv" "time" ) @@ -27,6 +28,21 @@ const ( minioAdminOpHeader = "X-Minio-Operation" ) +// Type-safe query params. +type mgmtQueryKey string + +// Only valid query params for list/clear locks management APIs. 
+const ( + mgmtBucket mgmtQueryKey = "bucket" + mgmtObject mgmtQueryKey = "object" + mgmtPrefix mgmtQueryKey = "prefix" + mgmtOlderThan mgmtQueryKey = "older-than" + mgmtDelimiter mgmtQueryKey = "delimiter" + mgmtMarker mgmtQueryKey = "marker" + mgmtMaxKey mgmtQueryKey = "max-key" + mgmtDryRun mgmtQueryKey = "dry-run" +) + // ServiceStatusHandler - GET /?service // HTTP header x-minio-operation: status // ---------- @@ -63,32 +79,21 @@ func (adminAPI adminAPIHandlers) ServiceRestartHandler(w http.ResponseWriter, r } // Reply to the client before restarting minio server. - w.WriteHeader(http.StatusOK) + writeSuccessResponseHeadersOnly(w) sendServiceCmd(globalAdminPeers, serviceRestart) } -// Type-safe lock query params. -type lockQueryKey string - -// Only valid query params for list/clear locks management APIs. -const ( - lockBucket lockQueryKey = "bucket" - lockPrefix lockQueryKey = "prefix" - lockOlderThan lockQueryKey = "older-than" -) - // validateLockQueryParams - Validates query params for list/clear locks management APIs. func validateLockQueryParams(vars url.Values) (string, string, time.Duration, APIErrorCode) { - bucket := vars.Get(string(lockBucket)) - prefix := vars.Get(string(lockPrefix)) - relTimeStr := vars.Get(string(lockOlderThan)) + bucket := vars.Get(string(mgmtBucket)) + prefix := vars.Get(string(mgmtPrefix)) + relTimeStr := vars.Get(string(mgmtOlderThan)) // N B empty bucket name is invalid if !IsValidBucketName(bucket) { return "", "", time.Duration(0), ErrInvalidBucketName } - // empty prefix is valid. if !IsValidObjectPrefix(prefix) { return "", "", time.Duration(0), ErrInvalidObjectName @@ -195,3 +200,176 @@ func (adminAPI adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *htt // Reply with list of locks cleared, as json. writeSuccessResponseJSON(w, jsonBytes) } + +// validateHealQueryParams - Validates query params for heal list management API. 
+func validateHealQueryParams(vars url.Values) (string, string, string, string, int, APIErrorCode) { + bucket := vars.Get(string(mgmtBucket)) + prefix := vars.Get(string(mgmtPrefix)) + marker := vars.Get(string(mgmtMarker)) + delimiter := vars.Get(string(mgmtDelimiter)) + maxKeyStr := vars.Get(string(mgmtMaxKey)) + + // N B empty bucket name is invalid + if !IsValidBucketName(bucket) { + return "", "", "", "", 0, ErrInvalidBucketName + } + + // empty prefix is valid. + if !IsValidObjectPrefix(prefix) { + return "", "", "", "", 0, ErrInvalidObjectName + } + + // check if maxKey is a valid integer. + maxKey, err := strconv.Atoi(maxKeyStr) + if err != nil { + return "", "", "", "", 0, ErrInvalidMaxKeys + } + + // Validate prefix, marker, delimiter and maxKey. + apiErr := validateListObjectsArgs(prefix, marker, delimiter, maxKey) + if apiErr != ErrNone { + return "", "", "", "", 0, apiErr + } + + return bucket, prefix, marker, delimiter, maxKey, ErrNone +} + +// ListObjectsHealHandler - GET /?heal&bucket=mybucket&prefix=myprefix&marker=mymarker&delimiter=&mydelimiter&maxKey=1000 +// - bucket is mandatory query parameter +// - rest are optional query parameters +// List upto maxKey objects that need healing in a given bucket matching the given prefix. +func (adminAPI adminAPIHandlers) ListObjectsHealHandler(w http.ResponseWriter, r *http.Request) { + // Get object layer instance. + objLayer := newObjectLayerFn() + if objLayer == nil { + writeErrorResponse(w, ErrServerNotInitialized, r.URL) + return + } + + // Validate request signature. + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, adminAPIErr, r.URL) + return + } + + // Validate query params. + vars := r.URL.Query() + bucket, prefix, marker, delimiter, maxKey, adminAPIErr := validateHealQueryParams(vars) + if adminAPIErr != ErrNone { + writeErrorResponse(w, adminAPIErr, r.URL) + return + } + + // Get the list objects to be healed. 
+ objectInfos, err := objLayer.ListObjectsHeal(bucket, prefix, marker, delimiter, maxKey) + if err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + listResponse := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKey, objectInfos) + // Write success response. + writeSuccessResponseXML(w, encodeResponse(listResponse)) +} + +// HealBucketHandler - POST /?heal&bucket=mybucket +// - bucket is mandatory query parameter +// Heal a given bucket, if present. +func (adminAPI adminAPIHandlers) HealBucketHandler(w http.ResponseWriter, r *http.Request) { + // Get object layer instance. + objLayer := newObjectLayerFn() + if objLayer == nil { + writeErrorResponse(w, ErrServerNotInitialized, r.URL) + return + } + + // Validate request signature. + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, adminAPIErr, r.URL) + return + } + + // Validate bucket name and check if it exists. + vars := r.URL.Query() + bucket := vars.Get(string(mgmtBucket)) + if err := checkBucketExist(bucket, objLayer); err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + // if dry-run=yes, then only perform validations and return success. + if isDryRun(vars) { + writeSuccessResponseHeadersOnly(w) + return + } + + // Heal the given bucket. + err := objLayer.HealBucket(bucket) + if err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + // Return 200 on success. + writeSuccessResponseHeadersOnly(w) +} + +// isDryRun - returns true if dry-run query param was set to yes and false otherwise. +func isDryRun(qval url.Values) bool { + if dryRun := qval.Get(string(mgmtDryRun)); dryRun == "yes" { + return true + } + return false +} + +// HealObjectHandler - POST /?heal&bucket=mybucket&object=myobject +// - bucket and object are both mandatory query parameters +// Heal a given object, if present. 
+func (adminAPI adminAPIHandlers) HealObjectHandler(w http.ResponseWriter, r *http.Request) { + // Get object layer instance. + objLayer := newObjectLayerFn() + if objLayer == nil { + writeErrorResponse(w, ErrServerNotInitialized, r.URL) + return + } + + // Validate request signature. + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, adminAPIErr, r.URL) + return + } + + vars := r.URL.Query() + bucket := vars.Get(string(mgmtBucket)) + object := vars.Get(string(mgmtObject)) + + // Validate bucket and object names. + if err := checkBucketAndObjectNames(bucket, object); err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + // Check if object exists. + if _, err := objLayer.GetObjectInfo(bucket, object); err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + // if dry-run=yes, then only perform validations and return success. + if isDryRun(vars) { + writeSuccessResponseHeadersOnly(w) + return + } + + err := objLayer.HealObject(bucket, object) + if err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + // Return 200 on success. + writeSuccessResponseHeadersOnly(w) +} diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 0a9e370d5..0f0c39c66 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -17,8 +17,8 @@ package cmd import ( + "bytes" "encoding/json" - "fmt" "net/http" "net/http/httptest" "net/url" @@ -134,11 +134,12 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) { if cmd == statusCmd { // Initializing objectLayer and corresponding // []StorageAPI since DiskInfo() method requires it. 
- objLayer, fsDirs, fsErr := prepareXL() - if fsErr != nil { - t.Fatalf("failed to initialize XL based object layer - %v.", fsErr) + objLayer, xlDirs, xlErr := prepareXL() + if xlErr != nil { + t.Fatalf("failed to initialize XL based object layer - %v.", xlErr) } - defer removeRoots(fsDirs) + defer removeRoots(xlDirs) + // Make objLayer available to all internal services via globalObjectAPI. globalObjLayerMutex.Lock() globalObjectAPI = objLayer globalObjLayerMutex.Unlock() @@ -188,6 +189,16 @@ func TestServiceRestartHandler(t *testing.T) { testServicesCmdHandler(restartCmd, t) } +// mkLockQueryVal - helper function to build lock query param. +func mkLockQueryVal(bucket, prefix, relTimeStr string) url.Values { + qVal := url.Values{} + qVal.Set("lock", "") + qVal.Set(string(mgmtBucket), bucket) + qVal.Set(string(mgmtPrefix), prefix) + qVal.Set(string(mgmtOlderThan), relTimeStr) + return qVal +} + // Test for locks list management REST API. func TestListLocksHandler(t *testing.T) { // reset globals. @@ -212,6 +223,10 @@ func TestListLocksHandler(t *testing.T) { globalMinioAddr = eps[0].Host initGlobalAdminPeers(eps) + // Setup admin mgmt REST API handlers. 
+ adminRouter := router.NewRouter() + registerAdminRouter(adminRouter) + testCases := []struct { bucket string prefix string @@ -223,37 +238,34 @@ func TestListLocksHandler(t *testing.T) { bucket: "mybucket", prefix: "myobject", relTime: "1s", - expectedStatus: 200, + expectedStatus: http.StatusOK, }, // Test 2 - invalid duration { bucket: "mybucket", prefix: "myprefix", relTime: "invalidDuration", - expectedStatus: 400, + expectedStatus: http.StatusBadRequest, }, // Test 3 - invalid bucket name { bucket: `invalid\\Bucket`, prefix: "myprefix", relTime: "1h", - expectedStatus: 400, + expectedStatus: http.StatusBadRequest, }, // Test 4 - invalid prefix { bucket: "mybucket", prefix: `invalid\\Prefix`, relTime: "1h", - expectedStatus: 400, + expectedStatus: http.StatusBadRequest, }, } - adminRouter := router.NewRouter() - registerAdminRouter(adminRouter) - for i, test := range testCases { - queryStr := fmt.Sprintf("&bucket=%s&prefix=%s&older-than=%s", test.bucket, test.prefix, test.relTime) - req, err := newTestRequest("GET", "/?lock"+queryStr, 0, nil) + queryVal := mkLockQueryVal(test.bucket, test.prefix, test.relTime) + req, err := newTestRequest("GET", "/?"+queryVal.Encode(), 0, nil) if err != nil { t.Fatalf("Test %d - Failed to construct list locks request - %v", i+1, err) } @@ -293,6 +305,10 @@ func TestClearLocksHandler(t *testing.T) { } initGlobalAdminPeers(eps) + // Setup admin mgmt REST API handlers. 
+ adminRouter := router.NewRouter() + registerAdminRouter(adminRouter) + testCases := []struct { bucket string prefix string @@ -304,37 +320,34 @@ func TestClearLocksHandler(t *testing.T) { bucket: "mybucket", prefix: "myobject", relTime: "1s", - expectedStatus: 200, + expectedStatus: http.StatusOK, }, // Test 2 - invalid duration { bucket: "mybucket", prefix: "myprefix", relTime: "invalidDuration", - expectedStatus: 400, + expectedStatus: http.StatusBadRequest, }, // Test 3 - invalid bucket name { bucket: `invalid\\Bucket`, prefix: "myprefix", relTime: "1h", - expectedStatus: 400, + expectedStatus: http.StatusBadRequest, }, // Test 4 - invalid prefix { bucket: "mybucket", prefix: `invalid\\Prefix`, relTime: "1h", - expectedStatus: 400, + expectedStatus: http.StatusBadRequest, }, } - adminRouter := router.NewRouter() - registerAdminRouter(adminRouter) - for i, test := range testCases { - queryStr := fmt.Sprintf("&bucket=%s&prefix=%s&older-than=%s", test.bucket, test.prefix, test.relTime) - req, err := newTestRequest("POST", "/?lock"+queryStr, 0, nil) + queryVal := mkLockQueryVal(test.bucket, test.prefix, test.relTime) + req, err := newTestRequest("POST", "/?"+queryVal.Encode(), 0, nil) if err != nil { t.Fatalf("Test %d - Failed to construct clear locks request - %v", i+1, err) } @@ -361,25 +374,10 @@ func TestValidateLockQueryParams(t *testing.T) { // initialize NSLock. initNSLock(false) // Sample query values for test cases. 
- allValidVal := url.Values{} - allValidVal.Set(string(lockBucket), "bucket") - allValidVal.Set(string(lockPrefix), "prefix") - allValidVal.Set(string(lockOlderThan), "1s") - - invalidBucketVal := url.Values{} - invalidBucketVal.Set(string(lockBucket), `invalid\\Bucket`) - invalidBucketVal.Set(string(lockPrefix), "prefix") - invalidBucketVal.Set(string(lockOlderThan), "invalidDuration") - - invalidPrefixVal := url.Values{} - invalidPrefixVal.Set(string(lockBucket), "bucket") - invalidPrefixVal.Set(string(lockPrefix), `invalid\\PRefix`) - invalidPrefixVal.Set(string(lockOlderThan), "invalidDuration") - - invalidOlderThanVal := url.Values{} - invalidOlderThanVal.Set(string(lockBucket), "bucket") - invalidOlderThanVal.Set(string(lockPrefix), "prefix") - invalidOlderThanVal.Set(string(lockOlderThan), "invalidDuration") + allValidVal := mkLockQueryVal("bucket", "prefix", "1s") + invalidBucketVal := mkLockQueryVal(`invalid\\Bucket`, "prefix", "1s") + invalidPrefixVal := mkLockQueryVal("bucket", `invalid\\Prefix`, "1s") + invalidOlderThanVal := mkLockQueryVal("bucket", "prefix", "invalidDuration") testCases := []struct { qVals url.Values @@ -410,3 +408,469 @@ func TestValidateLockQueryParams(t *testing.T) { } } } + +// mkListObjectsQueryStr - helper to build ListObjectsHeal query string. +func mkListObjectsQueryVal(bucket, prefix, marker, delimiter, maxKeyStr string) url.Values { + qVal := url.Values{} + qVal.Set("heal", "") + qVal.Set(string(mgmtBucket), bucket) + qVal.Set(string(mgmtPrefix), prefix) + qVal.Set(string(mgmtMarker), marker) + qVal.Set(string(mgmtDelimiter), delimiter) + qVal.Set(string(mgmtMaxKey), maxKeyStr) + return qVal +} + +// TestValidateHealQueryParams - Test for query param validation helper function for heal APIs. +func TestValidateHealQueryParams(t *testing.T) { + testCases := []struct { + bucket string + prefix string + marker string + delimiter string + maxKeys string + apiErr APIErrorCode + }{ + // 1. Valid params. 
+ { + bucket: "mybucket", + prefix: "prefix", + marker: "prefix11", + delimiter: "/", + maxKeys: "10", + apiErr: ErrNone, + }, + // 2. Valid params with meta bucket. + { + bucket: minioMetaBucket, + prefix: "prefix", + marker: "prefix11", + delimiter: "/", + maxKeys: "10", + apiErr: ErrNone, + }, + // 3. Valid params with empty prefix. + { + bucket: "mybucket", + prefix: "", + marker: "", + delimiter: "/", + maxKeys: "10", + apiErr: ErrNone, + }, + // 4. Invalid params with invalid bucket. + { + bucket: `invalid\\Bucket`, + prefix: "prefix", + marker: "prefix11", + delimiter: "/", + maxKeys: "10", + apiErr: ErrInvalidBucketName, + }, + // 5. Invalid params with invalid prefix. + { + bucket: "mybucket", + prefix: `invalid\\Prefix`, + marker: "prefix11", + delimiter: "/", + maxKeys: "10", + apiErr: ErrInvalidObjectName, + }, + // 6. Invalid params with invalid maxKeys. + { + bucket: "mybucket", + prefix: "prefix", + marker: "prefix11", + delimiter: "/", + maxKeys: "-1", + apiErr: ErrInvalidMaxKeys, + }, + // 7. Invalid params with unsupported prefix marker combination. + { + bucket: "mybucket", + prefix: "prefix", + marker: "notmatchingmarker", + delimiter: "/", + maxKeys: "10", + apiErr: ErrNotImplemented, + }, + // 8. Invalid params with unsupported delimiter. + { + bucket: "mybucket", + prefix: "prefix", + marker: "notmatchingmarker", + delimiter: "unsupported", + maxKeys: "10", + apiErr: ErrNotImplemented, + }, + // 9. 
Invalid params with invalid max Keys + { + bucket: "mybucket", + prefix: "prefix", + marker: "prefix11", + delimiter: "/", + maxKeys: "999999999999999999999999999", + apiErr: ErrInvalidMaxKeys, + }, + } + for i, test := range testCases { + vars := mkListObjectsQueryVal(test.bucket, test.prefix, test.marker, test.delimiter, test.maxKeys) + _, _, _, _, _, actualErr := validateHealQueryParams(vars) + if actualErr != test.apiErr { + t.Errorf("Test %d - Expected %v but received %v", + i+1, getAPIError(test.apiErr), getAPIError(actualErr)) + } + } +} + +// TestListObjectsHeal - Test for ListObjectsHealHandler. +func TestListObjectsHealHandler(t *testing.T) { + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Unable to initialize server config. %s", err) + } + defer removeAll(rootPath) + + // Initializing objectLayer and corresponding []StorageAPI + // since ListObjectsHeal() method requires it. + objLayer, xlDirs, xlErr := prepareXL() + if xlErr != nil { + t.Fatalf("failed to initialize XL based object layer - %v.", xlErr) + } + defer removeRoots(xlDirs) + + err = objLayer.MakeBucket("mybucket") + if err != nil { + t.Fatalf("Failed to make bucket - %v", err) + } + + // Delete bucket after running all test cases. + defer objLayer.DeleteBucket("mybucket") + + // Make objLayer available to all internal services via globalObjectAPI. + globalObjLayerMutex.Lock() + globalObjectAPI = objLayer + globalObjLayerMutex.Unlock() + + // Setup admin mgmt REST API handlers. + adminRouter := router.NewRouter() + registerAdminRouter(adminRouter) + + testCases := []struct { + bucket string + prefix string + marker string + delimiter string + maxKeys string + statusCode int + }{ + // 1. Valid params. + { + bucket: "mybucket", + prefix: "prefix", + marker: "prefix11", + delimiter: "/", + maxKeys: "10", + statusCode: http.StatusOK, + }, + // 2. Valid params with meta bucket. 
+ { + bucket: minioMetaBucket, + prefix: "prefix", + marker: "prefix11", + delimiter: "/", + maxKeys: "10", + statusCode: http.StatusOK, + }, + // 3. Valid params with empty prefix. + { + bucket: "mybucket", + prefix: "", + marker: "", + delimiter: "/", + maxKeys: "10", + statusCode: http.StatusOK, + }, + // 4. Invalid params with invalid bucket. + { + bucket: `invalid\\Bucket`, + prefix: "prefix", + marker: "prefix11", + delimiter: "/", + maxKeys: "10", + statusCode: getAPIError(ErrInvalidBucketName).HTTPStatusCode, + }, + // 5. Invalid params with invalid prefix. + { + bucket: "mybucket", + prefix: `invalid\\Prefix`, + marker: "prefix11", + delimiter: "/", + maxKeys: "10", + statusCode: getAPIError(ErrInvalidObjectName).HTTPStatusCode, + }, + // 6. Invalid params with invalid maxKeys. + { + bucket: "mybucket", + prefix: "prefix", + marker: "prefix11", + delimiter: "/", + maxKeys: "-1", + statusCode: getAPIError(ErrInvalidMaxKeys).HTTPStatusCode, + }, + // 7. Invalid params with unsupported prefix marker combination. + { + bucket: "mybucket", + prefix: "prefix", + marker: "notmatchingmarker", + delimiter: "/", + maxKeys: "10", + statusCode: getAPIError(ErrNotImplemented).HTTPStatusCode, + }, + // 8. Invalid params with unsupported delimiter. + { + bucket: "mybucket", + prefix: "prefix", + marker: "notmatchingmarker", + delimiter: "unsupported", + maxKeys: "10", + statusCode: getAPIError(ErrNotImplemented).HTTPStatusCode, + }, + // 9. 
Invalid params with invalid max Keys + { + bucket: "mybucket", + prefix: "prefix", + marker: "prefix11", + delimiter: "/", + maxKeys: "999999999999999999999999999", + statusCode: getAPIError(ErrInvalidMaxKeys).HTTPStatusCode, + }, + } + + for i, test := range testCases { + // Exercise every test case above against the + // admin router; a debug-only skip of all but + // the first case has been removed. + queryVal := mkListObjectsQueryVal(test.bucket, test.prefix, test.marker, test.delimiter, test.maxKeys) + req, err := newTestRequest("GET", "/?"+queryVal.Encode(), 0, nil) + if err != nil { + t.Fatalf("Test %d - Failed to construct list objects needing heal request - %v", i+1, err) + } + req.Header.Set(minioAdminOpHeader, "list") + + cred := serverConfig.GetCredential() + err = signRequestV4(req, cred.AccessKey, cred.SecretKey) + if err != nil { + t.Fatalf("Test %d - Failed to sign list objects needing heal request - %v", i+1, err) + } + rec := httptest.NewRecorder() + adminRouter.ServeHTTP(rec, req) + if test.statusCode != rec.Code { + t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.statusCode, rec.Code) + } + } +} + +// TestHealBucketHandler - Test for HealBucketHandler. +func TestHealBucketHandler(t *testing.T) { + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Unable to initialize server config. %s", err) + } + defer removeAll(rootPath) + + // Initializing objectLayer and corresponding []StorageAPI + // since MakeBucket() and DeleteBucket() methods require it. + objLayer, xlDirs, xlErr := prepareXL() + if xlErr != nil { + t.Fatalf("failed to initialize XL based object layer - %v.", xlErr) + } + defer removeRoots(xlDirs) + + err = objLayer.MakeBucket("mybucket") + if err != nil { + t.Fatalf("Failed to make bucket - %v", err) + } + + // Delete bucket after running all test cases. + defer objLayer.DeleteBucket("mybucket") + + // Make objLayer available to all internal services via globalObjectAPI. 
+ globalObjLayerMutex.Lock() + globalObjectAPI = objLayer + globalObjLayerMutex.Unlock() + + // Setup admin mgmt REST API handlers. + adminRouter := router.NewRouter() + registerAdminRouter(adminRouter) + + testCases := []struct { + bucket string + statusCode int + dryrun string + }{ + // 1. Valid test case. + { + bucket: "mybucket", + statusCode: http.StatusOK, + }, + // 2. Invalid bucket name. + { + bucket: `invalid\\Bucket`, + statusCode: http.StatusBadRequest, + }, + // 3. Bucket not found. + { + bucket: "bucketnotfound", + statusCode: http.StatusNotFound, + }, + // 4. Valid test case with dry-run. + { + bucket: "mybucket", + statusCode: http.StatusOK, + dryrun: "yes", + }, + } + for i, test := range testCases { + // Prepare query params. + queryVal := url.Values{} + queryVal.Set(string(mgmtBucket), test.bucket) + queryVal.Set("heal", "") + queryVal.Set(string(mgmtDryRun), test.dryrun) + + req, err := newTestRequest("POST", "/?"+queryVal.Encode(), 0, nil) + if err != nil { + t.Fatalf("Test %d - Failed to construct heal bucket request - %v", i+1, err) + } + + req.Header.Set(minioAdminOpHeader, "bucket") + + cred := serverConfig.GetCredential() + err = signRequestV4(req, cred.AccessKey, cred.SecretKey) + if err != nil { + t.Fatalf("Test %d - Failed to sign heal bucket request - %v", i+1, err) + } + rec := httptest.NewRecorder() + adminRouter.ServeHTTP(rec, req) + if test.statusCode != rec.Code { + t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.statusCode, rec.Code) + } + + } +} + +// TestHealObjectHandler - Test for HealObjectHandler. +func TestHealObjectHandler(t *testing.T) { + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Unable to initialize server config. %s", err) + } + defer removeAll(rootPath) + + // Initializing objectLayer and corresponding []StorageAPI + // since MakeBucket(), PutObject() and DeleteBucket() method requires it. 
+ objLayer, xlDirs, xlErr := prepareXL() + if xlErr != nil { + t.Fatalf("failed to initialize XL based object layer - %v.", xlErr) + } + defer removeRoots(xlDirs) + + // Create an object myobject under bucket mybucket. + bucketName := "mybucket" + objName := "myobject" + err = objLayer.MakeBucket(bucketName) + if err != nil { + t.Fatalf("Failed to make bucket %s - %v", bucketName, err) + } + + _, err = objLayer.PutObject(bucketName, objName, int64(len("hello")), bytes.NewReader([]byte("hello")), nil, "") + if err != nil { + t.Fatalf("Failed to create %s - %v", objName, err) + } + + // Delete bucket and object after running all test cases. + defer func(objLayer ObjectLayer, bucketName, objName string) { + objLayer.DeleteObject(bucketName, objName) + objLayer.DeleteBucket(bucketName) + }(objLayer, bucketName, objName) + + // Make objLayer available to all internal services via globalObjectAPI. + globalObjLayerMutex.Lock() + globalObjectAPI = objLayer + globalObjLayerMutex.Unlock() + + // Setup admin mgmt REST API handlers. + adminRouter := router.NewRouter() + registerAdminRouter(adminRouter) + + testCases := []struct { + bucket string + object string + dryrun string + statusCode int + }{ + // 1. Valid test case. + { + bucket: bucketName, + object: objName, + statusCode: http.StatusOK, + }, + // 2. Invalid bucket name. + { + bucket: `invalid\\Bucket`, + object: "myobject", + statusCode: http.StatusBadRequest, + }, + // 3. Bucket not found. + { + bucket: "bucketnotfound", + object: "myobject", + statusCode: http.StatusNotFound, + }, + // 4. Invalid object name. + { + bucket: bucketName, + object: `invalid\\Object`, + statusCode: http.StatusBadRequest, + }, + // 5. Object not found. + { + bucket: bucketName, + object: "objectnotfound", + statusCode: http.StatusNotFound, + }, + // 6. Valid test case with dry-run. 
+ { + bucket: bucketName, + object: objName, + dryrun: "yes", + statusCode: http.StatusOK, + }, + } + for i, test := range testCases { + // Prepare query params. + queryVal := url.Values{} + queryVal.Set(string(mgmtBucket), test.bucket) + queryVal.Set(string(mgmtObject), test.object) + queryVal.Set("heal", "") + queryVal.Set(string(mgmtDryRun), test.dryrun) + + req, err := newTestRequest("POST", "/?"+queryVal.Encode(), 0, nil) + if err != nil { + t.Fatalf("Test %d - Failed to construct heal object request - %v", i+1, err) + } + + req.Header.Set(minioAdminOpHeader, "object") + + cred := serverConfig.GetCredential() + err = signRequestV4(req, cred.AccessKey, cred.SecretKey) + if err != nil { + t.Fatalf("Test %d - Failed to sign heal object request - %v", i+1, err) + } + rec := httptest.NewRecorder() + adminRouter.ServeHTTP(rec, req) + if test.statusCode != rec.Code { + t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.statusCode, rec.Code) + } + } +} diff --git a/cmd/admin-router.go b/cmd/admin-router.go index 053f01164..dde7faea8 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -41,7 +41,15 @@ func registerAdminRouter(mux *router.Router) { // List Locks adminRouter.Methods("GET").Queries("lock", "").Headers(minioAdminOpHeader, "list").HandlerFunc(adminAPI.ListLocksHandler) - // Clear locks adminRouter.Methods("POST").Queries("lock", "").Headers(minioAdminOpHeader, "clear").HandlerFunc(adminAPI.ClearLocksHandler) + + /// Heal operations + + // List Objects needing heal. + adminRouter.Methods("GET").Queries("heal", "").Headers(minioAdminOpHeader, "list").HandlerFunc(adminAPI.ListObjectsHealHandler) + // Heal Buckets. + adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "bucket").HandlerFunc(adminAPI.HealBucketHandler) + // Heal Objects. 
+ adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "object").HandlerFunc(adminAPI.HealObjectHandler) } diff --git a/cmd/api-response.go b/cmd/api-response.go index 42d2730d4..f8f175903 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -197,6 +197,7 @@ type Object struct { // The class of storage used to store the object. StorageClass string + HealInfo *HealInfo `xml:"HealInfo,omitempty"` } // CopyObjectResponse container returns ETag and LastModified of the successfully copied object @@ -316,6 +317,8 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max content.Size = object.Size content.StorageClass = "STANDARD" content.Owner = owner + // object.HealInfo is non-empty only when resp is constructed in ListObjectsHeal. + content.HealInfo = object.HealInfo contents = append(contents, content) } // TODO - support EncodingType in xml decoding diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go index bec29c0cc..336fb16b2 100644 --- a/cmd/object-api-datatypes.go +++ b/cmd/object-api-datatypes.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -59,6 +59,21 @@ type BucketInfo struct { Created time.Time } +type healStatus int + +const ( + canHeal healStatus = iota // Object can be healed + corrupted // Object can't be healed + quorumUnavailable // Object can't be healed until read quorum is available +) + +// HealInfo - represents healing related information of an object. +type HealInfo struct { + Status healStatus + MissingDataCount int + MissingPartityCount int +} + // ObjectInfo - represents object metadata. type ObjectInfo struct { // Name of the bucket. 
@@ -89,6 +104,7 @@ type ObjectInfo struct { // User-Defined metadata UserDefined map[string]string + HealInfo *HealInfo `xml:"HealInfo,omitempty"` } // ListPartsInfo - represents list of all parts. diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index d07a37c6c..95f63d339 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -43,13 +43,21 @@ const ( var validBucket = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) var isIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +// isMinioBucket returns true if given bucket is a Minio internal +// bucket and false otherwise. +func isMinioMetaBucketName(bucket string) bool { + return bucket == minioMetaBucket || + bucket == minioMetaMultipartBucket || + bucket == minioMetaTmpBucket +} + // IsValidBucketName verifies a bucket name in accordance with Amazon's // requirements. It must be 3-63 characters long, can contain dashes // and periods, but must begin and end with a lowercase letter or a number. // See: http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html func IsValidBucketName(bucket string) bool { - // Special case when bucket is equal to 'metaBucket'. - if bucket == minioMetaBucket || bucket == minioMetaMultipartBucket { + // Special case when bucket is equal to one of the meta buckets. + if isMinioMetaBucketName(bucket) { return true } if len(bucket) < 3 || len(bucket) > 63 { diff --git a/cmd/object-api-utils_test.go b/cmd/object-api-utils_test.go index 310b94b9e..b93ceabb8 100644 --- a/cmd/object-api-utils_test.go +++ b/cmd/object-api-utils_test.go @@ -175,3 +175,40 @@ func TestGetCompleteMultipartMD5(t *testing.T) { } } } + +// TestIsMinioBucketName - Tests isMinioBucketName helper function. +func TestIsMinioMetaBucketName(t *testing.T) { + testCases := []struct { + bucket string + result bool + }{ + // Minio meta bucket. + { + bucket: minioMetaBucket, + result: true, + }, + // Minio meta bucket. 
+ { + bucket: minioMetaMultipartBucket, + result: true, + }, + // Minio meta bucket. + { + bucket: minioMetaTmpBucket, + result: true, + }, + // Normal bucket + { + bucket: "mybucket", + result: false, + }, + } + + for i, test := range testCases { + actual := isMinioMetaBucketName(test.bucket) + if actual != test.result { + t.Errorf("Test %d - expected %v but received %v", + i+1, test.result, actual) + } + } +} diff --git a/cmd/xl-v1-healing-common.go b/cmd/xl-v1-healing-common.go index 70ffe3bd3..c87d60977 100644 --- a/cmd/xl-v1-healing-common.go +++ b/cmd/xl-v1-healing-common.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ package cmd import "time" // commonTime returns a maximally occurring time from a list of time. -func commonTime(modTimes []time.Time) (modTime time.Time) { +func commonTime(modTimes []time.Time) (modTime time.Time, count int) { var maxima int // Counter for remembering max occurrence of elements. timeOccurenceMap := make(map[time.Time]int) // Ignore the uuid sentinel and count the rest. @@ -32,13 +32,17 @@ func commonTime(modTimes []time.Time) (modTime time.Time) { // Find the common cardinality from previously collected // occurrences of elements. for time, count := range timeOccurenceMap { - if count > maxima { + if count == maxima && time.After(modTime) { + maxima = count + modTime = time + + } else if count > maxima { maxima = count modTime = time } } // Return the collected common uuid. - return modTime + return modTime, maxima } // Beginning of unix time is treated as sentinel value here. @@ -85,7 +89,7 @@ func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error) modTimes := listObjectModtimes(partsMetadata, errs) // Reduce list of UUIDs to a single common value. 
- modTime = commonTime(modTimes) + modTime, _ = commonTime(modTimes) // Create a new online disks slice, which have common uuid. for index, t := range modTimes { @@ -119,7 +123,7 @@ func outDatedDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error) ( // Returns if the object should be healed. func xlShouldHeal(partsMetadata []xlMetaV1, errs []error) bool { - modTime := commonTime(listObjectModtimes(partsMetadata, errs)) + modTime, _ := commonTime(listObjectModtimes(partsMetadata, errs)) for index := range partsMetadata { if errs[index] == errDiskNotFound { continue @@ -133,3 +137,55 @@ func xlShouldHeal(partsMetadata []xlMetaV1, errs []error) bool { } return false } + +// xlHealStat - returns a structure which describes how many data, +// parity erasure blocks are missing and if it is possible to heal +// with the blocks present. +func xlHealStat(xl xlObjects, partsMetadata []xlMetaV1, errs []error) HealInfo { + // Less than quorum erasure coded blocks of the object have the same create time. + // This object can't be healed with the information we have. + modTime, count := commonTime(listObjectModtimes(partsMetadata, errs)) + if count < xl.readQuorum { + return HealInfo{ + Status: quorumUnavailable, + MissingDataCount: 0, + MissingPartityCount: 0, + } + } + + // If there isn't a valid xlMeta then we can't heal the object. + xlMeta, err := pickValidXLMeta(partsMetadata, modTime) + if err != nil { + return HealInfo{ + Status: corrupted, + MissingDataCount: 0, + MissingPartityCount: 0, + } + } + + // Compute heal statistics like bytes to be healed, missing + // data and missing parity count. + missingDataCount := 0 + missingParityCount := 0 + + for i, err := range errs { + // xl.json is not found, which implies the erasure + // coded blocks are unavailable in the corresponding disk. + // First half of the disks are data and the rest are parity. 
+ if realErr := errorCause(err); realErr == errFileNotFound || realErr == errDiskNotFound { + if xlMeta.Erasure.Distribution[i]-1 < xl.dataBlocks { + missingDataCount++ + } else { + missingParityCount++ + } + } + } + + // This object can be healed. We have enough object metadata + // to reconstruct missing erasure coded blocks. + return HealInfo{ + Status: canHeal, + MissingDataCount: missingDataCount, + MissingPartityCount: missingParityCount, + } +} diff --git a/cmd/xl-v1-healing-common_test.go b/cmd/xl-v1-healing-common_test.go index da18c8b8e..be7f200b9 100644 --- a/cmd/xl-v1-healing-common_test.go +++ b/cmd/xl-v1-healing-common_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -75,7 +75,7 @@ func TestCommonTime(t *testing.T) { // common modtime. Tests fail if modtime does not match. for i, testCase := range testCases { // Obtain a common mod time from modTimes slice. - ctime := commonTime(testCase.times) + ctime, _ := commonTime(testCase.times) if testCase.time != ctime { t.Fatalf("Test case %d, expect to pass but failed. Wanted modTime: %s, got modTime: %s\n", i+1, testCase.time, ctime) } diff --git a/cmd/xl-v1-healing.go b/cmd/xl-v1-healing.go index f14b6baf4..4b1b89dbe 100644 --- a/cmd/xl-v1-healing.go +++ b/cmd/xl-v1-healing.go @@ -169,8 +169,8 @@ func listBucketNames(storageDisks []StorageAPI) (bucketNames map[string]struct{} if !IsValidBucketName(volInfo.Name) { continue } - // Ignore the volume special bucket. - if volInfo.Name == minioMetaBucket { + // Skip special volume buckets. 
+ if isMinioMetaBucketName(volInfo.Name) { continue } bucketNames[volInfo.Name] = struct{}{} diff --git a/cmd/xl-v1-list-objects-heal.go b/cmd/xl-v1-list-objects-heal.go index 1871470e5..2438c17a2 100644 --- a/cmd/xl-v1-list-objects-heal.go +++ b/cmd/xl-v1-list-objects-heal.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -120,8 +120,15 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma objInfo.Name = entry objInfo.IsDir = true } else { - objInfo.Bucket = bucket - objInfo.Name = entry + var err error + objInfo, err = xl.getObjectInfo(bucket, entry) + if err != nil { + // Ignore errFileNotFound + if errorCause(err) == errFileNotFound { + continue + } + return ListObjectsInfo{}, toObjectErr(err, bucket, prefix) + } } nextMarker = objInfo.Name objInfos = append(objInfos, objInfo) @@ -150,11 +157,13 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma objectLock.RLock() partsMetadata, errs := readAllXLMetadata(xl.storageDisks, bucket, objInfo.Name) if xlShouldHeal(partsMetadata, errs) { + healStat := xlHealStat(xl, partsMetadata, errs) result.Objects = append(result.Objects, ObjectInfo{ - Name: objInfo.Name, - ModTime: objInfo.ModTime, - Size: objInfo.Size, - IsDir: false, + Name: objInfo.Name, + ModTime: objInfo.ModTime, + Size: objInfo.Size, + IsDir: false, + HealInfo: &healStat, }) } objectLock.RUnlock() diff --git a/cmd/xl-v1-utils.go b/cmd/xl-v1-utils.go index ec696e17f..e2309483c 100644 --- a/cmd/xl-v1-utils.go +++ b/cmd/xl-v1-utils.go @@ -322,7 +322,7 @@ func readAllXLMetadata(disks []StorageAPI, bucket, object string) ([]xlMetaV1, [ return metadataArray, errs } -// Return ordered partsMetadata depeinding on distribution. +// Return ordered partsMetadata depending on distribution. 
func getOrderedPartsMetadata(distribution []int, partsMetadata []xlMetaV1) (orderedPartsMetadata []xlMetaV1) { orderedPartsMetadata = make([]xlMetaV1, len(partsMetadata)) for index := range partsMetadata { diff --git a/pkg/madmin/API.md b/pkg/madmin/API.md index 7eaf02833..87ba3a3d1 100644 --- a/pkg/madmin/API.md +++ b/pkg/madmin/API.md @@ -9,29 +9,29 @@ package main import ( - "fmt" + "fmt" - "github.com/minio/minio/pkg/madmin" + "github.com/minio/minio/pkg/madmin" ) func main() { - // Use a secure connection. - ssl := true + // Use a secure connection. + ssl := true - // Initialize minio client object. - mdmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETKEY", ssl) - if err != nil { - fmt.Println(err) - return - } + // Initialize minio client object. + mdmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETKEY", ssl) + if err != nil { + fmt.Println(err) + return + } - // Fetch service status. - st, err := mdmClnt.ServiceStatus() - if err != nil { - fmt.Println(err) - return - } - fmt.Printf("%#v\n", st) + // Fetch service status. + st, err := mdmClnt.ServiceStatus() + if err != nil { + fmt.Println(err) + return + } + fmt.Printf("%#v\n", st) } ``` @@ -62,16 +62,16 @@ __Parameters__ ### ServiceStatus() (ServiceStatusMetadata, error) -Fetch service status, replies disk space used, backend type and total disks offline/online (XL). +Fetch service status, replies disk space used, backend type and total disks offline/online (applicable in distributed mode). | Param | Type | Description | |---|---|---| -|`serviceStatus` | _ServiceStatusMetadata_ | Represents current server status info in following format: | +|`serviceStatus` | _ServiceStatusMetadata_ | Represents current server status info in following format: | | Param | Type | Description | |---|---|---| -|`st.Total` | _int64_ | Total disk space. | +|`st.Total` | _int64_ | Total disk space. | |`st.Free` | _int64_ | Free disk space. 
| |`st.Backend`| _struct{}_ | Represents backend type embedded structure. | @@ -86,7 +86,7 @@ Fetch service status, replies disk space used, backend type and total disks offl __Example__ - + ```go st, err := madmClnt.ServiceStatus() @@ -103,7 +103,7 @@ If successful restarts the running minio service, for distributed setup restarts __Example__ - + ```go @@ -111,7 +111,108 @@ If successful restarts the running minio service, for distributed setup restarts if err != nil { log.Fatalln(err) } - log.Printf("Succes") + log.Printf("Success") ``` + +### ListLocks(bucket, prefix string, olderThan time.Duration) ([]VolumeLockInfo, error) +If successful returns information on the list of locks held on ``bucket`` matching ``prefix`` older than ``olderThan`` seconds. +__Example__ + +``` go + volLocks, err := madmClnt.ListLocks("mybucket", "myprefix", 30 * time.Second) + if err != nil { + log.Fatalln(err) + } + log.Println("List of locks: ", volLocks) + +``` + + +### ClearLocks(bucket, prefix string, olderThan time.Duration) ([]VolumeLockInfo, error) +If successful returns information on the list of locks cleared on ``bucket`` matching ``prefix`` older than ``olderThan`` seconds. + +__Example__ + +``` go + volLocks, err := madmClnt.ClearLocks("mybucket", "myprefix", 30 * time.Second) + if err != nil { + log.Fatalln(err) + } + log.Println("List of locks cleared: ", volLocks) + +``` + + +### ListObjectsHeal(bucket, prefix string, recursive bool, doneCh <-chan struct{}) (<-chan ObjectInfo, error) +If successful returns information on the list of objects that need healing in ``bucket`` matching ``prefix``. + +__Example__ + +``` go + // Create a done channel to control 'ListObjectsHeal' go routine. + doneCh := make(chan struct{}) + + // Indicate to our routine to exit cleanly upon return. + defer close(doneCh) + + // Set true if recursive listing is needed. + isRecursive := true + // List objects that need healing for a given bucket and + // prefix. 
+ healObjectsCh, err := madmClnt.ListObjectsHeal("mybucket", "myprefix", isRecursive, doneCh) + if err != nil { + fmt.Println(err) + return + } + for object := range healObjectsCh { + if object.Err != nil { + log.Fatalln(object.Err) + return + } + if object.HealInfo != nil { + switch healInfo := *object.HealInfo; healInfo.Status { + case madmin.CanHeal: + fmt.Println(object.Key, " can be healed.") + case madmin.QuorumUnavailable: + fmt.Println(object.Key, " can't be healed until quorum is available.") + case madmin.Corrupted: + fmt.Println(object.Key, " can't be healed, not enough information.") + } + } + fmt.Println("object: ", object) + } +``` + + +### HealBucket(bucket string, isDryRun bool) error +If bucket is successfully healed returns nil, otherwise returns error indicating the reason for failure. If isDryRun is true, then the bucket is not healed, but heal bucket request is validated by the server. e.g., if the bucket exists, if bucket name is valid etc. + +__Example__ + +``` go + isDryRun := false + err := madmClnt.HealBucket("mybucket", isDryRun) + if err != nil { + log.Fatalln(err) + } + log.Println("successfully healed mybucket") + +``` + + +### HealObject(bucket, object string, isDryRun bool) error +If object is successfully healed returns nil, otherwise returns error indicating the reason for failure. If isDryRun is true, then the object is not healed, but heal object request is validated by the server. e.g., if the object exists, if object name is valid etc. + +__Example__ + +``` go + isDryRun := false + err := madmClnt.HealObject("mybucket", "myobject", isDryRun) + if err != nil { + log.Fatalln(err) + } + log.Println("successfully healed mybucket/myobject") + +``` diff --git a/pkg/madmin/examples/heal-bucket.go b/pkg/madmin/examples/heal-bucket.go new file mode 100644 index 000000000..cb2559dcf --- /dev/null +++ b/pkg/madmin/examples/heal-bucket.go @@ -0,0 +1,58 @@ +// +build ignore + +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "log" + + "github.com/minio/minio/pkg/madmin" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are + // dummy values, please replace them with original values. + + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are + // dummy values, please replace them with original values. + + // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. + // New returns an Minio Admin client object. + madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + // Heal bucket mybucket - dry run + isDryRun := true + err = madmClnt.HealBucket("mybucket", isDryRun) + if err != nil { + log.Fatalln(err) + + } + + // Heal bucket mybucket - for real this time. + isDryRun = false + err = madmClnt.HealBucket("mybucket", isDryRun) + if err != nil { + log.Fatalln(err) + } + + log.Println("successfully healed mybucket") +} diff --git a/pkg/madmin/examples/heal-list.go b/pkg/madmin/examples/heal-list.go new file mode 100644 index 000000000..87a34f7fd --- /dev/null +++ b/pkg/madmin/examples/heal-list.go @@ -0,0 +1,76 @@ +// +build ignore + +package main + +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import ( + "fmt" + "log" + + "github.com/minio/minio/pkg/madmin" +) + +func main() { + + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are + // dummy values, please replace them with original values. + + // API requests are secure (HTTPS) if secure=true and insecure (HTTPS) otherwise. + // New returns an Minio Admin client object. + madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + bucket := "mybucket" + prefix := "myprefix" + + // Create a done channel to control 'ListObjectsHeal' go routine. + doneCh := make(chan struct{}) + // Indicate to our routine to exit cleanly upon return. + defer close(doneCh) + + // Set true if recursive listing is needed. + isRecursive := true + // List objects that need healing for a given bucket and + // prefix. 
+ healObjectsCh, err := madmClnt.ListObjectsHeal(bucket, prefix, isRecursive, doneCh) + if err != nil { + log.Fatalln(err) + } + + for object := range healObjectsCh { + if object.Err != nil { + log.Fatalln(object.Err) + return + } + + if object.HealInfo != nil { + switch healInfo := *object.HealInfo; healInfo.Status { + case madmin.CanHeal: + fmt.Println(object.Key, " can be healed.") + case madmin.QuorumUnavailable: + fmt.Println(object.Key, " can't be healed until quorum is available.") + case madmin.Corrupted: + fmt.Println(object.Key, " can't be healed, not enough information.") + } + } + fmt.Println("object: ", object) + } +} diff --git a/pkg/madmin/examples/heal-object.go b/pkg/madmin/examples/heal-object.go new file mode 100644 index 000000000..0cd15a809 --- /dev/null +++ b/pkg/madmin/examples/heal-object.go @@ -0,0 +1,57 @@ +// +build ignore + +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "log" + + "github.com/minio/minio/pkg/madmin" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are + // dummy values, please replace them with original values. + + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are + // dummy values, please replace them with original values. + + // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. + // New returns an Minio Admin client object. 
+ madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + // Heal object mybucket/myobject - dry run. + isDryRun := true + err = madmClnt.HealObject("mybucket", "myobject", isDryRun) + if err != nil { + log.Fatalln(err) + } + + // Heal object mybucket/myobject - this time for real. + isDryRun = false + err = madmClnt.HealObject("mybucket", "myobject", isDryRun) + if err != nil { + log.Fatalln(err) + } + + log.Println("successfully healed mybucket/myobject") +} diff --git a/pkg/madmin/heal-commands.go b/pkg/madmin/heal-commands.go new file mode 100644 index 000000000..84f2aa3c8 --- /dev/null +++ b/pkg/madmin/heal-commands.go @@ -0,0 +1,308 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package madmin + +import ( + "encoding/xml" + "errors" + "fmt" + "net/http" + "net/url" + "time" +) + +// listBucketHealResult container for listObjects response. +type listBucketHealResult struct { + // A response can contain CommonPrefixes only if you have + // specified a delimiter. + CommonPrefixes []commonPrefix + // Metadata about each object returned. + Contents []ObjectInfo + Delimiter string + + // Encoding type used to encode object keys in the response. + EncodingType string + + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. 
+ IsTruncated bool + Marker string + MaxKeys int64 + Name string + + // When response is truncated (the IsTruncated element value in + // the response is true), you can use the key name in this field + // as marker in the subsequent request to get next set of objects. + // Object storage lists objects in alphabetical order Note: This + // element is returned only if you have delimiter request + // parameter specified. If response does not include the NextMaker + // and it is truncated, you can use the value of the last Key in + // the response as the marker in the subsequent request to get the + // next set of object keys. + NextMarker string + Prefix string +} + +// commonPrefix container for prefix response. +type commonPrefix struct { + Prefix string +} + +// HealStatus - represents different states of healing an object could be in. +type healStatus int + +const ( + // CanHeal - Object can be healed + CanHeal healStatus = iota + // Corrupted - Object can't be healed + Corrupted + // QuorumUnavailable - Object can't be healed until read quorum is available + QuorumUnavailable +) + +// HealInfo - represents healing related information of an object. +type HealInfo struct { + Status healStatus + MissingDataCount int + MissingPartityCount int +} + +// ObjectInfo container for object metadata. +type ObjectInfo struct { + // An ETag is optionally set to md5sum of an object. In case of multipart objects, + // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of + // each parts concatenated into one string. + ETag string `json:"etag"` + + Key string `json:"name"` // Name of the object + LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. + Size int64 `json:"size"` // Size in bytes of the object. + ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. + + // Collection of additional metadata on the object. + // eg: x-amz-meta-*, content-encoding etc. 
+ Metadata http.Header `json:"metadata"` + + // Owner name. + Owner struct { + DisplayName string `json:"name"` + ID string `json:"id"` + } `json:"owner"` + + // The class of storage used to store the object. + StorageClass string `json:"storageClass"` + + // Error + Err error `json:"-"` + HealInfo *HealInfo `json:"healInfo,omitempty"` +} + +type healQueryKey string + +const ( + healBucket healQueryKey = "bucket" + healObject healQueryKey = "object" + healPrefix healQueryKey = "prefix" + healMarker healQueryKey = "marker" + healDelimiter healQueryKey = "delimiter" + healMaxKey healQueryKey = "max-key" + healDryRun healQueryKey = "dry-run" +) + +// mkHealQueryVal - helper function to construct heal REST API query params. +func mkHealQueryVal(bucket, prefix, marker, delimiter, maxKeyStr string) url.Values { + queryVal := make(url.Values) + queryVal.Set("heal", "") + queryVal.Set(string(healBucket), bucket) + queryVal.Set(string(healPrefix), prefix) + queryVal.Set(string(healMarker), marker) + queryVal.Set(string(healDelimiter), delimiter) + queryVal.Set(string(healMaxKey), maxKeyStr) + return queryVal +} + +// listObjectsHeal - issues heal list API request for a batch of maxKeys objects to be healed. +func (adm *AdminClient) listObjectsHeal(bucket, prefix, delimiter, marker string, maxKeys int) (listBucketHealResult, error) { + // Construct query params. + maxKeyStr := fmt.Sprintf("%d", maxKeys) + queryVal := mkHealQueryVal(bucket, prefix, marker, delimiter, maxKeyStr) + + hdrs := make(http.Header) + hdrs.Set(minioAdminOpHeader, "list") + + reqData := requestData{ + queryValues: queryVal, + customHeaders: hdrs, + } + + // Empty 'list' of objects to be healed. + toBeHealedObjects := listBucketHealResult{} + + // Execute GET on /?heal to list objects needing heal. 
+ resp, err := adm.executeMethod("GET", reqData) + + defer closeResponse(resp) + if err != nil { + return listBucketHealResult{}, err + } + + if resp.StatusCode != http.StatusOK { + return toBeHealedObjects, errors.New("Got HTTP Status: " + resp.Status) + } + + err = xml.NewDecoder(resp.Body).Decode(&toBeHealedObjects) + if err != nil { + return toBeHealedObjects, err + } + return toBeHealedObjects, nil +} + +// ListObjectsHeal - Lists upto maxKeys objects that needing heal matching bucket, prefix, marker, delimiter. +func (adm *AdminClient) ListObjectsHeal(bucket, prefix string, recursive bool, doneCh <-chan struct{}) (<-chan ObjectInfo, error) { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + // Save marker for next request. + var marker string + for { + // Get list of objects a maximum of 1000 per request. + result, err := adm.listObjectsHeal(bucket, prefix, marker, delimiter, 1000) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + // Save the marker. + marker = object.Key + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + object := ObjectInfo{} + object.Key = obj.Prefix + object.Size = 0 + select { + // Send object prefixes. + case objectStatCh <- object: + // If receives done from the caller, return here. 
+ case <-doneCh: + return + } + } + + // If next marker present, save it for next request. + if result.NextMarker != "" { + marker = result.NextMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh, nil +} + +// HealBucket - Heal the given bucket +func (adm *AdminClient) HealBucket(bucket string, dryrun bool) error { + // Construct query params. + queryVal := url.Values{} + queryVal.Set("heal", "") + queryVal.Set(string(healBucket), bucket) + if dryrun { + queryVal.Set(string(healDryRun), "yes") + } + + hdrs := make(http.Header) + hdrs.Set(minioAdminOpHeader, "bucket") + + reqData := requestData{ + queryValues: queryVal, + customHeaders: hdrs, + } + + // Execute POST on /?heal&bucket=mybucket to heal a bucket. + resp, err := adm.executeMethod("POST", reqData) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return errors.New("Got HTTP Status: " + resp.Status) + } + + return nil +} + +// HealObject - Heal the given object. +func (adm *AdminClient) HealObject(bucket, object string, dryrun bool) error { + // Construct query params. + queryVal := url.Values{} + queryVal.Set("heal", "") + queryVal.Set(string(healBucket), bucket) + queryVal.Set(string(healObject), object) + if dryrun { + queryVal.Set(string(healDryRun), "yes") + } + + hdrs := make(http.Header) + hdrs.Set(minioAdminOpHeader, "object") + + reqData := requestData{ + queryValues: queryVal, + customHeaders: hdrs, + } + + // Execute POST on /?heal&bucket=mybucket&object=myobject to heal an object. 
+ resp, err := adm.executeMethod("POST", reqData) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return errors.New("Got HTTP Status: " + resp.Status) + } + + return nil +} From 09b450d61011aea3422373144d389abfafaa5275 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 17 Jan 2017 14:05:07 -0800 Subject: [PATCH 075/100] Fix fs tests to avoid deleting /usr to certain systems. --- cmd/fs-v1-helpers_test.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/cmd/fs-v1-helpers_test.go b/cmd/fs-v1-helpers_test.go index e58f4319f..ef815ebad 100644 --- a/cmd/fs-v1-helpers_test.go +++ b/cmd/fs-v1-helpers_test.go @@ -18,7 +18,6 @@ package cmd import ( "bytes" - "runtime" "testing" ) @@ -396,10 +395,4 @@ func TestFSRemoves(t *testing.T) { if err = fsRemoveAll("my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); err != errFileNameTooLong { t.Fatal(err) } - - if runtime.GOOS != "windows" { - if err = fsRemoveAll("/usr"); err != errVolumeAccessDenied { - t.Fatal(err) - } - } } From 20a65981bd655a7a61ac0ff4a0a3c76349171420 Mon Sep 17 00:00:00 2001 From: Andy Brown Date: Tue, 17 Jan 2017 14:20:05 -0800 Subject: [PATCH 076/100] add delete and options methods to CORS whitelist (#3589) --- cmd/generic-handlers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index e38b65339..e0850b3f4 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -253,7 +253,7 @@ type resourceHandler struct { func setCorsHandler(h http.Handler) http.Handler { c := cors.New(cors.Options{ AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "HEAD", "POST", "PUT"}, + AllowedMethods: []string{"GET", "HEAD", "POST", "PUT", "DELETE", 
"OPTIONS"}, AllowedHeaders: []string{"*"}, ExposedHeaders: []string{"ETag"}, }) From f803bb4b3d4ed5cbb6b472a31bf90e07b7a49003 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Tue, 17 Jan 2017 23:25:59 +0100 Subject: [PATCH 077/100] admin: Add service Set Credentials API (#3580) --- appveyor.yml | 4 +- cmd/admin-handlers.go | 72 +++++++++++++++++++ cmd/admin-handlers_test.go | 57 +++++++++++---- cmd/admin-router.go | 2 + cmd/api-errors.go | 14 ++++ cmd/globals.go | 6 ++ cmd/main.go | 8 +-- docs/admin-api/management-api.md | 44 ++++++++++-- pkg/madmin/README.md | 2 + pkg/madmin/api-error-response.go | 28 +++++++- .../examples/service-set-credentials.go | 44 ++++++++++++ pkg/madmin/service.go | 48 +++++++++++++ pkg/madmin/utils.go | 15 ++++ 13 files changed, 317 insertions(+), 27 deletions(-) create mode 100644 pkg/madmin/examples/service-set-credentials.go diff --git a/appveyor.yml b/appveyor.yml index 66afd4cb2..7488c103a 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -35,8 +35,8 @@ test_script: # Unit tests - ps: Add-AppveyorTest "Unit Tests" -Outcome Running - mkdir build\coverage - - go test -race github.com/minio/minio/cmd... - - go test -race github.com/minio/minio/pkg... + - go test -timeout 15m -v -race github.com/minio/minio/cmd... + - go test -v -race github.com/minio/minio/pkg... 
- go test -coverprofile=build\coverage\coverage.txt -covermode=atomic github.com/minio/minio/cmd - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index cebfcd3b0..8d93f605d 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -18,6 +18,8 @@ package cmd import ( "encoding/json" + "encoding/xml" + "io/ioutil" "net/http" "net/url" "strconv" @@ -84,6 +86,76 @@ func (adminAPI adminAPIHandlers) ServiceRestartHandler(w http.ResponseWriter, r sendServiceCmd(globalAdminPeers, serviceRestart) } +// setCredsReq request +type setCredsReq struct { + Username string `xml:"username"` + Password string `xml:"password"` +} + +// ServiceCredsHandler - POST /?service +// HTTP header x-minio-operation: creds +// ---------- +// Update credentials in a minio server. In a distributed setup, update all the servers +// in the cluster. +func (adminAPI adminAPIHandlers) ServiceCredentialsHandler(w http.ResponseWriter, r *http.Request) { + // Authenticate request + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, adminAPIErr, r.URL) + return + } + + // Avoid setting new credentials when they are already passed + // by the environnement + if globalEnvAccessKey != "" || globalEnvSecretKey != "" { + writeErrorResponse(w, ErrMethodNotAllowed, r.URL) + return + } + + // Load request body + inputData, err := ioutil.ReadAll(r.Body) + if err != nil { + writeErrorResponse(w, ErrInternalError, r.URL) + return + } + + // Unmarshal request body + var req setCredsReq + err = xml.Unmarshal(inputData, &req) + if err != nil { + errorIf(err, "Cannot unmarshal credentials request") + writeErrorResponse(w, ErrMalformedXML, r.URL) + return + } + + // Check passed credentials + cred, err := getCredential(req.Username, req.Password) + switch err { + case errInvalidAccessKeyLength: + writeErrorResponse(w, ErrAdminInvalidAccessKey, r.URL) + return + case errInvalidSecretKeyLength: + 
writeErrorResponse(w, ErrAdminInvalidSecretKey, r.URL) + return + } + + // Notify all other Minio peers to update credentials + updateErrs := updateCredsOnPeers(cred) + for peer, err := range updateErrs { + errorIf(err, "Unable to update credentials on peer %s.", peer) + } + + // Update local credentials + serverConfig.SetCredential(cred) + if err = serverConfig.Save(); err != nil { + writeErrorResponse(w, ErrInternalError, r.URL) + return + } + + // At this stage, the operation is successful, return 200 OK + w.WriteHeader(http.StatusOK) +} + // validateLockQueryParams - Validates query params for list/clear locks management APIs. func validateLockQueryParams(vars url.Values) (string, string, time.Duration, APIErrorCode) { bucket := vars.Get(string(mgmtBucket)) diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 0f0c39c66..15b620623 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -19,6 +19,8 @@ package cmd import ( "bytes" "encoding/json" + "encoding/xml" + "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -33,8 +35,8 @@ type cmdType int const ( statusCmd cmdType = iota - stopCmd restartCmd + setCreds ) // String - String representation for cmdType @@ -42,10 +44,10 @@ func (c cmdType) String() string { switch c { case statusCmd: return "status" - case stopCmd: - return "stop" case restartCmd: return "restart" + case setCreds: + return "set-credentials" } return "" } @@ -58,6 +60,8 @@ func (c cmdType) apiMethod() string { return "GET" case restartCmd: return "POST" + case setCreds: + return "POST" } return "GET" } @@ -86,15 +90,19 @@ func testServiceSignalReceiver(cmd cmdType, t *testing.T) { // getServiceCmdRequest - Constructs a management REST API request for service // subcommands for a given cmdType value. 
-func getServiceCmdRequest(cmd cmdType, cred credential) (*http.Request, error) { +func getServiceCmdRequest(cmd cmdType, cred credential, body []byte) (*http.Request, error) { req, err := newTestRequest(cmd.apiMethod(), "/?service", 0, nil) if err != nil { return nil, err } + // Set body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + // minioAdminOpHeader is to identify the request as a // management REST API request. req.Header.Set(minioAdminOpHeader, cmd.String()) + req.Header.Set("X-Amz-Content-Sha256", getSHA256Hash(body)) // management REST API uses signature V4 for authentication. err = signRequestV4(req, cred.AccessKey, cred.SecretKey) @@ -106,7 +114,7 @@ func getServiceCmdRequest(cmd cmdType, cred credential) (*http.Request, error) { // testServicesCmdHandler - parametrizes service subcommand tests on // cmdType value. -func testServicesCmdHandler(cmd cmdType, t *testing.T) { +func testServicesCmdHandler(cmd cmdType, args map[string]interface{}, t *testing.T) { // reset globals. // this is to make sure that the tests are not affected by modified value. resetTestGlobals() @@ -147,19 +155,25 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) { // Setting up a go routine to simulate ServerMux's // handleServiceSignals for stop and restart commands. 
- switch cmd { - case stopCmd, restartCmd: + if cmd == restartCmd { go testServiceSignalReceiver(cmd, t) } credentials := serverConfig.GetCredential() adminRouter := router.NewRouter() registerAdminRouter(adminRouter) - rec := httptest.NewRecorder() - req, err := getServiceCmdRequest(cmd, credentials) + var body []byte + + if cmd == setCreds { + body, _ = xml.Marshal(setCredsReq{Username: args["username"].(string), Password: args["password"].(string)}) + } + + req, err := getServiceCmdRequest(cmd, credentials, body) if err != nil { t.Fatalf("Failed to build service status request %v", err) } + + rec := httptest.NewRecorder() adminRouter.ServeHTTP(rec, req) if cmd == statusCmd { @@ -173,20 +187,37 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) { } } + if cmd == setCreds { + // Check if new credentials are set + cred := serverConfig.GetCredential() + if cred.AccessKey != args["username"].(string) { + t.Errorf("Wrong access key, expected = %s, found = %s", args["username"].(string), cred.AccessKey) + } + if cred.SecretKey != args["password"].(string) { + t.Errorf("Wrong secret key, expected = %s, found = %s", args["password"].(string), cred.SecretKey) + } + + } + if rec.Code != http.StatusOK { - t.Errorf("Expected to receive %d status code but received %d", - http.StatusOK, rec.Code) + resp, _ := ioutil.ReadAll(rec.Body) + t.Errorf("Expected to receive %d status code but received %d. Body (%s)", + http.StatusOK, rec.Code, string(resp)) } } // Test for service status management REST API. func TestServiceStatusHandler(t *testing.T) { - testServicesCmdHandler(statusCmd, t) + testServicesCmdHandler(statusCmd, nil, t) } // Test for service restart management REST API. 
func TestServiceRestartHandler(t *testing.T) { - testServicesCmdHandler(restartCmd, t) + testServicesCmdHandler(restartCmd, nil, t) +} + +func TestServiceSetCreds(t *testing.T) { + testServicesCmdHandler(setCreds, map[string]interface{}{"username": "minio", "password": "minio123"}, t) } // mkLockQueryVal - helper function to build lock query param. diff --git a/cmd/admin-router.go b/cmd/admin-router.go index dde7faea8..3c5f63a96 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -36,6 +36,8 @@ func registerAdminRouter(mux *router.Router) { // Service restart adminRouter.Methods("POST").Queries("service", "").Headers(minioAdminOpHeader, "restart").HandlerFunc(adminAPI.ServiceRestartHandler) + // Service update credentials + adminRouter.Methods("POST").Queries("service", "").Headers(minioAdminOpHeader, "set-credentials").HandlerFunc(adminAPI.ServiceCredentialsHandler) /// Lock operations diff --git a/cmd/api-errors.go b/cmd/api-errors.go index a23a7e7d6..99b18ccd6 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -140,6 +140,9 @@ const ( // Add new extended error codes here. // Please open a https://github.com/minio/minio/issues before adding // new error codes here. + + ErrAdminInvalidAccessKey + ErrAdminInvalidSecretKey ) // error code to APIError structure, these fields carry respective @@ -574,6 +577,17 @@ var errorCodeResponse = map[APIErrorCode]APIError{ Description: "Server not initialized, please try again.", HTTPStatusCode: http.StatusServiceUnavailable, }, + ErrAdminInvalidAccessKey: { + Code: "XMinioAdminInvalidAccessKey", + Description: "The access key is invalid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrAdminInvalidSecretKey: { + Code: "XMinioAdminInvalidSecretKey", + Description: "The secret key is invalid.", + HTTPStatusCode: http.StatusBadRequest, + }, + // Add your error structure here. 
} diff --git a/cmd/globals.go b/cmd/globals.go index bae69452a..ce3a0a983 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -101,6 +101,12 @@ var ( // Minio server user agent string. globalServerUserAgent = "Minio/" + ReleaseTag + " (" + runtime.GOOS + "; " + runtime.GOARCH + ")" + // Access key passed from the environment + globalEnvAccessKey = os.Getenv("MINIO_ACCESS_KEY") + + // Secret key passed from the environment + globalEnvSecretKey = os.Getenv("MINIO_SECRET_KEY") + // Add new variable global values here. ) diff --git a/cmd/main.go b/cmd/main.go index bb2a47a71..db214201e 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -190,13 +190,11 @@ func minioInit(ctx *cli.Context) { enableLoggers() // Fetch access keys from environment variables and update the config. - accessKey := os.Getenv("MINIO_ACCESS_KEY") - secretKey := os.Getenv("MINIO_SECRET_KEY") - if accessKey != "" && secretKey != "" { + if globalEnvAccessKey != "" && globalEnvSecretKey != "" { // Set new credentials. serverConfig.SetCredential(credential{ - AccessKey: accessKey, - SecretKey: secretKey, + AccessKey: globalEnvAccessKey, + SecretKey: globalEnvSecretKey, }) } if !isAccessKeyValid(serverConfig.GetCredential().AccessKey) { diff --git a/docs/admin-api/management-api.md b/docs/admin-api/management-api.md index 2655c1698..b8ed45489 100644 --- a/docs/admin-api/management-api.md +++ b/docs/admin-api/management-api.md @@ -6,9 +6,9 @@ ## List of management APIs - Service - - Stop - Restart - Status + - SetCredentials - Locks - List @@ -17,11 +17,6 @@ - Healing ### Service Management APIs -* Stop - - POST /?service - - x-minio-operation: stop - - Response: On success 200 - * Restart - POST /?service - x-minio-operation: restart @@ -32,6 +27,43 @@ - x-minio-operation: status - Response: On success 200, return json formatted StorageInfo object. 
+* SetCredentials + - GET /?service + - x-minio-operation: set-credentials + - Response: Success 200 + - Possible error responses + - ErrMethodNotAllowed + + MethodNotAllowed + The specified method is not allowed against this resource. + + + / + 3L137 + 3L137 + + - ErrAdminBadCred + + XMinioBadCred + XMinioBadCred + + + / + 3L137 + 3L137 + + - ErrInternalError + + InternalError + We encountered an internal error, please try again. + + + / + 3L137 + 3L137 + + + ### Lock Management APIs * ListLocks - GET /?lock&bucket=mybucket&prefix=myprefix&older-than=rel_time diff --git a/pkg/madmin/README.md b/pkg/madmin/README.md index 73aba84a3..e2d16f1f8 100644 --- a/pkg/madmin/README.md +++ b/pkg/madmin/README.md @@ -106,6 +106,7 @@ go run service-status.go * [`ServiceStatus`](./API.md#ServiceStatus) * [`ServiceRestart`](./API.md#ServiceRestart) +* [`ServiceSetCredentials`](./API.md#ServiceSetCredentials) ## Full Examples @@ -113,6 +114,7 @@ go run service-status.go * [service-status.go](https://github.com/minio/minio/blob/master/pkg/madmin/examples/service-status.go) * [service-restart.go](https://github.com/minio/minio/blob/master/pkg/madmin/examples/service-restart.go) +* [service-set-credentials.go](https://github.com/minio/minio/blob/master/pkg/madmin/examples/service-set-credentials.go) ## Contribute diff --git a/pkg/madmin/api-error-response.go b/pkg/madmin/api-error-response.go index 49abf7e70..d5a14f656 100644 --- a/pkg/madmin/api-error-response.go +++ b/pkg/madmin/api-error-response.go @@ -16,7 +16,10 @@ package madmin -import "encoding/xml" +import ( + "encoding/xml" + "net/http" +) /* **** SAMPLE ERROR RESPONSE **** @@ -50,6 +53,29 @@ func (e ErrorResponse) Error() string { return e.Message } +const ( + reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." +) + +// httpRespToErrorResponse returns a new encoded ErrorResponse +// structure as error. 
+func httpRespToErrorResponse(resp *http.Response) error { + if resp == nil { + msg := "Response is empty. " + reportIssue + return ErrInvalidArgument(msg) + } + var errResp ErrorResponse + // Decode the xml error + err := xmlDecoder(resp.Body, &errResp) + if err != nil { + return ErrorResponse{ + Code: resp.Status, + Message: "Failed to parse server response.", + } + } + return errResp +} + // ErrInvalidArgument - Invalid argument response. func ErrInvalidArgument(message string) error { return ErrorResponse{ diff --git a/pkg/madmin/examples/service-set-credentials.go b/pkg/madmin/examples/service-set-credentials.go new file mode 100644 index 000000000..81f0dc203 --- /dev/null +++ b/pkg/madmin/examples/service-set-credentials.go @@ -0,0 +1,44 @@ +// +build ignore + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "log" + + "github.com/minio/minio/pkg/madmin" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // API requests are secure (HTTPS) if secure=true and insecure (HTTPS) otherwise. + // New returns an Minio Admin client object. 
+ madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + err = madmClnt.ServiceSetCredentials("YOUR-NEW-ACCESSKEY", "YOUR-NEW-SECRETKEY") + if err != nil { + log.Fatalln(err) + } + log.Println("New credentials successfully set.") +} diff --git a/pkg/madmin/service.go b/pkg/madmin/service.go index d49213037..442ab3ffb 100644 --- a/pkg/madmin/service.go +++ b/pkg/madmin/service.go @@ -18,7 +18,9 @@ package madmin import ( + "bytes" "encoding/json" + "encoding/xml" "errors" "io/ioutil" "net/http" @@ -117,3 +119,49 @@ func (adm *AdminClient) ServiceRestart() error { } return nil } + +// setCredsReq - xml to send to the server to set new credentials +type setCredsReq struct { + Username string `xml:"username"` + Password string `xml:"password"` +} + +// ServiceSetCredentials - Call Service Set Credentials API to set new access and secret keys in the specified Minio server +func (adm *AdminClient) ServiceSetCredentials(access, secret string) error { + + // Disallow sending with the server if the connection is not secure + if !adm.secure { + return errors.New("setting new credentials requires HTTPS connection to the server") + } + + // Setup new request + reqData := requestData{} + reqData.queryValues = make(url.Values) + reqData.queryValues.Set("service", "") + reqData.customHeaders = make(http.Header) + reqData.customHeaders.Set(minioAdminOpHeader, "set-credentials") + + // Setup request's body + body, err := xml.Marshal(setCredsReq{Username: access, Password: secret}) + if err != nil { + return err + } + reqData.contentBody = bytes.NewReader(body) + reqData.contentLength = int64(len(body)) + reqData.contentMD5Bytes = sumMD5(body) + reqData.contentSHA256Bytes = sum256(body) + + // Execute GET on bucket to list objects. 
+ resp, err := adm.executeMethod("POST", reqData) + + defer closeResponse(resp) + if err != nil { + return err + } + + // Return error to the caller if http response code is different from 200 + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp) + } + return nil +} diff --git a/pkg/madmin/utils.go b/pkg/madmin/utils.go index 1661c2f8c..34aa80618 100644 --- a/pkg/madmin/utils.go +++ b/pkg/madmin/utils.go @@ -17,7 +17,9 @@ package madmin import ( + "crypto/md5" "crypto/sha256" + "encoding/xml" "io" "io/ioutil" "net" @@ -35,6 +37,19 @@ func sum256(data []byte) []byte { return hash.Sum(nil) } +// sumMD5 calculate sumMD5 sum for an input byte array. +func sumMD5(data []byte) []byte { + hash := md5.New() + hash.Write(data) + return hash.Sum(nil) +} + +// xmlDecoder provide decoded value in xml. +func xmlDecoder(body io.Reader, v interface{}) error { + d := xml.NewDecoder(body) + return d.Decode(v) +} + // getEndpointURL - construct a new endpoint. func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { if strings.Contains(endpoint, ":") { From 62f8343879162afad14093fef920a874bdb79dc6 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 18 Jan 2017 12:24:34 -0800 Subject: [PATCH 078/100] Add constants for commonly used values. (#3588) This is a consolidation effort, avoiding usage of naked strings in codebase. Whenever possible use constants which can be repurposed elsewhere. This also fixes `goconst ./...` reported issues. 
--- cmd/admin-handlers_test.go | 8 +-- cmd/admin-rpc-server_test.go | 4 +- cmd/api-errors.go | 16 ++--- cmd/api-response.go | 28 ++++---- cmd/auth-handler.go | 4 +- cmd/auth-handler_test.go | 14 ++-- cmd/auth-rpc-server_test.go | 6 +- cmd/benchmark-utils_test.go | 18 ++--- cmd/bucket-handlers.go | 10 +-- cmd/bucket-notification-handlers_test.go | 8 +-- cmd/bucket-notification-utils_test.go | 6 +- cmd/build-constants.go | 9 ++- cmd/checkport_test.go | 4 +- cmd/config-migrate.go | 24 +++---- cmd/config-migrate_test.go | 10 +-- cmd/config-v13.go | 4 +- cmd/config-v13_test.go | 6 +- cmd/erasure-utils.go | 4 +- cmd/event-notifier_test.go | 20 +++--- cmd/format-config-v1_test.go | 6 +- cmd/fs-v1-helpers_test.go | 2 +- cmd/fs-v1-metadata.go | 17 ++++- cmd/fs-v1-multipart-common.go | 4 +- cmd/fs-v1-multipart_test.go | 4 +- cmd/fs-v1-rwpool_test.go | 8 +-- cmd/fs-v1.go | 4 +- cmd/fs-v1_test.go | 4 +- cmd/generic-handlers.go | 26 ++++++- cmd/globals.go | 13 ++-- cmd/handler-utils.go | 6 +- cmd/handler-utils_test.go | 10 +-- cmd/jwt_test.go | 4 +- cmd/lock-rpc-server_test.go | 22 +++--- cmd/notify-webhook_test.go | 2 +- cmd/object-api-common.go | 6 +- cmd/object-api-common_test.go | 4 +- cmd/object-api-getobject_test.go | 4 +- cmd/object-api-listobjects_test.go | 4 +- cmd/object-api-multipart_test.go | 6 +- cmd/posix-errors.go | 8 +-- cmd/posix-errors_test.go | 6 +- cmd/posix-prepare-path.go | 4 +- cmd/posix.go | 4 +- cmd/posix_test.go | 18 ++--- cmd/post-policy_test.go | 6 +- cmd/postpolicyform.go | 55 +++++++------- cmd/prepare-storage-msg.go | 18 +++-- cmd/prepare-storage-msg_test.go | 14 ++-- cmd/retry-storage_test.go | 4 +- cmd/s3-peer-client_test.go | 4 +- cmd/scan-bar.go | 92 ------------------------ cmd/server-main.go | 33 +++++---- cmd/server-main_test.go | 10 +-- cmd/server-mux.go | 26 ++++--- cmd/server-mux_test.go | 6 +- cmd/server-startup-msg.go | 4 +- cmd/server-startup-msg_test.go | 8 +-- cmd/server-startup-utils.go | 4 +- cmd/signature-v2_test.go | 2 +- 
cmd/signature-v4-parser_test.go | 4 +- cmd/signature-v4-utils.go | 8 +-- cmd/signature-v4-utils_test.go | 6 +- cmd/signature-v4.go | 13 ++-- cmd/signature-v4_test.go | 18 ++--- cmd/storage-rpc-client_test.go | 4 +- cmd/storage-rpc-server_test.go | 4 +- cmd/test-utils_test.go | 26 +++---- cmd/trie_test.go | 6 +- cmd/update-main.go | 8 +-- cmd/update-notifier.go | 2 +- cmd/url-sort_test.go | 12 ++-- cmd/utils.go | 10 ++- cmd/utils_test.go | 20 +++--- cmd/web-handlers_test.go | 42 +++++------ cmd/xl-v1-healing_test.go | 8 +-- cmd/xl-v1-list-objects-heal_test.go | 4 +- cmd/xl-v1-metadata.go | 31 ++++++-- cmd/xl-v1-multipart-common_test.go | 4 +- cmd/xl-v1-utils_test.go | 8 +-- 79 files changed, 461 insertions(+), 462 deletions(-) delete mode 100644 cmd/scan-bar.go diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 15b620623..7daa801f7 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -121,7 +121,7 @@ func testServicesCmdHandler(cmd cmdType, args map[string]interface{}, t *testing // initialize NSLock. initNSLock(false) // Initialize configuration for access/secret credentials. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Unable to initialize server config. %s", err) } @@ -238,7 +238,7 @@ func TestListLocksHandler(t *testing.T) { // initialize NSLock. initNSLock(false) - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Unable to initialize server config. %s", err) } @@ -323,7 +323,7 @@ func TestClearLocksHandler(t *testing.T) { // initialize NSLock. 
initNSLock(false) - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Unable to initialize server config. %s", err) } diff --git a/cmd/admin-rpc-server_test.go b/cmd/admin-rpc-server_test.go index 5894514c7..5577c1b2c 100644 --- a/cmd/admin-rpc-server_test.go +++ b/cmd/admin-rpc-server_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ func testAdminCmd(cmd cmdType, t *testing.T) { // this is to make sure that the tests are not affected by modified globals. resetTestGlobals() - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Failed to create test config - %v", err) } diff --git a/cmd/api-errors.go b/cmd/api-errors.go index 99b18ccd6..8021b8098 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -675,15 +675,11 @@ func getAPIError(code APIErrorCode) APIError { // getErrorResponse gets in standard error and resource value and // provides a encodable populated response values func getAPIErrorResponse(err APIError, resource string) APIErrorResponse { - var data = APIErrorResponse{} - data.Code = err.Code - data.Message = err.Description - if resource != "" { - data.Resource = resource + return APIErrorResponse{ + Code: err.Code, + Message: err.Description, + Resource: resource, + RequestID: "3L137", + HostID: "3L137", } - // TODO implement this in future - data.RequestID = "3L137" - data.HostID = "3L137" - - return data } diff --git a/cmd/api-response.go b/cmd/api-response.go index f8f175903..f07779112 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. 
+ * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -278,8 +278,8 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse { var data = ListBucketsResponse{} var owner = Owner{} - owner.ID = "minio" - owner.DisplayName = "minio" + owner.ID = globalMinioDefaultOwnerID + owner.DisplayName = globalMinioDefaultOwnerID for _, bucket := range buckets { var listbucket = Bucket{} @@ -301,8 +301,8 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max var owner = Owner{} var data = ListObjectsResponse{} - owner.ID = "minio" - owner.DisplayName = "minio" + owner.ID = globalMinioDefaultOwnerID + owner.DisplayName = globalMinioDefaultOwnerID for _, object := range resp.Objects { var content = Object{} @@ -315,7 +315,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max content.ETag = "\"" + object.MD5Sum + "\"" } content.Size = object.Size - content.StorageClass = "STANDARD" + content.StorageClass = globalMinioDefaultStorageClass content.Owner = owner // object.HealInfo is non-empty only when resp is constructed in ListObjectsHeal. 
content.HealInfo = object.HealInfo @@ -349,8 +349,8 @@ func generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter var data = ListObjectsV2Response{} if fetchOwner { - owner.ID = "minio" - owner.DisplayName = "minio" + owner.ID = globalMinioDefaultOwnerID + owner.DisplayName = globalMinioDefaultOwnerID } for _, object := range resp.Objects { @@ -364,7 +364,7 @@ func generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter content.ETag = "\"" + object.MD5Sum + "\"" } content.Size = object.Size - content.StorageClass = "STANDARD" + content.StorageClass = globalMinioDefaultStorageClass content.Owner = owner contents = append(contents, content) } @@ -423,11 +423,11 @@ func generateListPartsResponse(partsInfo ListPartsInfo) ListPartsResponse { listPartsResponse.Bucket = partsInfo.Bucket listPartsResponse.Key = partsInfo.Object listPartsResponse.UploadID = partsInfo.UploadID - listPartsResponse.StorageClass = "STANDARD" - listPartsResponse.Initiator.ID = "minio" - listPartsResponse.Initiator.DisplayName = "minio" - listPartsResponse.Owner.ID = "minio" - listPartsResponse.Owner.DisplayName = "minio" + listPartsResponse.StorageClass = globalMinioDefaultStorageClass + listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID + listPartsResponse.Initiator.DisplayName = globalMinioDefaultOwnerID + listPartsResponse.Owner.ID = globalMinioDefaultOwnerID + listPartsResponse.Owner.DisplayName = globalMinioDefaultOwnerID listPartsResponse.MaxParts = partsInfo.MaxParts listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker diff --git a/cmd/auth-handler.go b/cmd/auth-handler.go index afb8330a8..a50219d59 100644 --- a/cmd/auth-handler.go +++ b/cmd/auth-handler.go @@ -58,12 +58,12 @@ func isRequestPresignedSignatureV2(r *http.Request) bool { // Verify if request has AWS Post policy Signature Version '4'. 
func isRequestPostPolicySignatureV4(r *http.Request) bool { - return strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") && r.Method == "POST" + return strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") && r.Method == httpPOST } // Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation. func isRequestSignStreamingV4(r *http.Request) bool { - return r.Header.Get("x-amz-content-sha256") == streamingContentSHA256 && r.Method == "PUT" + return r.Header.Get("x-amz-content-sha256") == streamingContentSHA256 && r.Method == httpPUT } // Authorization type. diff --git a/cmd/auth-handler_test.go b/cmd/auth-handler_test.go index a1a536131..de9d8e124 100644 --- a/cmd/auth-handler_test.go +++ b/cmd/auth-handler_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -37,7 +37,7 @@ func TestGetRequestAuthType(t *testing.T) { req: &http.Request{ URL: &url.URL{ Host: "localhost:9000", - Scheme: "http", + Scheme: httpScheme, Path: "/", }, Header: http.Header{ @@ -54,7 +54,7 @@ func TestGetRequestAuthType(t *testing.T) { req: &http.Request{ URL: &url.URL{ Host: "localhost:9000", - Scheme: "http", + Scheme: httpScheme, Path: "/", }, Header: http.Header{ @@ -69,7 +69,7 @@ func TestGetRequestAuthType(t *testing.T) { req: &http.Request{ URL: &url.URL{ Host: "localhost:9000", - Scheme: "http", + Scheme: httpScheme, Path: "/", }, Header: http.Header{ @@ -84,7 +84,7 @@ func TestGetRequestAuthType(t *testing.T) { req: &http.Request{ URL: &url.URL{ Host: "localhost:9000", - Scheme: "http", + Scheme: httpScheme, Path: "/", RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1", }, @@ -97,7 +97,7 @@ func TestGetRequestAuthType(t *testing.T) { req: &http.Request{ URL: &url.URL{ Host: "localhost:9000", - Scheme: "http", + Scheme: httpScheme, Path: "/", }, Header: http.Header{ @@ -309,7 +309,7 @@ func mustNewSignedRequest(method string, urlStr string, contentLength int64, bod // Tests is requested authenticated function, tests replies for s3 errors. func TestIsReqAuthenticated(t *testing.T) { - path, err := newTestConfig("us-east-1") + path, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("unable initialize config file, %s", err) } diff --git a/cmd/auth-rpc-server_test.go b/cmd/auth-rpc-server_test.go index e95c98340..39ec686c0 100644 --- a/cmd/auth-rpc-server_test.go +++ b/cmd/auth-rpc-server_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -22,7 +22,7 @@ import ( ) func TestLogin(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Failed to create test config - %v", err) } @@ -77,7 +77,7 @@ func TestLogin(t *testing.T) { // Invalid password length { args: LoginRPCArgs{ - Username: "minio", + Username: globalMinioDefaultOwnerID, Password: "aaa", Version: Version, }, diff --git a/cmd/benchmark-utils_test.go b/cmd/benchmark-utils_test.go index fd80e45bb..079fd0cfb 100644 --- a/cmd/benchmark-utils_test.go +++ b/cmd/benchmark-utils_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -148,7 +148,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { // creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function. func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { b.Fatalf("Unable to initialize config. %s", err) } @@ -167,7 +167,7 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) { // creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function. func benchmarkPutObject(b *testing.B, instanceType string, objSize int) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { b.Fatalf("Unable to initialize config. %s", err) } @@ -186,7 +186,7 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) { // creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object. 
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { b.Fatalf("Unable to initialize config. %s", err) } @@ -206,7 +206,7 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) // Benchmark utility functions for ObjectLayer.GetObject(). // Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark. func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { b.Fatalf("Unable to initialize config. %s", err) } @@ -275,7 +275,7 @@ func generateBytesData(size int) []byte { // creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function. func benchmarkGetObject(b *testing.B, instanceType string, objSize int) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { b.Fatalf("Unable to initialize config. %s", err) } @@ -294,7 +294,7 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) { // creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() . func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { b.Fatalf("Unable to initialize config. %s", err) } @@ -314,7 +314,7 @@ func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) // Parallel benchmark utility functions for ObjectLayer.PutObject(). // Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark. 
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { b.Fatalf("Unable to initialize config. %s", err) } @@ -362,7 +362,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) { // Parallel benchmark utility functions for ObjectLayer.GetObject(). // Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark. func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { b.Fatalf("Unable to initialize config. %s", err) } diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index c0ebb652f..aadd99bb4 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -100,7 +100,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r * return } - if s3Error := checkRequestAuthType(r, bucket, "s3:GetBucketLocation", "us-east-1"); s3Error != ErrNone { + if s3Error := checkRequestAuthType(r, bucket, "s3:GetBucketLocation", globalMinioDefaultRegion); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -115,7 +115,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r * encodedSuccessResponse := encodeResponse(LocationResponse{}) // Get current region. 
region := serverConfig.GetRegion() - if region != "us-east-1" { + if region != globalMinioDefaultRegion { encodedSuccessResponse = encodeResponse(LocationResponse{ Location: region, }) @@ -187,7 +187,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R } // ListBuckets does not have any bucket action. - s3Error := checkRequestAuthType(r, "", "", "us-east-1") + s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion) if s3Error == ErrInvalidRegion { // Clients like boto3 send listBuckets() call signed with region that is configured. s3Error = checkRequestAuthType(r, "", "", serverConfig.GetRegion()) @@ -334,7 +334,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req } // PutBucket does not have any bucket action. - if s3Error := checkRequestAuthType(r, "", "", "us-east-1"); s3Error != ErrNone { + if s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } diff --git a/cmd/bucket-notification-handlers_test.go b/cmd/bucket-notification-handlers_test.go index 986749fdd..d4db4445c 100644 --- a/cmd/bucket-notification-handlers_test.go +++ b/cmd/bucket-notification-handlers_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -47,7 +47,7 @@ func newFlushWriter(writer io.Writer) http.ResponseWriter { // Tests write notification code. func TestWriteNotification(t *testing.T) { // Initialize a new test config. 
- root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Unable to initialize test config %s", err) } @@ -112,7 +112,7 @@ func TestWriteNotification(t *testing.T) { func TestSendBucketNotification(t *testing.T) { // Initialize a new test config. - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Unable to initialize test config %s", err) } @@ -185,7 +185,7 @@ func testGetBucketNotificationHandler(obj ObjectLayer, instanceType, bucketName filterRules := []filterRule{ { Name: "prefix", - Value: "minio", + Value: globalMinioDefaultOwnerID, }, { Name: "suffix", diff --git a/cmd/bucket-notification-utils_test.go b/cmd/bucket-notification-utils_test.go index 0a59f30f2..22d64be3a 100644 --- a/cmd/bucket-notification-utils_test.go +++ b/cmd/bucket-notification-utils_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -218,7 +218,7 @@ func TestValidEvents(t *testing.T) { // Tests queue arn validation. func TestQueueARN(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("unable initialize config file, %s", err) } @@ -302,7 +302,7 @@ func TestQueueARN(t *testing.T) { // Test unmarshal queue arn. func TestUnmarshalSQSARN(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("unable initialize config file, %s", err) } diff --git a/cmd/build-constants.go b/cmd/build-constants.go index 0eb6c238f..ff2a55f57 100644 --- a/cmd/build-constants.go +++ b/cmd/build-constants.go @@ -22,12 +22,15 @@ var ( // GOPATH - GOPATH value at the time of build. 
GOPATH = "" + // Go get development tag. + goGetTag = "DEVELOPMENT.GOGET" + // Version - version time.RFC3339. - Version = "DEVELOPMENT.GOGET" + Version = goGetTag // ReleaseTag - release tag in TAG.%Y-%m-%dT%H-%M-%SZ. - ReleaseTag = "DEVELOPMENT.GOGET" + ReleaseTag = goGetTag // CommitID - latest commit id. - CommitID = "DEVELOPMENT.GOGET" + CommitID = goGetTag // ShortCommitID - first 12 characters from CommitID. ShortCommitID = CommitID[:12] ) diff --git a/cmd/checkport_test.go b/cmd/checkport_test.go index 9b3c4c087..bbdb01dd7 100644 --- a/cmd/checkport_test.go +++ b/cmd/checkport_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -47,7 +47,7 @@ func TestCheckPortAvailability(t *testing.T) { err = checkPortAvailability(test.port) // Skip if the os is windows due to https://github.com/golang/go/issues/7598 - if err == nil && runtime.GOOS != "windows" { + if err == nil && runtime.GOOS != globalWindowsOSName { t.Fatalf("checkPortAvailability should fail for port: %s. Error: %v", test.port, err) } } diff --git a/cmd/config-migrate.go b/cmd/config-migrate.go index acf72f017..f153f17ff 100644 --- a/cmd/config-migrate.go +++ b/cmd/config-migrate.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -127,7 +127,7 @@ func migrateV2ToV3() error { srvConfig.Region = cv2.Credentials.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature V4. 
- srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = consoleLogger{ Enable: true, @@ -191,7 +191,7 @@ func migrateV3ToV4() error { srvConfig.Region = cv3.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature Version 4. - srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = cv3.Logger.Console srvConfig.Logger.File = cv3.Logger.File @@ -237,7 +237,7 @@ func migrateV4ToV5() error { srvConfig.Region = cv4.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature Version 4. - srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = cv4.Logger.Console srvConfig.Logger.File = cv4.Logger.File @@ -286,7 +286,7 @@ func migrateV5ToV6() error { srvConfig.Region = cv5.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature Version 4. - srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = cv5.Logger.Console srvConfig.Logger.File = cv5.Logger.File @@ -362,7 +362,7 @@ func migrateV6ToV7() error { srvConfig.Region = cv6.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature Version 4. - srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = cv6.Logger.Console srvConfig.Logger.File = cv6.Logger.File @@ -426,7 +426,7 @@ func migrateV7ToV8() error { srvConfig.Region = cv7.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature Version 4. - srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = cv7.Logger.Console srvConfig.Logger.File = cv7.Logger.File @@ -496,7 +496,7 @@ func migrateV8ToV9() error { srvConfig.Region = cv8.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature Version 4. 
- srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = cv8.Logger.Console srvConfig.Logger.Console.Level = "error" @@ -583,7 +583,7 @@ func migrateV9ToV10() error { srvConfig.Region = cv9.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature Version 4. - srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = cv9.Logger.Console srvConfig.Logger.File = cv9.Logger.File @@ -668,7 +668,7 @@ func migrateV10ToV11() error { srvConfig.Region = cv10.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature Version 4. - srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = cv10.Logger.Console srvConfig.Logger.File = cv10.Logger.File @@ -756,7 +756,7 @@ func migrateV11ToV12() error { srvConfig.Region = cv11.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature Version 4. - srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = cv11.Logger.Console srvConfig.Logger.File = cv11.Logger.File @@ -861,7 +861,7 @@ func migrateV12ToV13() error { srvConfig.Region = cv12.Region if srvConfig.Region == "" { // Region needs to be set for AWS Signature Version 4. - srvConfig.Region = "us-east-1" + srvConfig.Region = globalMinioDefaultRegion } srvConfig.Logger.Console = cv12.Logger.Console srvConfig.Logger.File = cv12.Logger.File diff --git a/cmd/config-migrate_test.go b/cmd/config-migrate_test.go index e5478cd00..c5b8a0827 100644 --- a/cmd/config-migrate_test.go +++ b/cmd/config-migrate_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -24,7 +24,7 @@ import ( // Test if config v1 is purged func TestServerConfigMigrateV1(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -58,7 +58,7 @@ func TestServerConfigMigrateV1(t *testing.T) { // Test if all migrate code returns nil when config file does not // exist func TestServerConfigMigrateInexistentConfig(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -110,7 +110,7 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) { // Test if a config migration from v2 to v12 is successfully done func TestServerConfigMigrateV2toV12(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -169,7 +169,7 @@ func TestServerConfigMigrateV2toV12(t *testing.T) { // Test if all migrate code returns error with corrupted config files func TestServerConfigMigrateFaultyConfig(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } diff --git a/cmd/config-v13.go b/cmd/config-v13.go index b29419699..5ba8b78c6 100644 --- a/cmd/config-v13.go +++ b/cmd/config-v13.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -49,7 +49,7 @@ func initConfig() (bool, error) { // Initialize server config. 
srvCfg := &serverConfigV13{} srvCfg.Version = globalMinioConfigVersion - srvCfg.Region = "us-east-1" + srvCfg.Region = globalMinioDefaultRegion srvCfg.Credential = newCredential() // Enable console logger by default on a fresh run. diff --git a/cmd/config-v13_test.go b/cmd/config-v13_test.go index 5a80e43aa..54312714e 100644 --- a/cmd/config-v13_test.go +++ b/cmd/config-v13_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,14 +22,14 @@ import ( ) func TestServerConfig(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } // remove the root directory after the test ends. defer removeAll(rootPath) - if serverConfig.GetRegion() != "us-east-1" { + if serverConfig.GetRegion() != globalMinioDefaultRegion { t.Errorf("Expecting region `us-east-1` found %s", serverConfig.GetRegion()) } diff --git a/cmd/erasure-utils.go b/cmd/erasure-utils.go index c9b5f2f28..814df1c47 100644 --- a/cmd/erasure-utils.go +++ b/cmd/erasure-utils.go @@ -40,11 +40,11 @@ func newHashWriters(diskCount int, algo string) []hash.Hash { // newHash - gives you a newly allocated hash depending on the input algorithm. func newHash(algo string) (h hash.Hash) { switch algo { - case "sha256": + case sha256Algo: // sha256 checksum specially on ARM64 platforms or whenever // requested as dictated by `xl.json` entry. 
h = sha256.New() - case "blake2b": + case blake2bAlgo: // ignore the error, because New512 without a key never fails // New512 only returns a non-nil error, if the length of the passed // key > 64 bytes - but we use blake2b as hash function (no key) diff --git a/cmd/event-notifier_test.go b/cmd/event-notifier_test.go index 89556b0c1..0b52f65da 100644 --- a/cmd/event-notifier_test.go +++ b/cmd/event-notifier_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,7 +28,7 @@ import ( // Test InitEventNotifier with faulty disks func TestInitEventNotifierFaultyDisks(t *testing.T) { // Prepare for tests - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -85,7 +85,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) { func TestInitEventNotifierWithPostgreSQL(t *testing.T) { // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -116,7 +116,7 @@ func TestInitEventNotifierWithPostgreSQL(t *testing.T) { func TestInitEventNotifierWithNATS(t *testing.T) { // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -147,7 +147,7 @@ func TestInitEventNotifierWithNATS(t *testing.T) { func TestInitEventNotifierWithWebHook(t *testing.T) { // initialize the server and obtain the credentials and root. 
// credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -178,7 +178,7 @@ func TestInitEventNotifierWithWebHook(t *testing.T) { func TestInitEventNotifierWithAMQP(t *testing.T) { // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -209,7 +209,7 @@ func TestInitEventNotifierWithAMQP(t *testing.T) { func TestInitEventNotifierWithElasticSearch(t *testing.T) { // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -240,7 +240,7 @@ func TestInitEventNotifierWithElasticSearch(t *testing.T) { func TestInitEventNotifierWithRedis(t *testing.T) { // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. 
- rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -346,7 +346,7 @@ func TestInitEventNotifier(t *testing.T) { filterRules := []filterRule{ { Name: "prefix", - Value: "minio", + Value: globalMinioDefaultOwnerID, }, { Name: "suffix", @@ -569,7 +569,7 @@ func TestAddRemoveBucketListenerConfig(t *testing.T) { filterRules := []filterRule{ { Name: "prefix", - Value: "minio", + Value: globalMinioDefaultOwnerID, }, { Name: "suffix", diff --git a/cmd/format-config-v1_test.go b/cmd/format-config-v1_test.go index 8e46fdde3..b92532b0a 100644 --- a/cmd/format-config-v1_test.go +++ b/cmd/format-config-v1_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -812,7 +812,7 @@ func TestLoadFormatXLErrs(t *testing.T) { // Tests for healFormatXLCorruptedDisks() with cases which lead to errors func TestHealFormatXLCorruptedDisksErrs(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } @@ -968,7 +968,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) { // Tests for healFormatXLFreshDisks() with cases which lead to errors func TestHealFormatXLFreshDisksErrs(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } diff --git a/cmd/fs-v1-helpers_test.go b/cmd/fs-v1-helpers_test.go index ef815ebad..67541f670 100644 --- a/cmd/fs-v1-helpers_test.go +++ b/cmd/fs-v1-helpers_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/fs-v1-metadata.go b/cmd/fs-v1-metadata.go index 99d475541..ae0b3532a 100644 --- a/cmd/fs-v1-metadata.go +++ b/cmd/fs-v1-metadata.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -164,11 +164,22 @@ func (m *fsMetaV1) ReadFrom(reader io.Reader) (n int64, err error) { return int64(len(metadataBytes)), nil } +// FS metadata constants. +const ( + // FS backend meta version. + fsMetaVersion = "1.0.0" + + // FS backend meta format. + fsMetaFormat = "fs" + + // Add more constants here. +) + // newFSMetaV1 - initializes new fsMetaV1. func newFSMetaV1() (fsMeta fsMetaV1) { fsMeta = fsMetaV1{} - fsMeta.Version = "1.0.0" - fsMeta.Format = "fs" + fsMeta.Version = fsMetaVersion + fsMeta.Format = fsMetaFormat fsMeta.Minio.Release = ReleaseTag return fsMeta } diff --git a/cmd/fs-v1-multipart-common.go b/cmd/fs-v1-multipart-common.go index c7b9c7f63..940b0645e 100644 --- a/cmd/fs-v1-multipart-common.go +++ b/cmd/fs-v1-multipart-common.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -51,7 +51,7 @@ func (fs fsObjects) deleteUploadsJSON(bucket, object, uploadID string) error { uploadsMetaPath := pathJoin(uploadPath, uploadsJSONFile) // Special case for windows please read through. 
- if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { // Ordinarily windows does not permit deletion or renaming of files still // in use, but if all open handles to that file were opened with FILE_SHARE_DELETE // then it can permit renames and deletions of open files. diff --git a/cmd/fs-v1-multipart_test.go b/cmd/fs-v1-multipart_test.go index 660b7af78..ee6126fed 100644 --- a/cmd/fs-v1-multipart_test.go +++ b/cmd/fs-v1-multipart_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -48,7 +48,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) { // TestPutObjectPartFaultyDisk - test PutObjectPart with faulty disks func TestPutObjectPartFaultyDisk(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } diff --git a/cmd/fs-v1-rwpool_test.go b/cmd/fs-v1-rwpool_test.go index 4a9280f33..dc9459379 100644 --- a/cmd/fs-v1-rwpool_test.go +++ b/cmd/fs-v1-rwpool_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -69,7 +69,7 @@ func TestRWPool(t *testing.T) { // Fails to create a file if there is a directory. _, err = rwPool.Create(pathJoin(path, "success-vol", "file")) - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { if err != errFileAccessDenied { t.Fatal("Unexpected error", err) } @@ -87,7 +87,7 @@ func TestRWPool(t *testing.T) { // Fails to read a directory. 
_, err = rwPool.Open(pathJoin(path, "success-vol", "file")) - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { if err != errFileAccessDenied { t.Fatal("Unexpected error", err) } @@ -99,7 +99,7 @@ func TestRWPool(t *testing.T) { // Fails to open a file which has a parent as file. _, err = rwPool.Open(pathJoin(path, "success-vol", "file/path/1.txt/test")) - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { if err != errFileAccessDenied { t.Fatal("Unexpected error", err) } diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 6f33594e7..24c25b124 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -179,7 +179,7 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) { func (fs fsObjects) checkDiskFree() (err error) { // We don't validate disk space or inode utilization on windows. // Each windows calls to 'GetVolumeInformationW' takes around 3-5seconds. - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { return nil } diff --git a/cmd/fs-v1_test.go b/cmd/fs-v1_test.go index e0176d18e..97d2d36c4 100644 --- a/cmd/fs-v1_test.go +++ b/cmd/fs-v1_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -49,7 +49,7 @@ func TestNewFS(t *testing.T) { // TestFSShutdown - initialize a new FS object layer then calls // Shutdown to check returned results func TestFSShutdown(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index e0850b3f4..974045687 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -139,7 +139,7 @@ func setBrowserCacheControlHandler(h http.Handler) http.Handler { } func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method == "GET" && guessIsBrowserReq(r) && globalIsBrowserEnabled { + if r.Method == httpGET && guessIsBrowserReq(r) && globalIsBrowserEnabled { // For all browser requests set appropriate Cache-Control policies if strings.HasPrefix(r.URL.Path, reservedBucket+"/") { if strings.HasSuffix(r.URL.Path, ".js") || r.URL.Path == reservedBucket+"/favicon.ico" { @@ -249,11 +249,31 @@ type resourceHandler struct { handler http.Handler } +// List of http methods. +const ( + httpGET = "GET" + httpPUT = "PUT" + httpHEAD = "HEAD" + httpPOST = "POST" + httpDELETE = "DELETE" + httpOPTIONS = "OPTIONS" +) + +// List of default allowable HTTP methods. +var defaultAllowableHTTPMethods = []string{ + httpGET, + httpPUT, + httpHEAD, + httpPOST, + httpDELETE, + httpOPTIONS, +} + // setCorsHandler handler for CORS (Cross Origin Resource Sharing) func setCorsHandler(h http.Handler) http.Handler { c := cors.New(cors.Options{ AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"}, + AllowedMethods: defaultAllowableHTTPMethods, AllowedHeaders: []string{"*"}, ExposedHeaders: []string{"ETag"}, }) @@ -328,7 +348,7 @@ func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } // A put method on path "/" doesn't make sense, ignore it. 
- if r.Method == "PUT" && r.URL.Path == "/" { + if r.Method == httpPUT && r.URL.Path == "/" { writeErrorResponse(w, ErrNotImplemented, r.URL) return } diff --git a/cmd/globals.go b/cmd/globals.go index ce3a0a983..c2f2a7861 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -45,6 +45,11 @@ const ( globalMinioKeyFile = "private.key" globalMinioConfigFile = "config.json" globalMinioCertExpireWarnDays = time.Hour * 24 * 30 // 30 days. + + globalMinioDefaultRegion = "us-east-1" + globalMinioDefaultOwnerID = "minio" + globalMinioDefaultStorageClass = "STANDARD" + globalWindowsOSName = "windows" // Add new global values here. ) @@ -118,10 +123,8 @@ var ( // global colors. var ( - colorRed = color.New(color.FgRed).SprintFunc() - colorBold = color.New(color.Bold).SprintFunc() - colorBlue = color.New(color.FgBlue).SprintfFunc() - colorGreen = color.New(color.FgGreen).SprintfFunc() + colorBold = color.New(color.Bold).SprintFunc() + colorBlue = color.New(color.FgBlue).SprintfFunc() ) // Parse command arguments and set global variables accordingly diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index 23d227140..a97f0f192 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -39,9 +39,9 @@ func isValidLocationConstraint(r *http.Request) (s3Error APIErrorCode) { // Once region has been obtained we proceed to verify it. 
incomingRegion := locationConstraint.Location if incomingRegion == "" { - // Location constraint is empty for region "us-east-1", + // Location constraint is empty for region globalMinioDefaultRegion, // in accordance with protocol. - incomingRegion = "us-east-1" + incomingRegion = globalMinioDefaultRegion } // Return errInvalidRegion if location constraint does not match // with configured region. diff --git a/cmd/handler-utils_test.go b/cmd/handler-utils_test.go index bedd705ad..0bf34b3c5 100644 --- a/cmd/handler-utils_test.go +++ b/cmd/handler-utils_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ import ( // Tests validate bucket LocationConstraint. func TestIsValidLocationContraint(t *testing.T) { - path, err := newTestConfig("us-east-1") + path, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("unable initialize config file, %s", err) } @@ -63,12 +63,12 @@ func TestIsValidLocationContraint(t *testing.T) { expectedCode APIErrorCode }{ // Test case - 1. - {"us-east-1", "us-east-1", ErrNone}, + {globalMinioDefaultRegion, globalMinioDefaultRegion, ErrNone}, // Test case - 2. // In case of empty request body ErrNone is returned. - {"", "us-east-1", ErrNone}, + {"", globalMinioDefaultRegion, ErrNone}, // Test case - 3. - {"eu-central-1", "us-east-1", ErrInvalidRegion}, + {"eu-central-1", globalMinioDefaultRegion, ErrInvalidRegion}, } for i, testCase := range testCases { inputRequest, e := createExpectedRequest(&http.Request{}, testCase.locationForInputRequest) diff --git a/cmd/jwt_test.go b/cmd/jwt_test.go index 341ecd591..32cdfa8fe 100644 --- a/cmd/jwt_test.go +++ b/cmd/jwt_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ package cmd import "testing" func testAuthenticate(authType string, t *testing.T) { - testPath, err := newTestConfig("us-east-1") + testPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("unable initialize config file, %s", err) } diff --git a/cmd/lock-rpc-server_test.go b/cmd/lock-rpc-server_test.go index 9e406dbda..3251a37bb 100644 --- a/cmd/lock-rpc-server_test.go +++ b/cmd/lock-rpc-server_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -45,7 +45,7 @@ func testLockEquality(lriLeft, lriRight []lockRequesterInfo) bool { // Helper function to create a lock server for testing func createLockTestServer(t *testing.T) (string, *lockServer, string) { - testPath, err := newTestConfig("us-east-1") + testPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("unable initialize config file, %s", err) } @@ -446,7 +446,7 @@ func TestLockRpcServerExpired(t *testing.T) { // Test initialization of lock servers. 
func TestLockServers(t *testing.T) { - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { return } @@ -466,19 +466,19 @@ func TestLockServers(t *testing.T) { isDistXL: true, srvCmdConfig: serverCmdConfig{ endpoints: []*url.URL{{ - Scheme: "http", + Scheme: httpScheme, Host: "localhost:9000", Path: "/mnt/disk1", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.1.2:9000", Path: "/mnt/disk2", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.2.1:9000", Path: "/mnt/disk3", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.2.2:9000", Path: "/mnt/disk4", }}, @@ -490,19 +490,19 @@ func TestLockServers(t *testing.T) { isDistXL: true, srvCmdConfig: serverCmdConfig{ endpoints: []*url.URL{{ - Scheme: "http", + Scheme: httpScheme, Host: "localhost:9000", Path: "/mnt/disk1", }, { - Scheme: "http", + Scheme: httpScheme, Host: "localhost:9000", Path: "/mnt/disk2", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.2.1:9000", Path: "/mnt/disk3", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.2.2:9000", Path: "/mnt/disk4", }}, diff --git a/cmd/notify-webhook_test.go b/cmd/notify-webhook_test.go index 3f298694f..4740901f0 100644 --- a/cmd/notify-webhook_test.go +++ b/cmd/notify-webhook_test.go @@ -40,7 +40,7 @@ func (p postHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Tests web hook initialization. func TestNewWebHookNotify(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } diff --git a/cmd/object-api-common.go b/cmd/object-api-common.go index aaf2d8c43..a371e3553 100644 --- a/cmd/object-api-common.go +++ b/cmd/object-api-common.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -159,12 +159,12 @@ func getPath(ep *url.URL) string { } var diskPath string // For windows ep.Path is usually empty - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { switch ep.Scheme { case "": // Eg. "minio server .\export" diskPath = ep.Path - case "http", "https": + case httpScheme, httpsScheme: // For full URLs windows drive is part of URL path. // Eg: http://ip:port/C:\mydrive // For windows trim off the preceding "/". diff --git a/cmd/object-api-common_test.go b/cmd/object-api-common_test.go index 3946f4d67..d312901c5 100644 --- a/cmd/object-api-common_test.go +++ b/cmd/object-api-common_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -106,7 +106,7 @@ func TestGetPath(t *testing.T) { epStr string path string } - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { testCases = []struct { epStr string path string diff --git a/cmd/object-api-getobject_test.go b/cmd/object-api-getobject_test.go index 5385952a9..1e6b429a7 100644 --- a/cmd/object-api-getobject_test.go +++ b/cmd/object-api-getobject_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -182,7 +182,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) { // Wrapper for calling GetObject with permission denied expected func TestGetObjectPermissionDenied(t *testing.T) { // Windows doesn't support Chmod under golang - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { ExecObjectLayerDiskAlteredTest(t, testGetObjectPermissionDenied) } } diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go index 8b3d4e94d..2b25441a0 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015-2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015-2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -588,7 +588,7 @@ func BenchmarkListObjects(b *testing.B) { } defer removeAll(directory) // initialize the root directory. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { b.Fatalf("Unable to initialize config. %s", err) } diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index abb5f8841..5e0146f3e 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -1061,7 +1061,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan { MaxUploads: 10, IsTruncated: false, - Prefix: "minio", + Prefix: globalMinioDefaultOwnerID, UploadIDMarker: uploadIDs[4], Uploads: []uploadMetadata{ { @@ -1201,7 +1201,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // Test case with `prefix` and `KeyMarker` (Test number 48). {bucketNames[2], "minio-object", objectNames[1], "", "", 10, listMultipartResults[34], nil, true}, // Test case with `prefix` and `uploadIDMarker` (Test number 49). - // {bucketNames[2], "minio", "", uploadIDs[4], "", 10, listMultipartResults[35], nil, true}, + // {bucketNames[2], globalMinioDefaultOwnerID, "", uploadIDs[4], "", 10, listMultipartResults[35], nil, true}, // Test case with `KeyMarker` and `uploadIDMarker` (Test number 50). // {bucketNames[2], "minio-object.txt", "", uploadIDs[5], "", 10, listMultipartResults[36], nil, true}, } diff --git a/cmd/posix-errors.go b/cmd/posix-errors.go index dfa583257..7bdde538d 100644 --- a/cmd/posix-errors.go +++ b/cmd/posix-errors.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -79,7 +79,7 @@ func isSysErrTooLong(err error) bool { // and ERROR_DIR_NOT_EMPTY for windows (directory not empty). 
func isSysErrNotEmpty(err error) bool { if pathErr, ok := err.(*os.PathError); ok { - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { if errno, _ok := pathErr.Err.(syscall.Errno); _ok && errno == 0x91 { // ERROR_DIR_NOT_EMPTY return true @@ -95,7 +95,7 @@ func isSysErrNotEmpty(err error) bool { // Check if the given error corresponds to the specific ERROR_PATH_NOT_FOUND for windows func isSysErrPathNotFound(err error) bool { - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { return false } if pathErr, ok := err.(*os.PathError); ok { @@ -109,7 +109,7 @@ func isSysErrPathNotFound(err error) bool { // Check if the given error corresponds to the specific ERROR_INVALID_HANDLE for windows func isSysErrHandleInvalid(err error) bool { - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { return false } // Check if err contains ERROR_INVALID_HANDLE errno diff --git a/cmd/posix-errors_test.go b/cmd/posix-errors_test.go index 498022f50..254744d6a 100644 --- a/cmd/posix-errors_test.go +++ b/cmd/posix-errors_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -34,7 +34,7 @@ func TestSysErrors(t *testing.T) { if !ok { t.Fatalf("Unexpected error expecting %s", syscall.ENOTDIR) } - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { pathErr = &os.PathError{Err: syscall.ENOTEMPTY} ok = isSysErrNotEmpty(pathErr) if !ok { @@ -47,7 +47,7 @@ func TestSysErrors(t *testing.T) { t.Fatal("Unexpected error expecting 0x91") } } - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { pathErr = &os.PathError{Err: syscall.Errno(0x03)} ok = isSysErrPathNotFound(pathErr) if !ok { diff --git a/cmd/posix-prepare-path.go b/cmd/posix-prepare-path.go index 23db02eed..5cd3ed8ee 100644 --- a/cmd/posix-prepare-path.go +++ b/cmd/posix-prepare-path.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ import ( // preparePath rewrites path to handle any OS specific details. func preparePath(path string) string { - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { // Microsoft Windows supports long path names using // uniform naming convention (UNC). return UNCPath(path) diff --git a/cmd/posix.go b/cmd/posix.go index bdfb9652b..cc4ff340f 100644 --- a/cmd/posix.go +++ b/cmd/posix.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -157,7 +157,7 @@ func getDiskInfo(diskPath string) (di disk.Info, err error) { func (s *posix) checkDiskFree() (err error) { // We don't validate disk space or inode utilization on windows. // Each windows calls to 'GetVolumeInformationW' takes around 3-5seconds. 
- if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { return nil } diff --git a/cmd/posix_test.go b/cmd/posix_test.go index f1dc0b428..e7fb98927 100644 --- a/cmd/posix_test.go +++ b/cmd/posix_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -286,7 +286,7 @@ func TestPosixMakeVol(t *testing.T) { } // TestPosix for permission denied. - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { // Initialize posix storage layer for permission denied error. posix, err := newPosix("/usr") if err != nil { @@ -377,7 +377,7 @@ func TestPosixDeleteVol(t *testing.T) { } // TestPosix for permission denied. - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { // Initialize posix storage layer for permission denied error. posixStorage, err = newPosix("/usr") if err != nil { @@ -656,7 +656,7 @@ func TestPosixPosixListDir(t *testing.T) { } // TestPosix for permission denied. - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { // Initialize posix storage layer for permission denied error. posixStorage, err = newPosix("/usr") if err != nil { @@ -770,7 +770,7 @@ func TestPosixDeleteFile(t *testing.T) { } // TestPosix for permission denied. - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { // Initialize posix storage layer for permission denied error. 
posixStorage, err = newPosix("/usr") if err != nil { @@ -881,7 +881,7 @@ func TestPosixReadFile(t *testing.T) { -1, 5, nil, func() error { - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { return &os.PathError{ Op: "seek", Path: preparePath(slashpath.Join(path, "success-vol", "myobject")), @@ -937,7 +937,7 @@ func TestPosixReadFile(t *testing.T) { if err != nil && testCase.expectedErr != nil { // Validate if the type string of the errors are an exact match. if err.Error() != testCase.expectedErr.Error() { - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { t.Errorf("Case: %d %#v, expected: %s, got: %s", i+1, testCase, testCase.expectedErr, err) } else { var resultErrno, expectErrno uintptr @@ -1075,7 +1075,7 @@ func TestPosixAppendFile(t *testing.T) { } // TestPosix for permission denied. - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { // Initialize posix storage layer for permission denied error. posixStorage, err = newPosix("/usr") if err != nil { @@ -1162,7 +1162,7 @@ func TestPosixPrepareFile(t *testing.T) { } // TestPosix for permission denied. - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { // Initialize posix storage layer for permission denied error. posixStorage, err = newPosix("/usr") if err != nil { diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 52bd96058..60333e3a3 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -115,7 +115,7 @@ func TestPostPolicyBucketHandler(t *testing.T) { // testPostPolicyBucketHandler - Tests validate post policy handler uploading objects. 
func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Initializing config.json failed") } @@ -402,7 +402,7 @@ func TestPostPolicyBucketHandlerRedirect(t *testing.T) { // testPostPolicyBucketHandlerRedirect tests POST Object when success_action_redirect is specified func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t TestErrHandler) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Initializing config.json failed") } diff --git a/cmd/postpolicyform.go b/cmd/postpolicyform.go index b3c8aeb04..799f5f818 100644 --- a/cmd/postpolicyform.go +++ b/cmd/postpolicyform.go @@ -27,6 +27,31 @@ import ( "time" ) +// startWithConds - map which indicates if a given condition supports starts-with policy operator +var startsWithConds = map[string]bool{ + "$acl": true, + "$bucket": false, + "$cache-control": true, + "$content-type": true, + "$content-disposition": true, + "$content-encoding": true, + "$expires": true, + "$key": true, + "$success_action_redirect": true, + "$redirect": true, + "$success_action_status": false, + "$x-amz-algorithm": false, + "$x-amz-credential": false, + "$x-amz-date": false, +} + +// Add policy conditionals. +const ( + policyCondEqual = "eq" + policyCondStartsWith = "starts-with" + policyCondContentLength = "content-length-range" +) + // toString - Safely convert interface to string without causing panic. 
func toString(val interface{}) string { switch v := val.(type) { @@ -127,7 +152,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) { Operator string Value string }{ - Operator: "eq", + Operator: policyCondEqual, Value: toString(v), } } @@ -136,7 +161,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) { return parsedPolicy, fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form", condt, reflect.TypeOf(condt).String()) } switch toLowerString(condt[0]) { - case "eq", "starts-with": + case policyCondEqual, policyCondStartsWith: for _, v := range condt { // Pre-check all values for type. if !isString(v) { // All values must be of type string. @@ -151,7 +176,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) { Operator: operator, Value: value, } - case "content-length-range": + case policyCondContentLength: min, err := toInteger(condt[1]) if err != nil { return parsedPolicy, err @@ -180,31 +205,13 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) { return parsedPolicy, nil } -// startWithConds - map which indicates if a given condition supports starts-with policy operator -var startsWithConds = map[string]bool{ - "$acl": true, - "$bucket": false, - "$cache-control": true, - "$content-type": true, - "$content-disposition": true, - "$content-encoding": true, - "$expires": true, - "$key": true, - "$success_action_redirect": true, - "$redirect": true, - "$success_action_status": false, - "$x-amz-algorithm": false, - "$x-amz-credential": false, - "$x-amz-date": false, -} - // checkPolicyCond returns a boolean to indicate if a condition is satisified according // to the passed operator func checkPolicyCond(op string, input1, input2 string) bool { switch op { - case "eq": + case policyCondEqual: return input1 == input2 - case "starts-with": + case policyCondStartsWith: return strings.HasPrefix(input1, input2) } return false @@ -231,7 +238,7 @@ func checkPostPolicy(formValues 
map[string]string, postPolicyForm PostPolicyForm // If the current policy condition is known if startsWithSupported, condFound := startsWithConds[cond]; condFound { // Check if the current condition supports starts-with operator - if op == "starts-with" && !startsWithSupported { + if op == policyCondStartsWith && !startsWithSupported { return ErrAccessDenied } // Check if current policy condition is satisfied diff --git a/cmd/prepare-storage-msg.go b/cmd/prepare-storage-msg.go index 9cc699b9f..445bbefca 100644 --- a/cmd/prepare-storage-msg.go +++ b/cmd/prepare-storage-msg.go @@ -65,9 +65,9 @@ func printHealMsg(endpoints []*url.URL, storageDisks []StorageAPI, fn printOnceF // for single node XL, distributed XL and when minio server is bound // to a specific ip:port. func getHealEndpoint(tls bool, firstEndpoint *url.URL) (cEndpoint *url.URL) { - scheme := "http" + scheme := httpScheme if tls { - scheme = "https" + scheme = httpsScheme } cEndpoint = &url.URL{ Scheme: scheme, @@ -93,6 +93,12 @@ func getHealEndpoint(tls bool, firstEndpoint *url.URL) (cEndpoint *url.URL) { return cEndpoint } +// Disks offline and online strings.. +const ( + diskOffline = "offline" + diskOnline = "online" +) + // Constructs a formatted heal message, when cluster is found to be in state where it requires healing. // healing is optional, server continues to initialize object layer after printing this message. // it is upto the end user to perform a heal if needed. 
@@ -112,9 +118,9 @@ func getHealMsg(endpoints []*url.URL, storageDisks []StorageAPI) string { humanize.IBytes(uint64(info.Total)), func() string { if info.Total > 0 { - return "online" + return diskOnline } - return "offline" + return diskOffline }(), ) } @@ -142,9 +148,9 @@ func getStorageInitMsg(titleMsg string, endpoints []*url.URL, storageDisks []Sto humanize.IBytes(uint64(info.Total)), func() string { if info.Total > 0 { - return "online" + return diskOnline } - return "offline" + return diskOffline }(), ) } diff --git a/cmd/prepare-storage-msg_test.go b/cmd/prepare-storage-msg_test.go index 5b39a50e9..af5c9bf1a 100644 --- a/cmd/prepare-storage-msg_test.go +++ b/cmd/prepare-storage-msg_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,11 +27,11 @@ func TestGetHealEndpoint(t *testing.T) { // Test for a SSL scheme. tls := true hURL := getHealEndpoint(tls, &url.URL{ - Scheme: "http", + Scheme: httpScheme, Host: "localhost:9000", }) sHURL := &url.URL{ - Scheme: "https", + Scheme: httpsScheme, Host: "localhost:9000", } if !reflect.DeepEqual(hURL, sHURL) { @@ -41,11 +41,11 @@ func TestGetHealEndpoint(t *testing.T) { // Test a non-TLS scheme. tls = false hURL = getHealEndpoint(tls, &url.URL{ - Scheme: "https", + Scheme: httpsScheme, Host: "localhost:9000", }) sHURL = &url.URL{ - Scheme: "http", + Scheme: httpScheme, Host: "localhost:9000", } if !reflect.DeepEqual(hURL, sHURL) { @@ -61,7 +61,7 @@ func TestGetHealEndpoint(t *testing.T) { Path: "/export", }) sHURL = &url.URL{ - Scheme: "http", + Scheme: httpScheme, Host: "", } globalMinioAddr = "" @@ -73,7 +73,7 @@ func TestGetHealEndpoint(t *testing.T) { // Tests heal message to be correct and properly formatted. 
func TestHealMsg(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal("Unable to initialize test config", err) } diff --git a/cmd/retry-storage_test.go b/cmd/retry-storage_test.go index 04556323b..5642910f8 100644 --- a/cmd/retry-storage_test.go +++ b/cmd/retry-storage_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,7 +25,7 @@ import ( // Tests retry storage. func TestRetryStorage(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } diff --git a/cmd/s3-peer-client_test.go b/cmd/s3-peer-client_test.go index 023bb619a..4c6824d04 100644 --- a/cmd/s3-peer-client_test.go +++ b/cmd/s3-peer-client_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ import ( // endpoints. func TestMakeS3Peers(t *testing.T) { // Initialize configuration - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("%s", err) } diff --git a/cmd/scan-bar.go b/cmd/scan-bar.go deleted file mode 100644 index fbfa5ad75..000000000 --- a/cmd/scan-bar.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Minio Cloud Storage (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "fmt" - "runtime" - "strings" - - "github.com/cheggaaa/pb" - "github.com/dustin/go-humanize" - "github.com/minio/mc/pkg/console" -) - -// fixateScanBar truncates or stretches text to fit within the terminal size. -func fixateScanBar(text string, width int) string { - if len([]rune(text)) > width { - // Trim text to fit within the screen - trimSize := len([]rune(text)) - width + 3 //"..." - if trimSize < len([]rune(text)) { - text = "..." + text[trimSize:] - } - } else { - text += strings.Repeat(" ", width-len([]rune(text))) - } - return text -} - -// Progress bar function report objects being scaned. -type scanBarFunc func(string) - -// scanBarFactory returns a progress bar function to report URL scanning. -func scanBarFactory() scanBarFunc { - fileCount := 0 - termWidth, err := pb.GetTerminalWidth() - if err != nil { - termWidth = 80 - } - - // Cursor animate channel. - cursorCh := cursorAnimate() - return func(source string) { - scanPrefix := fmt.Sprintf("[%s] %s ", humanize.Comma(int64(fileCount)), string(<-cursorCh)) - source = fixateScanBar(source, termWidth-len([]rune(scanPrefix))) - barText := scanPrefix + source - console.PrintC("\r" + barText + "\r") - fileCount++ - } -} - -// cursorAnimate - returns a animated rune through read channel for every read. 
-func cursorAnimate() <-chan rune { - cursorCh := make(chan rune) - var cursors string - - switch runtime.GOOS { - case "linux": - // cursors = "➩➪➫➬➭➮➯➱" - // cursors = "▁▃▄▅▆▇█▇▆▅▄▃" - cursors = "◐◓◑◒" - // cursors = "←↖↑↗→↘↓↙" - // cursors = "◴◷◶◵" - // cursors = "◰◳◲◱" - //cursors = "⣾⣽⣻⢿⡿⣟⣯⣷" - case "darwin": - cursors = "◐◓◑◒" - default: - cursors = "|/-\\" - } - go func() { - for { - for _, cursor := range cursors { - cursorCh <- cursor - } - } - }() - return cursorCh -} diff --git a/cmd/server-main.go b/cmd/server-main.go index ea0a373dd..87f9fee00 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -195,36 +195,45 @@ func isDistributedSetup(eps []*url.URL) bool { return false } +// Returns true if path is empty, or equals to '.', '/', '\' characters. +func isPathSentinel(path string) bool { + return path == "" || path == "." || path == "/" || path == `\` +} + +// Returned when path is empty or root path. +var errEmptyRootPath = errors.New("Empty or root path is not allowed") + +// Invalid scheme passed. +var errInvalidScheme = errors.New("Invalid scheme") + // Check if endpoint is in expected syntax by valid scheme/path across all platforms. func checkEndpointURL(endpointURL *url.URL) (err error) { - // applicable to all OS. - if endpointURL.Scheme == "" || endpointURL.Scheme == "http" || endpointURL.Scheme == "https" { - urlPath := path.Clean(endpointURL.Path) - if urlPath == "" || urlPath == "." || urlPath == "/" || urlPath == `\` { - err = fmt.Errorf("Empty or root path is not allowed") + // Applicable to all OS. 
+ if endpointURL.Scheme == "" || endpointURL.Scheme == httpScheme || endpointURL.Scheme == httpsScheme { + if isPathSentinel(path.Clean(endpointURL.Path)) { + err = errEmptyRootPath } return err } // Applicable to Windows only. - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { // On Windows, endpoint can be a path with drive eg. C:\Export and its URL.Scheme is 'C'. // Check if URL.Scheme is a single letter alphabet to represent a drive. // Note: URL.Parse() converts scheme into lower case always. if len(endpointURL.Scheme) == 1 && endpointURL.Scheme[0] >= 'a' && endpointURL.Scheme[0] <= 'z' { // If endpoint is C:\ or C:\export, URL.Path does not have path information like \ or \export // hence we directly work with endpoint. - urlPath := strings.SplitN(path.Clean(endpointURL.String()), ":", 2)[1] - if urlPath == "" || urlPath == "." || urlPath == "/" || urlPath == `\` { - err = fmt.Errorf("Empty or root path is not allowed") + if isPathSentinel(strings.SplitN(path.Clean(endpointURL.String()), ":", 2)[1]) { + err = errEmptyRootPath } return err } } - return fmt.Errorf("Invalid scheme") + return errInvalidScheme } // Check if endpoints are in expected syntax by valid scheme/path across all platforms. @@ -300,7 +309,7 @@ func checkServerSyntax(c *cli.Context) { } for _, ep := range endpoints { - if ep.Scheme == "https" && !globalIsSSL { + if ep.Scheme == httpsScheme && !globalIsSSL { // Certificates should be provided for https configuration. fatalIf(errInvalidArgument, "Certificates not provided for secure configuration") } diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index 627dd7065..df152b3ae 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -133,7 +133,7 @@ func TestFinalizeAPIEndpoints(t *testing.T) { // Tests all the expected input disks for function checkSufficientDisks. func TestCheckSufficientDisks(t *testing.T) { var xlDisks []string - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { xlDisks = []string{ "C:\\mnt\\backend1", "C:\\mnt\\backend2", @@ -330,7 +330,7 @@ func TestCheckEndpointsSyntax(t *testing.T) { "server:/export", } - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { successCases = append(successCases, `\export`, `D:\export`, @@ -401,7 +401,7 @@ func TestIsDistributedSetup(t *testing.T) { disks []string result bool } - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { testCases = []struct { disks []string result bool @@ -457,7 +457,7 @@ func TestIsDistributedSetup(t *testing.T) { func TestInitServerConfig(t *testing.T) { ctx := &cli.Context{} - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal("Failed to set up test config") } diff --git a/cmd/server-mux.go b/cmd/server-mux.go index e879e0c61..c7e9ffd1e 100644 --- a/cmd/server-mux.go +++ b/cmd/server-mux.go @@ -71,6 +71,12 @@ func NewConnMux(c net.Conn) *ConnMux { } } +const ( + protocolTLS = "tls" + protocolHTTP1 = "http" + protocolHTTP2 = "http2" +) + // PeekProtocol - reads the first bytes, then checks if it is similar // to one of the default http methods func (c *ConnMux) PeekProtocol() string { @@ -79,19 +85,19 @@ func (c *ConnMux) PeekProtocol() string { if err != io.EOF { errorIf(err, "Unable to peek into the protocol") } - return "http" + return protocolHTTP1 } for _, m := range defaultHTTP1Methods { if strings.HasPrefix(string(buf), m) { - return "http" + return protocolHTTP1 } } for _, m := range defaultHTTP2Methods { if strings.HasPrefix(string(buf), m) { - return "http2" + return protocolHTTP2 } } - return "tls" + return protocolTLS } // Read - streams the ConnMux buffer when 
reset flag is activated, otherwise @@ -194,10 +200,14 @@ func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux { // and decide if we need to wrap the connection itself with a TLS or not go func(conn net.Conn) { connMux := NewConnMux(conn) - if connMux.PeekProtocol() == "tls" { - l.acceptResCh <- ListenerMuxAcceptRes{conn: tls.Server(connMux, l.config)} + if connMux.PeekProtocol() == protocolTLS { + l.acceptResCh <- ListenerMuxAcceptRes{ + conn: tls.Server(connMux, l.config), + } } else { - l.acceptResCh <- ListenerMuxAcceptRes{conn: connMux} + l.acceptResCh <- ListenerMuxAcceptRes{ + conn: connMux, + } } }(conn) } @@ -367,7 +377,7 @@ func (m *ServerMux) ListenAndServe(certFile, keyFile string) (err error) { if tlsEnabled && r.TLS == nil { // TLS is enabled but Request is not TLS configured u := url.URL{ - Scheme: "https", + Scheme: httpsScheme, Opaque: r.URL.Opaque, User: r.URL.User, Host: r.Host, diff --git a/cmd/server-mux_test.go b/cmd/server-mux_test.go index 7d0555af4..661ad05a4 100644 --- a/cmd/server-mux_test.go +++ b/cmd/server-mux_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -153,7 +153,7 @@ func TestInitListeners(t *testing.T) { } } // Windows doesn't have 'localhost' hostname. - if runtime.GOOS != "windows" { + if runtime.GOOS != globalWindowsOSName { listeners, err := initListeners("localhost:"+getFreePort(), &tls.Config{}) if err != nil { t.Fatalf("Test 3: Unable to initialize listeners %s", err) @@ -395,7 +395,7 @@ func TestListenAndServeTLS(t *testing.T) { res, _ := client.Get("http://" + addr) // Without TLS we expect a re-direction from http to https // And also the request is not rejected. 
- if res != nil && res.StatusCode == http.StatusOK && res.Request.URL.Scheme == "https" { + if res != nil && res.StatusCode == http.StatusOK && res.Request.URL.Scheme == httpsScheme { okNoTLS = true } } diff --git a/cmd/server-startup-msg.go b/cmd/server-startup-msg.go index 0bc8b74f2..0cb7370de 100644 --- a/cmd/server-startup-msg.go +++ b/cmd/server-startup-msg.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -118,7 +118,7 @@ func printCLIAccessMsg(endPoint string) { // Configure 'mc', following block prints platform specific information for minio client. console.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide) - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { mcMessage := fmt.Sprintf("$ mc.exe config host add myminio %s %s %s", endPoint, cred.AccessKey, cred.SecretKey) console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) } else { diff --git a/cmd/server-startup-msg_test.go b/cmd/server-startup-msg_test.go index 1a219858c..bd15cd1e4 100644 --- a/cmd/server-startup-msg_test.go +++ b/cmd/server-startup-msg_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -97,7 +97,7 @@ func TestCertificateNotExpired(t *testing.T) { // Test printing server common message. func TestPrintServerCommonMessage(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } @@ -109,7 +109,7 @@ func TestPrintServerCommonMessage(t *testing.T) { // Tests print cli access message. 
func TestPrintCLIAccessMsg(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } @@ -121,7 +121,7 @@ func TestPrintCLIAccessMsg(t *testing.T) { // Test print startup message. func TestPrintStartupMessage(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } diff --git a/cmd/server-startup-utils.go b/cmd/server-startup-utils.go index 1bb4fce70..aef08e2b6 100644 --- a/cmd/server-startup-utils.go +++ b/cmd/server-startup-utils.go @@ -51,9 +51,9 @@ func getListenIPs(serverAddr string) (hosts []string, port string, err error) { // Finalizes the API endpoints based on the host list and port. func finalizeAPIEndpoints(apiServer *http.Server) (endPoints []string, err error) { // Verify current scheme. - scheme := "http" + scheme := httpScheme if globalIsSSL { - scheme = "https" + scheme = httpsScheme } // Get list of listen ips and port. diff --git a/cmd/signature-v2_test.go b/cmd/signature-v2_test.go index 4b0462a11..91f4176a8 100644 --- a/cmd/signature-v2_test.go +++ b/cmd/signature-v2_test.go @@ -24,7 +24,7 @@ func TestResourceListSorting(t *testing.T) { // Tests presigned v2 signature. func TestDoesPresignedV2SignatureMatch(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal("Unable to initialize test config.") } diff --git a/cmd/signature-v4-parser_test.go b/cmd/signature-v4-parser_test.go index 16d960686..d30b8bee1 100644 --- a/cmd/signature-v4-parser_test.go +++ b/cmd/signature-v4-parser_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -81,7 +81,7 @@ func validateCredentialfields(t *testing.T, testNum int, expectedCredentials cre // A valid format of creadential should be of the following format. // Credential = accessKey + "/"+ scope // where scope = string.Join([]string{ currTime.Format(yyyymmdd), -// "us-east-1", +// globalMinioDefaultRegion, // "s3", // "aws4_request", // },"/") diff --git a/cmd/signature-v4-utils.go b/cmd/signature-v4-utils.go index bf956636e..bb5c5887d 100644 --- a/cmd/signature-v4-utils.go +++ b/cmd/signature-v4-utils.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -49,12 +49,12 @@ func skipContentSha256Cksum(r *http.Request) bool { // isValidRegion - verify if incoming region value is valid with configured Region. func isValidRegion(reqRegion string, confRegion string) bool { if confRegion == "" || confRegion == "US" { - confRegion = "us-east-1" + confRegion = globalMinioDefaultRegion } // Some older s3 clients set region as "US" instead of - // "us-east-1", handle it. + // globalMinioDefaultRegion, handle it. if reqRegion == "US" { - reqRegion = "us-east-1" + reqRegion = globalMinioDefaultRegion } return reqRegion == confRegion } diff --git a/cmd/signature-v4-utils_test.go b/cmd/signature-v4-utils_test.go index 17004862c..343c0ec3f 100644 --- a/cmd/signature-v4-utils_test.go +++ b/cmd/signature-v4-utils_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -82,8 +82,8 @@ func TestIsValidRegion(t *testing.T) { }{ {"", "", false}, - {"us-east-1", "", true}, - {"us-east-1", "US", true}, + {globalMinioDefaultRegion, "", true}, + {globalMinioDefaultRegion, "US", true}, {"us-west-1", "US", false}, {"us-west-1", "us-west-1", true}, // "US" was old naming convention for 'us-east-1'. diff --git a/cmd/signature-v4.go b/cmd/signature-v4.go index cf54c85a3..174e7579d 100644 --- a/cmd/signature-v4.go +++ b/cmd/signature-v4.go @@ -39,9 +39,10 @@ import ( // AWS Signature Version '4' constants. const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601Format = "20060102T150405Z" - yyyymmdd = "20060102" + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601Format = "20060102T150405Z" + yyyymmdd = "20060102" + presignedHostHeader = "host" ) // getCanonicalHeaders generate a list of request headers with their values @@ -52,7 +53,7 @@ func getCanonicalHeaders(signedHeaders http.Header, host string) string { headers = append(headers, strings.ToLower(k)) vals[strings.ToLower(k)] = vv } - headers = append(headers, "host") + headers = append(headers, presignedHostHeader) sort.Strings(headers) var buf bytes.Buffer @@ -60,7 +61,7 @@ func getCanonicalHeaders(signedHeaders http.Header, host string) string { buf.WriteString(k) buf.WriteByte(':') switch { - case k == "host": + case k == presignedHostHeader: buf.WriteString(host) fallthrough default: @@ -82,7 +83,7 @@ func getSignedHeaders(signedHeaders http.Header) string { for k := range signedHeaders { headers = append(headers, strings.ToLower(k)) } - headers = append(headers, "host") + headers = append(headers, presignedHostHeader) sort.Strings(headers) return strings.Join(headers, ";") } diff --git a/cmd/signature-v4_test.go b/cmd/signature-v4_test.go index 45efb295b..4990f9941 100644 --- a/cmd/signature-v4_test.go +++ b/cmd/signature-v4_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -50,7 +50,7 @@ func TestDoesPolicySignatureMatch(t *testing.T) { // (1) It should fail if the access key is incorrect. { form: map[string]string{ - "X-Amz-Credential": fmt.Sprintf(credentialTemplate, "EXAMPLEINVALIDEXAMPL", now.Format(yyyymmdd), "us-east-1"), + "X-Amz-Credential": fmt.Sprintf(credentialTemplate, "EXAMPLEINVALIDEXAMPL", now.Format(yyyymmdd), globalMinioDefaultRegion), }, expected: ErrInvalidAccessKeyID, }, @@ -64,14 +64,14 @@ func TestDoesPolicySignatureMatch(t *testing.T) { // (3) It should fail if the date is invalid (or missing, in this case). { form: map[string]string{ - "X-Amz-Credential": fmt.Sprintf(credentialTemplate, accessKey, now.Format(yyyymmdd), "us-east-1"), + "X-Amz-Credential": fmt.Sprintf(credentialTemplate, accessKey, now.Format(yyyymmdd), globalMinioDefaultRegion), }, expected: ErrMalformedDate, }, // (4) It should fail with a bad signature. { form: map[string]string{ - "X-Amz-Credential": fmt.Sprintf(credentialTemplate, accessKey, now.Format(yyyymmdd), "us-east-1"), + "X-Amz-Credential": fmt.Sprintf(credentialTemplate, accessKey, now.Format(yyyymmdd), globalMinioDefaultRegion), "X-Amz-Date": now.Format(iso8601Format), "X-Amz-Signature": "invalidsignature", "Policy": "policy", @@ -81,9 +81,9 @@ func TestDoesPolicySignatureMatch(t *testing.T) { // (5) It should succeed if everything is correct. 
{ form: map[string]string{ - "X-Amz-Credential": fmt.Sprintf(credentialTemplate, accessKey, now.Format(yyyymmdd), "us-east-1"), + "X-Amz-Credential": fmt.Sprintf(credentialTemplate, accessKey, now.Format(yyyymmdd), globalMinioDefaultRegion), "X-Amz-Date": now.Format(iso8601Format), - "X-Amz-Signature": getSignature(getSigningKey(serverConfig.GetCredential().SecretKey, now, "us-east-1"), "policy"), + "X-Amz-Signature": getSignature(getSigningKey(serverConfig.GetCredential().SecretKey, now, globalMinioDefaultRegion), "policy"), "Policy": "policy", }, expected: ErrNone, @@ -100,7 +100,7 @@ func TestDoesPolicySignatureMatch(t *testing.T) { } func TestDoesPresignedSignatureMatch(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } @@ -121,7 +121,7 @@ func TestDoesPresignedSignatureMatch(t *testing.T) { }{ // (0) Should error without a set URL query. { - region: "us-east-1", + region: globalMinioDefaultRegion, expected: ErrInvalidQueryParams, }, // (1) Should error on an invalid access key. @@ -162,7 +162,7 @@ func TestDoesPresignedSignatureMatch(t *testing.T) { "X-Amz-Credential": fmt.Sprintf(credentialTemplate, accessKeyID, now.Format(yyyymmdd), "us-west-1"), "X-Amz-Content-Sha256": payloadSHA256, }, - region: "us-east-1", + region: globalMinioDefaultRegion, expected: ErrInvalidRegion, }, // (4) Should NOT fail with an invalid region if it doesn't verify it. diff --git a/cmd/storage-rpc-client_test.go b/cmd/storage-rpc-client_test.go index 37965075b..eb668fc6d 100644 --- a/cmd/storage-rpc-client_test.go +++ b/cmd/storage-rpc-client_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -188,7 +188,7 @@ func TestRPCStorageClient(t *testing.T) { func (s *TestRPCStorageSuite) testRPCStorageClient(t *testing.T) { // TODO - Fix below tests to run on windows. - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { return } s.testRPCStorageDisksInfo(t) diff --git a/cmd/storage-rpc-server_test.go b/cmd/storage-rpc-server_test.go index 6ed51d6bc..48082e8ad 100644 --- a/cmd/storage-rpc-server_test.go +++ b/cmd/storage-rpc-server_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -35,7 +35,7 @@ type testStorageRPCServer struct { } func createTestStorageServer(t *testing.T) *testStorageRPCServer { - testPath, err := newTestConfig("us-east-1") + testPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("unable initialize config file, %s", err) } diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index cfdc9ad86..21641b42c 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -110,7 +110,7 @@ func prepareXL() (ObjectLayer, []string, error) { // Initialize FS objects. 
func initFSObjects(disk string, t *testing.T) (obj ObjectLayer) { - newTestConfig("us-east-1") + newTestConfig(globalMinioDefaultRegion) var err error obj, err = newFSObjectLayer(disk) if err != nil { @@ -203,7 +203,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer { t.Fatal("Failed to create disks for the backend") } - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("%s", err) } @@ -367,7 +367,7 @@ func StartTestStorageRPCServer(t TestErrHandler, instanceType string, diskN int) t.Fatalf("%s", err) } - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("%s", err) } @@ -402,7 +402,7 @@ func StartTestPeersRPCServer(t TestErrHandler, instanceType string) TestServer { t.Fatalf("%s", err) } - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("%s", err) } @@ -691,7 +691,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi // Get scope. scope := strings.Join([]string{ currTime.Format(yyyymmdd), - "us-east-1", + globalMinioDefaultRegion, "s3", "aws4_request", }, "/") @@ -701,7 +701,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) - region := sumHMAC(date, []byte("us-east-1")) + region := sumHMAC(date, []byte(globalMinioDefaultRegion)) service := sumHMAC(region, []byte("s3")) signingKey := sumHMAC(service, []byte("aws4_request")) @@ -1919,7 +1919,7 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [ initNSLock(false) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. 
- rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Unable to initialize server config. %s", err) } @@ -1964,7 +1964,7 @@ type objTestDiskNotFoundType func(obj ObjectLayer, instanceType string, dirs []s func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) { // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal("Unexpected error", err) } @@ -1989,7 +1989,7 @@ func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) { // ExecObjectLayerDiskAlteredTest - executes object layer tests while altering // disks in between tests. Creates XL ObjectLayer instance and runs test for XL layer. func ExecObjectLayerDiskAlteredTest(t *testing.T, objTest objTestDiskNotFoundType) { - configPath, err := newTestConfig("us-east-1") + configPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal("Failed to create config directory", err) } @@ -2010,7 +2010,7 @@ type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []str // ExecObjectLayerStaleFilesTest - executes object layer tests those leaves stale // files/directories under .minio/tmp. Creates XL ObjectLayer instance and runs test for XL layer. 
func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) { - configPath, err := newTestConfig("us-east-1") + configPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal("Failed to create config directory", err) } @@ -2169,7 +2169,7 @@ func initTestBrowserPeerRPCEndPoint() http.Handler { } func StartTestBrowserPeerRPCServer(t TestErrHandler, instanceType string) TestServer { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("%s", err) } @@ -2190,7 +2190,7 @@ func StartTestBrowserPeerRPCServer(t TestErrHandler, instanceType string) TestSe } func StartTestS3PeerRPCServer(t TestErrHandler) (TestServer, []string) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("%s", err) } diff --git a/cmd/trie_test.go b/cmd/trie_test.go index f89af985a..b37093144 100644 --- a/cmd/trie_test.go +++ b/cmd/trie_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -55,7 +55,7 @@ func TestPrefixMatch(t *testing.T) { trie = newTrie() // Feed it some fodder: only 'minio' and 'miny-os' should trip the matcher. 
- trie.Insert("minio") + trie.Insert(globalMinioDefaultOwnerID) trie.Insert("amazon") trie.Insert("cheerio") trie.Insert("miny-o's") @@ -65,7 +65,7 @@ func TestPrefixMatch(t *testing.T) { t.Errorf("expected two matches, got: %d", len(matches)) } - if matches[0] != "minio" && matches[1] != "minio" { + if matches[0] != globalMinioDefaultOwnerID && matches[1] != globalMinioDefaultOwnerID { t.Errorf("expected one match to be 'minio', got: '%s' and '%s'", matches[0], matches[1]) } } diff --git a/cmd/update-main.go b/cmd/update-main.go index 1cc39dd6b..b301c1520 100644 --- a/cmd/update-main.go +++ b/cmd/update-main.go @@ -84,7 +84,7 @@ func parseReleaseData(data string) (time.Time, error) { if len(releaseDateSplits) < 3 { return time.Time{}, (errors.New("Update data malformed")) } - if releaseDateSplits[0] != "minio" { + if releaseDateSplits[0] != globalMinioDefaultOwnerID { return time.Time{}, (errors.New("Update data malformed, missing minio tag")) } // "OFFICIAL" tag is still kept for backward compatibility. @@ -126,14 +126,14 @@ func isDocker() bool { // Check if the minio server binary was built with source. func isSourceBuild() bool { - return Version == "DEVELOPMENT.GOGET" + return Version == goGetTag } // Fetch the current version of the Minio server binary. func getCurrentMinioVersion() (current time.Time, err error) { // For development builds we check for binary modTime // to validate against latest minio server release. - if Version != "DEVELOPMENT.GOGET" { + if Version != goGetTag { // Parse current minio version into RFC3339. current, err = time.Parse(time.RFC3339, Version) if err != nil { @@ -164,7 +164,7 @@ func getReleaseUpdate(updateURL string, duration time.Duration) (updateMsg updat downloadURL = "docker pull minio/minio" } else { switch runtime.GOOS { - case "windows": + case globalWindowsOSName: // For windows. 
downloadURL = newUpdateURLPrefix + "/minio.exe" default: diff --git a/cmd/update-notifier.go b/cmd/update-notifier.go index c54a2cfc7..046933594 100644 --- a/cmd/update-notifier.go +++ b/cmd/update-notifier.go @@ -66,7 +66,7 @@ func colorizeUpdateMessage(updateString string, newerThan time.Duration) string default: // on windows terminal turn off unicode characters. var top, bottom, sideBar string - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { top = yellow("*" + strings.Repeat("*", maxContentWidth) + "*") bottom = yellow("*" + strings.Repeat("*", maxContentWidth) + "*") sideBar = yellow("|") diff --git a/cmd/url-sort_test.go b/cmd/url-sort_test.go index fb1b6e5c1..b277952f0 100644 --- a/cmd/url-sort_test.go +++ b/cmd/url-sort_test.go @@ -38,17 +38,17 @@ func TestSortByHostPath(t *testing.T) { }, expected: []*url.URL{ { - Scheme: "http", + Scheme: httpScheme, Host: "abcd.com:9000", Path: "/a/b/c", }, { - Scheme: "http", + Scheme: httpScheme, Host: "abcd.com:9000", Path: "/a/b/d", }, { - Scheme: "http", + Scheme: httpScheme, Host: "abcd.com:9000", Path: "/a/b/e", }, @@ -62,17 +62,17 @@ func TestSortByHostPath(t *testing.T) { }, expected: []*url.URL{ { - Scheme: "http", + Scheme: httpScheme, Host: "abcd.com:9000", Path: "/a/b/c", }, { - Scheme: "http", + Scheme: httpScheme, Host: "defg.com:9000", Path: "/a/b/c", }, { - Scheme: "http", + Scheme: httpScheme, Host: "hijk.com:9000", Path: "/a/b/c", }, diff --git a/cmd/utils.go b/cmd/utils.go index a026d6712..a6a158d48 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -102,9 +102,15 @@ func urlPath2BucketObjectName(u *url.URL) (bucketName, objectName string) { return bucketName, objectName } +// URI scheme constants. 
+const ( + httpScheme = "http" + httpsScheme = "https" +) + var portMap = map[string]string{ - "http": "80", - "https": "443", + httpScheme: "80", + httpsScheme: "443", } // Given a string of the form "host", "host:port", or "[ipv6::address]:port", diff --git a/cmd/utils_test.go b/cmd/utils_test.go index 49d9f65eb..d2b2ba9f6 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -275,7 +275,7 @@ func TestStartProfiler(t *testing.T) { // Tests fetch local address. func TestLocalAddress(t *testing.T) { - if runtime.GOOS == "windows" { + if runtime.GOOS == globalWindowsOSName { return } @@ -297,19 +297,19 @@ func TestLocalAddress(t *testing.T) { isDistXL: true, srvCmdConfig: serverCmdConfig{ endpoints: []*url.URL{{ - Scheme: "http", + Scheme: httpScheme, Host: "localhost:9000", Path: "/mnt/disk1", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.1.2:9000", Path: "/mnt/disk2", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.2.1:9000", Path: "/mnt/disk3", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.2.2:9000", Path: "/mnt/disk4", }}, @@ -338,19 +338,19 @@ func TestLocalAddress(t *testing.T) { isDistXL: true, srvCmdConfig: serverCmdConfig{ endpoints: []*url.URL{{ - Scheme: "http", + Scheme: httpScheme, Host: "1.1.1.1:9000", Path: "/mnt/disk2", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.1.2:9000", Path: "/mnt/disk2", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.2.1:9000", Path: "/mnt/disk3", }, { - Scheme: "http", + Scheme: httpScheme, Host: "1.1.2.2:9000", Path: "/mnt/disk4", }}, diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index f3c0ff026..238713078 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud 
Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -130,7 +130,7 @@ func testLoginWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -179,7 +179,7 @@ func testStorageInfoWebHandler(obj ObjectLayer, instanceType string, t TestErrHa apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -225,7 +225,7 @@ func testServerInfoWebHandler(obj ObjectLayer, instanceType string, t TestErrHan apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -271,7 +271,7 @@ func testMakeBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrHan apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. 
- rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -331,7 +331,7 @@ func testListBucketsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -388,7 +388,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -492,7 +492,7 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -568,7 +568,7 @@ func testGenerateAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrH apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. 
- rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -614,7 +614,7 @@ func testSetAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandle apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -675,7 +675,7 @@ func testGetAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandle apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -720,7 +720,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -811,7 +811,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. 
- rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -903,7 +903,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -1016,7 +1016,7 @@ func testWebGetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -1099,7 +1099,7 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -1205,7 +1205,7 @@ func testWebSetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. 
- rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } @@ -1283,7 +1283,7 @@ func TestWebCheckAuthorization(t *testing.T) { apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal("Init Test config failed", err) } @@ -1365,7 +1365,7 @@ func TestWebObjectLayerNotReady(t *testing.T) { // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal("Init Test config failed", err) } @@ -1444,7 +1444,7 @@ func TestWebObjectLayerNotReady(t *testing.T) { // TestWebObjectLayerFaultyDisks - Test Web RPC responses with faulty disks func TestWebObjectLayerFaultyDisks(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } @@ -1469,7 +1469,7 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) { // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal("Init Test config failed", err) } diff --git a/cmd/xl-v1-healing_test.go b/cmd/xl-v1-healing_test.go index d770f96be..728e573e8 100644 --- a/cmd/xl-v1-healing_test.go +++ b/cmd/xl-v1-healing_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -23,7 +23,7 @@ import ( // Tests healing of format XL. func TestHealFormatXL(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } @@ -271,7 +271,7 @@ func TestHealFormatXL(t *testing.T) { // Tests undoes and validates if the undoing completes successfully. func TestUndoMakeBucket(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } @@ -316,7 +316,7 @@ func TestUndoMakeBucket(t *testing.T) { // Tests quick healing of bucket and bucket metadata. func TestQuickHeal(t *testing.T) { - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatal(err) } diff --git a/cmd/xl-v1-list-objects-heal_test.go b/cmd/xl-v1-list-objects-heal_test.go index 701c12bca..e21ab7765 100644 --- a/cmd/xl-v1-list-objects-heal_test.go +++ b/cmd/xl-v1-list-objects-heal_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage (C) 2016 Minio, Inc. + * Minio Cloud Storage (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,7 +25,7 @@ import ( // TestListObjectsHeal - Tests ListObjectsHeal API for XL func TestListObjectsHeal(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") } diff --git a/cmd/xl-v1-metadata.go b/cmd/xl-v1-metadata.go index f50b083ad..05149c48e 100644 --- a/cmd/xl-v1-metadata.go +++ b/cmd/xl-v1-metadata.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -54,6 +54,14 @@ type checkSumInfo struct { Hash string `json:"hash"` } +// Various algorithms supported by bit-rot protection feature. +const ( + // "sha256" is specifically used on arm64 bit platforms. + sha256Algo = "sha256" + // Rest of the platforms default to blake2b. + blake2bAlgo = "blake2b" +) + // Constant indicates current bit-rot algo used when creating objects. // Depending on the architecture we are choosing a different checksum. var bitRotAlgo = getDefaultBitRotAlgo() @@ -71,10 +79,10 @@ func getDefaultBitRotAlgo() string { // This would also allows erasure coded writes // on ARM64 servers to be on-par with their // counter-part X86_64 servers. - return "sha256" + return sha256Algo default: // Default for all other architectures we use blake2b. - return "blake2b" + return blake2bAlgo } } @@ -135,11 +143,22 @@ type xlMetaV1 struct { Parts []objectPartInfo `json:"parts,omitempty"` } +// XL metadata constants. +const ( + // XL meta version. + xlMetaVersion = "1.0.0" + + // XL meta format string. + xlMetaFormat = "xl" + + // Add new constants here. +) + // newXLMetaV1 - initializes new xlMetaV1, adds version, allocates a fresh erasure info. func newXLMetaV1(object string, dataBlocks, parityBlocks int) (xlMeta xlMetaV1) { xlMeta = xlMetaV1{} - xlMeta.Version = "1.0.0" - xlMeta.Format = "xl" + xlMeta.Version = xlMetaVersion + xlMeta.Format = xlMetaFormat xlMeta.Minio.Release = ReleaseTag xlMeta.Erasure = erasureInfo{ Algorithm: erasureAlgorithmKlauspost, @@ -154,7 +173,7 @@ func newXLMetaV1(object string, dataBlocks, parityBlocks int) (xlMeta xlMetaV1) // IsValid - tells if the format is sane by validating the version // string and format style. func (m xlMetaV1) IsValid() bool { - return m.Version == "1.0.0" && m.Format == "xl" + return m.Version == xlMetaVersion && m.Format == xlMetaFormat } // objectPartIndex - returns the index of matching object part number. 
diff --git a/cmd/xl-v1-multipart-common_test.go b/cmd/xl-v1-multipart-common_test.go index d3c94b594..5d31eb1b2 100644 --- a/cmd/xl-v1-multipart-common_test.go +++ b/cmd/xl-v1-multipart-common_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. + * Minio Cloud Storage, (C) 2014-2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,7 +23,7 @@ import ( func TestUpdateUploadJSON(t *testing.T) { // Initialize configuration - root, err := newTestConfig("us-east-1") + root, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("%s", err) } diff --git a/cmd/xl-v1-utils_test.go b/cmd/xl-v1-utils_test.go index d66a85c63..1bada5408 100644 --- a/cmd/xl-v1-utils_test.go +++ b/cmd/xl-v1-utils_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -131,9 +131,9 @@ func TestHashOrder(t *testing.T) { // newTestXLMetaV1 - initializes new xlMetaV1, adds version, allocates a fresh erasure info and metadata. func newTestXLMetaV1() xlMetaV1 { xlMeta := xlMetaV1{} - xlMeta.Version = "1.0.0" - xlMeta.Format = "xl" - xlMeta.Minio.Release = "1.0.0" + xlMeta.Version = xlMetaVersion + xlMeta.Format = xlMetaFormat + xlMeta.Minio.Release = "test" xlMeta.Erasure = erasureInfo{ Algorithm: "klauspost/reedsolomon/vandermonde", DataBlocks: 5, From dfc2ef300454c797ca8176d0918824396fec9c40 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 18 Jan 2017 12:55:57 -0800 Subject: [PATCH 079/100] storage/rpc: Remove network error restriction. (#3591) This restriction has lots of side affects, since we do not have a mechanism to clear states like this it is better not to keep them. 
Network errors are common and can occur with simple cable removal etc. Since we already have a retry mechanism this error count and stateful nature can bring problems on a long running cluster. --- cmd/storage-rpc-client.go | 165 -------------------------------------- 1 file changed, 165 deletions(-) diff --git a/cmd/storage-rpc-client.go b/cmd/storage-rpc-client.go index acb2b1376..ebe62e35d 100644 --- a/cmd/storage-rpc-client.go +++ b/cmd/storage-rpc-client.go @@ -23,7 +23,6 @@ import ( "net/rpc" "net/url" "path" - "sync/atomic" "github.com/minio/minio/pkg/disk" ) @@ -136,15 +135,6 @@ func (n *networkStorage) String() string { return n.rpcClient.ServerAddr() + ":" + n.rpcClient.ServiceEndpoint() } -// Network IO error count is kept at 256 with some simple -// math. Before we reject the disk completely. The combination -// of retry logic and total error count roughly comes around -// 2.5secs ( 2 * 5 * time.Millisecond * 256) which is when we -// basically take the disk offline completely. This is considered -// sufficient time tradeoff to avoid large delays in-terms of -// incoming i/o. -const maxAllowedNetworkIOError = 256 // maximum allowed network IOError. - // Init - attempts a login to reconnect. func (n *networkStorage) Init() error { err := n.rpcClient.Login() @@ -160,18 +150,6 @@ func (n *networkStorage) Close() (err error) { // DiskInfo - fetch disk information for a remote disk. func (n *networkStorage) DiskInfo() (info disk.Info, err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. 
- if n.networkIOErrCount > maxAllowedNetworkIOError { - return disk.Info{}, errFaultyRemoteDisk - } - args := AuthRPCArgs{} if err = n.rpcClient.Call("Storage.DiskInfoHandler", &args, &info); err != nil { return disk.Info{}, toStorageErr(err) @@ -181,18 +159,6 @@ func (n *networkStorage) DiskInfo() (info disk.Info, err error) { // MakeVol - create a volume on a remote disk. func (n *networkStorage) MakeVol(volume string) (err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. - if n.networkIOErrCount > maxAllowedNetworkIOError { - return errFaultyRemoteDisk - } - reply := AuthRPCReply{} args := GenericVolArgs{Vol: volume} if err := n.rpcClient.Call("Storage.MakeVolHandler", &args, &reply); err != nil { @@ -203,18 +169,6 @@ func (n *networkStorage) MakeVol(volume string) (err error) { // ListVols - List all volumes on a remote disk. func (n *networkStorage) ListVols() (vols []VolInfo, err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. - if n.networkIOErrCount > maxAllowedNetworkIOError { - return nil, errFaultyRemoteDisk - } - ListVols := ListVolsReply{} err = n.rpcClient.Call("Storage.ListVolsHandler", &AuthRPCArgs{}, &ListVols) if err != nil { @@ -225,18 +179,6 @@ func (n *networkStorage) ListVols() (vols []VolInfo, err error) { // StatVol - get volume info over the network. func (n *networkStorage) StatVol(volume string) (volInfo VolInfo, err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. 
- if n.networkIOErrCount > maxAllowedNetworkIOError { - return VolInfo{}, errFaultyRemoteDisk - } - args := GenericVolArgs{Vol: volume} if err = n.rpcClient.Call("Storage.StatVolHandler", &args, &volInfo); err != nil { return VolInfo{}, toStorageErr(err) @@ -246,18 +188,6 @@ func (n *networkStorage) StatVol(volume string) (volInfo VolInfo, err error) { // DeleteVol - Deletes a volume over the network. func (n *networkStorage) DeleteVol(volume string) (err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. - if n.networkIOErrCount > maxAllowedNetworkIOError { - return errFaultyRemoteDisk - } - reply := AuthRPCReply{} args := GenericVolArgs{Vol: volume} if err := n.rpcClient.Call("Storage.DeleteVolHandler", &args, &reply); err != nil { @@ -269,17 +199,6 @@ func (n *networkStorage) DeleteVol(volume string) (err error) { // File operations. func (n *networkStorage) PrepareFile(volume, path string, length int64) (err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. - if n.networkIOErrCount > maxAllowedNetworkIOError { - return errFaultyRemoteDisk - } - reply := AuthRPCReply{} if err = n.rpcClient.Call("Storage.PrepareFileHandler", &PrepareFileArgs{ Vol: volume, @@ -293,18 +212,6 @@ func (n *networkStorage) PrepareFile(volume, path string, length int64) (err err // AppendFile - append file writes buffer to a remote network path. func (n *networkStorage) AppendFile(volume, path string, buffer []byte) (err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. 
- if n.networkIOErrCount > maxAllowedNetworkIOError { - return errFaultyRemoteDisk - } - reply := AuthRPCReply{} if err = n.rpcClient.Call("Storage.AppendFileHandler", &AppendFileArgs{ Vol: volume, @@ -318,18 +225,6 @@ func (n *networkStorage) AppendFile(volume, path string, buffer []byte) (err err // StatFile - get latest Stat information for a file at path. func (n *networkStorage) StatFile(volume, path string) (fileInfo FileInfo, err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. - if n.networkIOErrCount > maxAllowedNetworkIOError { - return FileInfo{}, errFaultyRemoteDisk - } - if err = n.rpcClient.Call("Storage.StatFileHandler", &StatFileArgs{ Vol: volume, Path: path, @@ -344,18 +239,6 @@ func (n *networkStorage) StatFile(volume, path string) (fileInfo FileInfo, err e // This API is meant to be used on files which have small memory footprint, do // not use this on large files as it would cause server to crash. func (n *networkStorage) ReadAll(volume, path string) (buf []byte, err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. - if n.networkIOErrCount > maxAllowedNetworkIOError { - return nil, errFaultyRemoteDisk - } - if err = n.rpcClient.Call("Storage.ReadAllHandler", &ReadAllArgs{ Vol: volume, Path: path, @@ -367,12 +250,6 @@ func (n *networkStorage) ReadAll(volume, path string) (buf []byte, err error) { // ReadFile - reads a file at remote path and fills the buffer. 
func (n *networkStorage) ReadFile(volume string, path string, offset int64, buffer []byte) (m int64, err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - defer func() { if r := recover(); r != nil { // Recover any panic from allocation, and return error. @@ -380,12 +257,6 @@ func (n *networkStorage) ReadFile(volume string, path string, offset int64, buff } }() // Do not crash the server. - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. - if n.networkIOErrCount > maxAllowedNetworkIOError { - return 0, errFaultyRemoteDisk - } - var result []byte err = n.rpcClient.Call("Storage.ReadFileHandler", &ReadFileArgs{ Vol: volume, @@ -403,18 +274,6 @@ func (n *networkStorage) ReadFile(volume string, path string, offset int64, buff // ListDir - list all entries at prefix. func (n *networkStorage) ListDir(volume, path string) (entries []string, err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. - if n.networkIOErrCount > maxAllowedNetworkIOError { - return nil, errFaultyRemoteDisk - } - if err = n.rpcClient.Call("Storage.ListDirHandler", &ListDirArgs{ Vol: volume, Path: path, @@ -427,18 +286,6 @@ func (n *networkStorage) ListDir(volume, path string) (entries []string, err err // DeleteFile - Delete a file at path. func (n *networkStorage) DeleteFile(volume, path string) (err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. 
- if n.networkIOErrCount > maxAllowedNetworkIOError { - return errFaultyRemoteDisk - } - reply := AuthRPCReply{} if err = n.rpcClient.Call("Storage.DeleteFileHandler", &DeleteFileArgs{ Vol: volume, @@ -451,18 +298,6 @@ func (n *networkStorage) DeleteFile(volume, path string) (err error) { // RenameFile - rename a remote file from source to destination. func (n *networkStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) { - defer func() { - if err == errDiskNotFound { - atomic.AddInt32(&n.networkIOErrCount, 1) - } - }() - - // Take remote disk offline if the total network errors. - // are more than maximum allowable IO error limit. - if n.networkIOErrCount > maxAllowedNetworkIOError { - return errFaultyRemoteDisk - } - reply := AuthRPCReply{} if err = n.rpcClient.Call("Storage.RenameFileHandler", &RenameFileArgs{ SrcVol: srcVolume, From 0715032598fb2fb632dc9129a1e0b665359c9670 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Thu, 19 Jan 2017 18:34:18 +0100 Subject: [PATCH 080/100] heal: Add ListBucketsHeal object API (#3563) ListBucketsHeal will list which buckets that need to be healed: * ListBucketsHeal() (buckets []BucketInfo, err error) --- cmd/admin-handlers.go | 28 +++++ cmd/admin-handlers_test.go | 2 +- cmd/admin-router.go | 5 +- cmd/api-response.go | 14 ++- cmd/fs-v1.go | 5 + cmd/object-api-datatypes.go | 33 ++++-- cmd/object-api-interface.go | 1 + cmd/xl-v1-healing-common.go | 8 +- cmd/xl-v1-healing.go | 112 +++++++++++++++++- cmd/xl-v1-healing_test.go | 63 ++++++++++ cmd/xl-v1-list-objects-heal.go | 10 +- docs/admin-api/management-api.md | 6 + pkg/madmin/API.md | 32 ++++- pkg/madmin/examples/heal-buckets-list.go | 60 ++++++++++ .../{heal-list.go => heal-objects-list.go} | 0 pkg/madmin/heal-commands.go | 109 ++++++++++++++++- 16 files changed, 445 insertions(+), 43 deletions(-) create mode 100644 pkg/madmin/examples/heal-buckets-list.go rename pkg/madmin/examples/{heal-list.go => heal-objects-list.go} (100%) diff --git 
a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 8d93f605d..794f89689 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -345,6 +345,34 @@ func (adminAPI adminAPIHandlers) ListObjectsHealHandler(w http.ResponseWriter, r writeSuccessResponseXML(w, encodeResponse(listResponse)) } +// ListBucketsHealHandler - GET /?heal +func (adminAPI adminAPIHandlers) ListBucketsHealHandler(w http.ResponseWriter, r *http.Request) { + // Get object layer instance. + objLayer := newObjectLayerFn() + if objLayer == nil { + writeErrorResponse(w, ErrServerNotInitialized, r.URL) + return + } + + // Validate request signature. + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, adminAPIErr, r.URL) + return + } + + // Get the list buckets to be healed. + bucketsInfo, err := objLayer.ListBucketsHeal() + if err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + listResponse := generateListBucketsResponse(bucketsInfo) + // Write success response. + writeSuccessResponseXML(w, encodeResponse(listResponse)) +} + // HealBucketHandler - POST /?heal&bucket=mybucket // - bucket is mandatory query parameter // Heal a given bucket, if present. 
diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 7daa801f7..138478213 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -687,7 +687,7 @@ func TestListObjectsHealHandler(t *testing.T) { if err != nil { t.Fatalf("Test %d - Failed to construct list objects needing heal request - %v", i+1, err) } - req.Header.Set(minioAdminOpHeader, "list") + req.Header.Set(minioAdminOpHeader, "list-objects") cred := serverConfig.GetCredential() err = signRequestV4(req, cred.AccessKey, cred.SecretKey) diff --git a/cmd/admin-router.go b/cmd/admin-router.go index 3c5f63a96..a21b1ffb6 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -49,7 +49,10 @@ func registerAdminRouter(mux *router.Router) { /// Heal operations // List Objects needing heal. - adminRouter.Methods("GET").Queries("heal", "").Headers(minioAdminOpHeader, "list").HandlerFunc(adminAPI.ListObjectsHealHandler) + adminRouter.Methods("GET").Queries("heal", "").Headers(minioAdminOpHeader, "list-objects").HandlerFunc(adminAPI.ListObjectsHealHandler) + // List Buckets needing heal. + adminRouter.Methods("GET").Queries("heal", "").Headers(minioAdminOpHeader, "list-buckets").HandlerFunc(adminAPI.ListBucketsHealHandler) + // Heal Buckets. adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "bucket").HandlerFunc(adminAPI.HealBucketHandler) // Heal Objects. 
diff --git a/cmd/api-response.go b/cmd/api-response.go index f07779112..447a1e732 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -181,8 +181,9 @@ type CommonPrefix struct { // Bucket container for bucket metadata type Bucket struct { - Name string - CreationDate string // time string of format "2006-01-02T15:04:05.000Z" + Name string + CreationDate string // time string of format "2006-01-02T15:04:05.000Z" + HealBucketInfo *HealBucketInfo `xml:"HealBucketInfo,omitempty"` } // Object container for object metadata @@ -196,8 +197,8 @@ type Object struct { Owner Owner // The class of storage used to store the object. - StorageClass string - HealInfo *HealInfo `xml:"HealInfo,omitempty"` + StorageClass string + HealObjectInfo *HealObjectInfo `xml:"HealObjectInfo,omitempty"` } // CopyObjectResponse container returns ETag and LastModified of the successfully copied object @@ -285,6 +286,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse { var listbucket = Bucket{} listbucket.Name = bucket.Name listbucket.CreationDate = bucket.Created.Format(timeFormatAMZLong) + listbucket.HealBucketInfo = bucket.HealBucketInfo listbuckets = append(listbuckets, listbucket) } @@ -317,8 +319,8 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max content.Size = object.Size content.StorageClass = globalMinioDefaultStorageClass content.Owner = owner - // object.HealInfo is non-empty only when resp is constructed in ListObjectsHeal. - content.HealInfo = object.HealInfo + // object.HealObjectInfo is non-empty only when resp is constructed in ListObjectsHeal. 
+ content.HealObjectInfo = object.HealObjectInfo contents = append(contents, content) } // TODO - support EncodingType in xml decoding diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 24c25b124..adcdb7542 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -897,3 +897,8 @@ func (fs fsObjects) HealBucket(bucket string) error { func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { return ListObjectsInfo{}, traceError(NotImplemented{}) } + +// ListBucketsHeal - list all buckets to be healed. Valid only for XL +func (fs fsObjects) ListBucketsHeal() ([]BucketInfo, error) { + return []BucketInfo{}, traceError(NotImplemented{}) +} diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go index 336fb16b2..0da114699 100644 --- a/cmd/object-api-datatypes.go +++ b/cmd/object-api-datatypes.go @@ -50,6 +50,20 @@ type StorageInfo struct { } } +type healStatus int + +const ( + healthy healStatus = iota // Object is healthy + canHeal // Object can be healed + corrupted // Object can't be healed + quorumUnavailable // Object can't be healed until read quorum is available +) + +// HealBucketInfo - represents healing related information of a bucket. +type HealBucketInfo struct { + Status healStatus +} + // BucketInfo - represents bucket metadata. type BucketInfo struct { // Name of the bucket. @@ -57,18 +71,13 @@ type BucketInfo struct { // Date and time when the bucket was created. Created time.Time + + // Healing information + HealBucketInfo *HealBucketInfo `xml:"HealBucketInfo,omitempty"` } -type healStatus int - -const ( - canHeal healStatus = iota // Object can be healed - corrupted // Object can't be healed - quorumUnavailable // Object can't be healed until read quorum is available -) - -// HealInfo - represents healing related information of an object. -type HealInfo struct { +// HealObjectInfo - represents healing related information of an object. 
+type HealObjectInfo struct { Status healStatus MissingDataCount int MissingPartityCount int @@ -103,8 +112,8 @@ type ObjectInfo struct { ContentEncoding string // User-Defined metadata - UserDefined map[string]string - HealInfo *HealInfo `xml:"HealInfo,omitempty"` + UserDefined map[string]string + HealObjectInfo *HealObjectInfo `xml:"HealObjectInfo,omitempty"` } // ListPartsInfo - represents list of all parts. diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index b0a04510c..6687d9594 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -48,6 +48,7 @@ type ObjectLayer interface { // Healing operations. HealBucket(bucket string) error + ListBucketsHeal() (buckets []BucketInfo, err error) HealObject(bucket, object string) error ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) } diff --git a/cmd/xl-v1-healing-common.go b/cmd/xl-v1-healing-common.go index c87d60977..d652bc462 100644 --- a/cmd/xl-v1-healing-common.go +++ b/cmd/xl-v1-healing-common.go @@ -141,12 +141,12 @@ func xlShouldHeal(partsMetadata []xlMetaV1, errs []error) bool { // xlHealStat - returns a structure which describes how many data, // parity erasure blocks are missing and if it is possible to heal // with the blocks present. -func xlHealStat(xl xlObjects, partsMetadata []xlMetaV1, errs []error) HealInfo { +func xlHealStat(xl xlObjects, partsMetadata []xlMetaV1, errs []error) HealObjectInfo { // Less than quorum erasure coded blocks of the object have the same create time. // This object can't be healed with the information we have. modTime, count := commonTime(listObjectModtimes(partsMetadata, errs)) if count < xl.readQuorum { - return HealInfo{ + return HealObjectInfo{ Status: quorumUnavailable, MissingDataCount: 0, MissingPartityCount: 0, @@ -156,7 +156,7 @@ func xlHealStat(xl xlObjects, partsMetadata []xlMetaV1, errs []error) HealInfo { // If there isn't a valid xlMeta then we can't heal the object. 
xlMeta, err := pickValidXLMeta(partsMetadata, modTime) if err != nil { - return HealInfo{ + return HealObjectInfo{ Status: corrupted, MissingDataCount: 0, MissingPartityCount: 0, @@ -183,7 +183,7 @@ func xlHealStat(xl xlObjects, partsMetadata []xlMetaV1, errs []error) HealInfo { // This object can be healed. We have enough object metadata // to reconstruct missing erasure coded blocks. - return HealInfo{ + return HealObjectInfo{ Status: canHeal, MissingDataCount: missingDataCount, MissingPartityCount: missingParityCount, diff --git a/cmd/xl-v1-healing.go b/cmd/xl-v1-healing.go index 4b1b89dbe..49a03676b 100644 --- a/cmd/xl-v1-healing.go +++ b/cmd/xl-v1-healing.go @@ -19,6 +19,7 @@ package cmd import ( "fmt" "path" + "sort" "sync" ) @@ -153,9 +154,11 @@ func healBucketMetadata(storageDisks []StorageAPI, bucket string, readQuorum int return healBucketMetaFn(lConfigPath) } -// listBucketNames list all bucket names from all disks to heal. -func listBucketNames(storageDisks []StorageAPI) (bucketNames map[string]struct{}, err error) { - bucketNames = make(map[string]struct{}) +// listAllBuckets lists all buckets from all disks. 
It also +// returns the occurrence of each buckets in all disks +func listAllBuckets(storageDisks []StorageAPI) (buckets map[string]VolInfo, bucketsOcc map[string]int, err error) { + buckets = make(map[string]VolInfo) + bucketsOcc = make(map[string]int) for _, disk := range storageDisks { if disk == nil { continue @@ -173,7 +176,10 @@ func listBucketNames(storageDisks []StorageAPI) (bucketNames map[string]struct{} if isMinioMetaBucketName(volInfo.Name) { continue } - bucketNames[volInfo.Name] = struct{}{} + // Increase counter per bucket name + bucketsOcc[volInfo.Name]++ + // Save volume info under bucket name + buckets[volInfo.Name] = volInfo } continue } @@ -183,7 +189,101 @@ func listBucketNames(storageDisks []StorageAPI) (bucketNames map[string]struct{} } break } - return bucketNames, err + return buckets, bucketsOcc, err +} + +// reduceHealStatus - fetches the worst heal status in a provided slice +func reduceHealStatus(status []healStatus) healStatus { + worstStatus := healthy + for _, st := range status { + if st > worstStatus { + worstStatus = st + } + } + return worstStatus +} + +// bucketHealStatus - returns the heal status of the provided bucket. 
Internally, +// this function lists all object heal status of objects inside meta bucket config +// directory and returns the worst heal status that can be found +func (xl xlObjects) bucketHealStatus(bucketName string) (healStatus, error) { + // A list of all the bucket config files + configFiles := []string{bucketPolicyConfig, bucketNotificationConfig, bucketListenerConfig} + // The status of buckets config files + configsHealStatus := make([]healStatus, len(configFiles)) + // The list of errors found during checking heal status of each config file + configsErrs := make([]error, len(configFiles)) + // The path of meta bucket that contains all config files + configBucket := path.Join(minioMetaBucket, bucketConfigPrefix, bucketName) + + // Check of config files heal status in go-routines + var wg sync.WaitGroup + // Loop over config files + for idx, configFile := range configFiles { + wg.Add(1) + // Compute heal status of current config file + go func(bucket, object string, index int) { + defer wg.Done() + // Check + listObjectsHeal, err := xl.listObjectsHeal(bucket, object, "", "", 1) + // If any error, save and immediately quit + if err != nil { + configsErrs[index] = err + return + } + // Check if current bucket contains any not healthy config file and save heal status + if len(listObjectsHeal.Objects) > 0 { + configsHealStatus[index] = listObjectsHeal.Objects[0].HealObjectInfo.Status + } + }(configBucket, configFile, idx) + } + wg.Wait() + + // Return any found error + for _, err := range configsErrs { + if err != nil { + return healthy, err + } + } + + // Reduce and return heal status + return reduceHealStatus(configsHealStatus), nil +} + +// ListBucketsHeal - Find all buckets that need to be healed +func (xl xlObjects) ListBucketsHeal() ([]BucketInfo, error) { + listBuckets := []BucketInfo{} + // List all buckets that can be found in all disks + buckets, occ, err := listAllBuckets(xl.storageDisks) + if err != nil { + return listBuckets, err + } + // Iterate 
over all buckets + for _, currBucket := range buckets { + // Check the status of bucket metadata + bucketHealStatus, err := xl.bucketHealStatus(currBucket.Name) + if err != nil { + return []BucketInfo{}, err + } + // If all metadata are sane, check if the bucket directory is present in all disks + if bucketHealStatus == healthy && occ[currBucket.Name] != len(xl.storageDisks) { + // Current bucket is missing in some of the storage disks + bucketHealStatus = canHeal + } + // Add current bucket to the returned result if not healthy + if bucketHealStatus != healthy { + listBuckets = append(listBuckets, + BucketInfo{ + Name: currBucket.Name, + Created: currBucket.Created, + HealBucketInfo: &HealBucketInfo{Status: bucketHealStatus}, + }) + } + + } + // Sort found buckets + sort.Sort(byBucketName(listBuckets)) + return listBuckets, nil } // This function is meant for all the healing that needs to be done @@ -196,7 +296,7 @@ func listBucketNames(storageDisks []StorageAPI) (bucketNames map[string]struct{} // - add support for healing dangling `xl.json`. func quickHeal(storageDisks []StorageAPI, writeQuorum int, readQuorum int) error { // List all bucket names from all disks. 
- bucketNames, err := listBucketNames(storageDisks) + bucketNames, _, err := listAllBuckets(storageDisks) if err != nil { return err } diff --git a/cmd/xl-v1-healing_test.go b/cmd/xl-v1-healing_test.go index 728e573e8..1acbd9909 100644 --- a/cmd/xl-v1-healing_test.go +++ b/cmd/xl-v1-healing_test.go @@ -423,3 +423,66 @@ func TestQuickHeal(t *testing.T) { t.Fatal("Got an unexpected error: ", err) } } + +// TestListBucketsHeal lists buckets heal result +func TestListBucketsHeal(t *testing.T) { + root, err := newTestConfig("us-east-1") + if err != nil { + t.Fatal(err) + } + defer removeAll(root) + + nDisks := 16 + fsDirs, err := getRandomDisks(nDisks) + if err != nil { + t.Fatal(err) + } + defer removeRoots(fsDirs) + + endpoints, err := parseStorageEndpoints(fsDirs) + if err != nil { + t.Fatal(err) + } + + obj, _, err := initObjectLayer(endpoints) + if err != nil { + t.Fatal(err) + } + + // Create a bucket that won't get corrupted + saneBucket := "sanebucket" + if err = obj.MakeBucket(saneBucket); err != nil { + t.Fatal(err) + } + + // Create a bucket that will be removed in some disks + corruptedBucketName := getRandomBucketName() + if err = obj.MakeBucket(corruptedBucketName); err != nil { + t.Fatal(err) + } + + xl := obj.(*xlObjects) + + // Remove bucket in disk 0, 1 and 2 + for i := 0; i <= 2; i++ { + if err = xl.storageDisks[i].DeleteVol(corruptedBucketName); err != nil { + t.Fatal(err) + } + } + + // List the missing buckets. 
+ buckets, err := xl.ListBucketsHeal() + if err != nil { + t.Fatal(err) + } + + // Check the number of buckets in list buckets heal result + if len(buckets) != 1 { + t.Fatalf("Length of missing buckets is incorrect, expected: 1, found: %d", len(buckets)) + } + + // Check the name of bucket in list buckets heal result + if buckets[0].Name != corruptedBucketName { + t.Fatalf("Name of missing bucket is incorrect, expected: %s, found: %s", corruptedBucketName, buckets[0].Name) + } +} diff --git a/cmd/xl-v1-list-objects-heal.go b/cmd/xl-v1-list-objects-heal.go index 2438c17a2..1fb6702ac 100644 --- a/cmd/xl-v1-list-objects-heal.go +++ b/cmd/xl-v1-list-objects-heal.go @@ -159,11 +159,11 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma if xlShouldHeal(partsMetadata, errs) { healStat := xlHealStat(xl, partsMetadata, errs) result.Objects = append(result.Objects, ObjectInfo{ - Name: objInfo.Name, - ModTime: objInfo.ModTime, - Size: objInfo.Size, - IsDir: false, - HealInfo: &healStat, + Name: objInfo.Name, + ModTime: objInfo.ModTime, + Size: objInfo.Size, + IsDir: false, + HealObjectInfo: &healStat, }) } objectLock.RUnlock() diff --git a/docs/admin-api/management-api.md b/docs/admin-api/management-api.md index b8ed45489..dc0167c82 100644 --- a/docs/admin-api/management-api.md +++ b/docs/admin-api/management-api.md @@ -112,3 +112,9 @@ - ErrInvalidBucketName - ErrInvalidObjectName - ErrInvalidDuration + +### Healing + +* ListBucketsHeal + - GET /?heal + - x-minio-operation: list-buckets diff --git a/pkg/madmin/API.md b/pkg/madmin/API.md index 87ba3a3d1..8b6d0393a 100644 --- a/pkg/madmin/API.md +++ b/pkg/madmin/API.md @@ -171,8 +171,8 @@ __Example__ log.Fatalln(err) return } - if object.HealInfo != nil { - switch healInfo := *object.HealInfo; healInfo.Status { + if object.HealObjectInfo != nil { + switch healInfo := *object.HealObjectInfo; healInfo.Status { case madmin.CanHeal: fmt.Println(object.Key, " can be healed.") case 
madmin.QuorumUnavailable: @@ -185,6 +185,34 @@ __Example__ } ``` + +### ListBucketsList() error +If successful returns information on the list of buckets that need healing. + +__Example__ + +``` go + // List buckets that need healing + healBucketsList, err := madmClnt.ListBucketsHeal() + if err != nil { + fmt.Println(err) + return + } + for bucket := range healBucketsList { + if bucket.HealBucketInfo != nil { + switch healInfo := *object.HealBucketInfo; healInfo.Status { + case madmin.CanHeal: + fmt.Println(bucket.Key, " can be healed.") + case madmin.QuorumUnavailable: + fmt.Println(bucket.Key, " can't be healed until quorum is available.") + case madmin.Corrupted: + fmt.Println(bucket.Key, " can't be healed, not enough information.") + } + } + fmt.Println("bucket: ", bucket) + } +``` + ### HealBucket(bucket string, isDryRun bool) error If bucket is successfully healed returns nil, otherwise returns error indicating the reason for failure. If isDryRun is true, then the bucket is not healed, but heal bucket request is validated by the server. e.g, if the bucket exists, if bucket name is valid etc. diff --git a/pkg/madmin/examples/heal-buckets-list.go b/pkg/madmin/examples/heal-buckets-list.go new file mode 100644 index 000000000..88b561651 --- /dev/null +++ b/pkg/madmin/examples/heal-buckets-list.go @@ -0,0 +1,60 @@ +// +build ignore + +package main + +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import ( + "fmt" + "log" + + "github.com/minio/minio/pkg/madmin" +) + +func main() { + + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are + // dummy values, please replace them with original values. + + // API requests are secure (HTTPS) if secure=true and insecure (HTTPS) otherwise. + // New returns an Minio Admin client object. + madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + // List buckets that need healing + healBucketsList, err := madmClnt.ListBucketsHeal() + if err != nil { + log.Fatalln(err) + } + + for _, bucket := range healBucketsList { + if bucket.HealBucketInfo != nil { + switch healInfo := *bucket.HealBucketInfo; healInfo.Status { + case madmin.CanHeal: + fmt.Println(bucket.Name, " can be healed.") + case madmin.QuorumUnavailable: + fmt.Println(bucket.Name, " can't be healed until quorum is available.") + case madmin.Corrupted: + fmt.Println(bucket.Name, " can't be healed, not enough information.") + } + } + fmt.Println("bucket: ", bucket) + } +} diff --git a/pkg/madmin/examples/heal-list.go b/pkg/madmin/examples/heal-objects-list.go similarity index 100% rename from pkg/madmin/examples/heal-list.go rename to pkg/madmin/examples/heal-objects-list.go diff --git a/pkg/madmin/heal-commands.go b/pkg/madmin/heal-commands.go index 84f2aa3c8..e439b84d2 100644 --- a/pkg/madmin/heal-commands.go +++ b/pkg/madmin/heal-commands.go @@ -63,20 +63,65 @@ type commonPrefix struct { Prefix string } +// Owner - bucket owner/principal +type Owner struct { + ID string + DisplayName string +} + +// Bucket container for bucket metadata +type Bucket struct { + Name string + CreationDate string // time string of format "2006-01-02T15:04:05.000Z" + + HealBucketInfo *HealBucketInfo `xml:"HealBucketInfo,omitempty"` +} + +// ListBucketsHealResponse - format for list buckets response +type ListBucketsHealResponse struct { + XMLName xml.Name 
`xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult" json:"-"` + + Owner Owner + + // Container for one or more buckets. + Buckets struct { + Buckets []Bucket `xml:"Bucket"` + } // Buckets are nested +} + // HealStatus - represents different states of healing an object could be in. type healStatus int const ( + // Healthy - Object that is already healthy + Healthy healStatus = iota // CanHeal - Object can be healed - CanHeal healStatus = iota + CanHeal // Corrupted - Object can't be healed Corrupted // QuorumUnavailable - Object can't be healed until read quorum is available QuorumUnavailable ) -// HealInfo - represents healing related information of an object. -type HealInfo struct { +// HealBucketInfo - represents healing related information of a bucket. +type HealBucketInfo struct { + Status healStatus +} + +// BucketInfo - represents bucket metadata. +type BucketInfo struct { + // Name of the bucket. + Name string + + // Date and time when the bucket was created. + Created time.Time + + // Healing information + HealBucketInfo *HealBucketInfo `xml:"HealBucketInfo,omitempty"` +} + +// HealObjectInfo - represents healing related information of an object. 
+type HealObjectInfo struct { Status healStatus MissingDataCount int MissingPartityCount int @@ -108,8 +153,8 @@ type ObjectInfo struct { StorageClass string `json:"storageClass"` // Error - Err error `json:"-"` - HealInfo *HealInfo `json:"healInfo,omitempty"` + Err error `json:"-"` + HealObjectInfo *HealObjectInfo `json:"healObjectInfo,omitempty"` } type healQueryKey string @@ -143,7 +188,7 @@ func (adm *AdminClient) listObjectsHeal(bucket, prefix, delimiter, marker string queryVal := mkHealQueryVal(bucket, prefix, marker, delimiter, maxKeyStr) hdrs := make(http.Header) - hdrs.Set(minioAdminOpHeader, "list") + hdrs.Set(minioAdminOpHeader, "list-objects") reqData := requestData{ queryValues: queryVal, @@ -240,6 +285,58 @@ func (adm *AdminClient) ListObjectsHeal(bucket, prefix string, recursive bool, d return objectStatCh, nil } +const timeFormatAMZLong = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision. + +// ListBucketsHeal - issues heal bucket list API request +func (adm *AdminClient) ListBucketsHeal() ([]BucketInfo, error) { + queryVal := url.Values{} + queryVal.Set("heal", "") + + hdrs := make(http.Header) + hdrs.Set(minioAdminOpHeader, "list-buckets") + + reqData := requestData{ + queryValues: queryVal, + customHeaders: hdrs, + } + + // Execute GET on /?heal to list objects needing heal. 
+ resp, err := adm.executeMethod("GET", reqData) + + defer closeResponse(resp) + if err != nil { + return []BucketInfo{}, err + } + + if resp.StatusCode != http.StatusOK { + return []BucketInfo{}, errors.New("Got HTTP Status: " + resp.Status) + } + + var listBucketsHealResult ListBucketsHealResponse + + err = xml.NewDecoder(resp.Body).Decode(&listBucketsHealResult) + if err != nil { + return []BucketInfo{}, err + } + + var bucketsToBeHealed []BucketInfo + + for _, bucket := range listBucketsHealResult.Buckets.Buckets { + creationDate, err := time.Parse(timeFormatAMZLong, bucket.CreationDate) + if err != nil { + return []BucketInfo{}, err + } + bucketsToBeHealed = append(bucketsToBeHealed, + BucketInfo{ + Name: bucket.Name, + Created: creationDate, + HealBucketInfo: bucket.HealBucketInfo, + }) + } + + return bucketsToBeHealed, nil +} + // HealBucket - Heal the given bucket func (adm *AdminClient) HealBucket(bucket string, dryrun bool) error { // Construct query params. From 9e1f1b50e0e213e9eca4db39e9e848fc2b642f40 Mon Sep 17 00:00:00 2001 From: Jeffery Utter Date: Thu, 19 Jan 2017 12:39:44 -0600 Subject: [PATCH 081/100] Don't Check Available Inodes on NFS (#3598) In some cases (such as with VirutualBox, this value gets hardcoded to 1000, which is less than the required minimum of 10000. Fixes #3592 --- cmd/fs-v1.go | 2 +- cmd/posix.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index adcdb7542..224bb9bf2 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -199,7 +199,7 @@ func (fs fsObjects) checkDiskFree() (err error) { // are allocated based on available disk space. For example CephFS, StoreNext CVFS, AzureFile driver. // Allow for the available disk to be separately validate and we will validate inodes only if // total inodes are provided by the underlying filesystem. 
- if di.Files != 0 { + if di.Files != 0 && di.FSType != "NFS" { availableFiles := int64(di.Ffree) if availableFiles <= fs.minFreeInodes { return errDiskFull diff --git a/cmd/posix.go b/cmd/posix.go index cc4ff340f..0a1969fda 100644 --- a/cmd/posix.go +++ b/cmd/posix.go @@ -177,7 +177,7 @@ func (s *posix) checkDiskFree() (err error) { // are allocated based on available disk space. For example CephFS, StoreNext CVFS, AzureFile driver. // Allow for the available disk to be separately validate and we will validate inodes only if // total inodes are provided by the underlying filesystem. - if di.Files != 0 { + if di.Files != 0 && di.FSType != "NFS" { availableFiles := int64(di.Ffree) if availableFiles <= s.minFreeInodes { return errDiskFull From a17f1e875ce5944b2cdbb2c685f1ab32ed8adff5 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 19 Jan 2017 11:19:57 -0800 Subject: [PATCH 082/100] server/mux: Close the connection even if buffer.Flush() returns error. (#3599) It is possible that buf.Flush() might return an error, leading to a potential leak in active sockets. --- cmd/server-mux.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cmd/server-mux.go b/cmd/server-mux.go index c7e9ffd1e..bf79bfc2e 100644 --- a/cmd/server-mux.go +++ b/cmd/server-mux.go @@ -108,10 +108,12 @@ func (c *ConnMux) Read(b []byte) (int, error) { // Close the connection. func (c *ConnMux) Close() (err error) { - if err = c.bufrw.Flush(); err != nil { - return err - } - return c.Conn.Close() + // Make sure that we always close a connection, + // even if the bufioWriter flush sends an error. + defer c.Conn.Close() + + // Flush and write to the connection. 
+ return c.bufrw.Flush() } // ListenerMux wraps the standard net.Listener to inspect From 0674fa43ff70129116474c0682a5d7ab586d400d Mon Sep 17 00:00:00 2001 From: Andreas Kohn Date: Thu, 19 Jan 2017 20:31:51 +0100 Subject: [PATCH 083/100] Handle the region for GetBucketLocation and PutBucket properly (#3596) This adjusts the code for these two handlers to match the logic in ListBucketHandler. Fixes #3595 --- cmd/bucket-handlers.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index aadd99bb4..95227f4af 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -100,7 +100,12 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r * return } - if s3Error := checkRequestAuthType(r, bucket, "s3:GetBucketLocation", globalMinioDefaultRegion); s3Error != ErrNone { + s3Error := checkRequestAuthType(r, bucket, "s3:GetBucketLocation", globalMinioDefaultRegion) + if s3Error == ErrInvalidRegion { + // Clients like boto3 send getBucketLocation() call signed with region that is configured. + s3Error = checkRequestAuthType(r, "", "s3:GetBucketLocation", serverConfig.GetRegion()) + } + if s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -334,7 +339,12 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req } // PutBucket does not have any bucket action. - if s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion); s3Error != ErrNone { + s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion) + if s3Error == ErrInvalidRegion { + // Clients like boto3 send putBucket() call signed with region that is configured. 
+ s3Error = checkRequestAuthType(r, "", "", serverConfig.GetRegion()) + } + if s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } From 80f13878776a1944934cc8a6fb4666c145355400 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 19 Jan 2017 11:32:13 -0800 Subject: [PATCH 084/100] Initialize peers properly for localhost. (#3600) This introduced a regression. Fixes #3594 --- cmd/server-main.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/server-main.go b/cmd/server-main.go index 87f9fee00..3bd31c4d2 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -425,18 +425,18 @@ func serverMain(c *cli.Context) { handler, err := configureServerHandler(srvConfig) fatalIf(err, "Unable to configure one of server's RPC services.") - // Initialize S3 Peers inter-node communication only in distributed setup. - initGlobalS3Peers(endpoints) - - // Initialize Admin Peers inter-node communication only in distributed setup. - initGlobalAdminPeers(endpoints) - // Initialize a new HTTP server. apiServer := NewServerMux(serverAddr, handler) // Set the global minio addr for this server. globalMinioAddr = getLocalAddress(srvConfig) + // Initialize S3 Peers inter-node communication only in distributed setup. + initGlobalS3Peers(endpoints) + + // Initialize Admin Peers inter-node communication only in distributed setup. + initGlobalAdminPeers(endpoints) + // Determine API endpoints where we are going to serve the S3 API from. 
apiEndPoints, err := finalizeAPIEndpoints(apiServer.Server) fatalIf(err, "Unable to finalize API endpoints for %s", apiServer.Server.Addr) From c3f7d1026faf53647f8659befdad81ec9a8a24bf Mon Sep 17 00:00:00 2001 From: Andrei Kopats Date: Fri, 20 Jan 2017 20:30:20 +0300 Subject: [PATCH 085/100] fs: start even if there are not enough free space (#3606) --- cmd/fs-v1.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 224bb9bf2..6a486757a 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -154,11 +154,6 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) { }, } - // Validate if disk has enough free space to use. - if err = fs.checkDiskFree(); err != nil { - return nil, err - } - // Initialize and load bucket policies. err = initBucketPolicies(fs) if err != nil { From 51fa4f7fe3d952cd83c96bd6b115b3f6ffc86734 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Fri, 20 Jan 2017 16:33:01 -0800 Subject: [PATCH 086/100] Make PutObject a nop for an object which ends with "/" and size is '0' (#3603) This helps majority of S3 compatible applications while not returning an error upon directory create request. Fixes #2965 --- cmd/bucket-notification-handlers.go | 12 +++----- cmd/bucket-policy-handlers.go | 21 ++++++------- cmd/fs-v1.go | 6 ++++ cmd/object-api-common.go | 27 +++++++++++++++++ cmd/object-handlers.go | 2 +- cmd/server_test.go | 47 ++++++++++++++++++++++++++++- cmd/xl-v1-object.go | 6 ++++ 7 files changed, 100 insertions(+), 21 deletions(-) diff --git a/cmd/bucket-notification-handlers.go b/cmd/bucket-notification-handlers.go index fb9c026ef..ad4a6333c 100644 --- a/cmd/bucket-notification-handlers.go +++ b/cmd/bucket-notification-handlers.go @@ -115,13 +115,11 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, return } - // If Content-Length is unknown or zero, deny the request. PutBucketNotification - // always needs a Content-Length if incoming request is not chunked. 
- if !contains(r.TransferEncoding, "chunked") { - if r.ContentLength == -1 { - writeErrorResponse(w, ErrMissingContentLength, r.URL) - return - } + // If Content-Length is unknown or zero, deny the request. + // PutBucketNotification always needs a Content-Length. + if r.ContentLength == -1 || r.ContentLength == 0 { + writeErrorResponse(w, ErrMissingContentLength, r.URL) + return } // Reads the incoming notification configuration. diff --git a/cmd/bucket-policy-handlers.go b/cmd/bucket-policy-handlers.go index 32dd0aac4..7a525dd95 100644 --- a/cmd/bucket-policy-handlers.go +++ b/cmd/bucket-policy-handlers.go @@ -142,18 +142,15 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht } // If Content-Length is unknown or zero, deny the - // request. PutBucketPolicy always needs a Content-Length if - // incoming request is not chunked. - if !contains(r.TransferEncoding, "chunked") { - if r.ContentLength == -1 || r.ContentLength == 0 { - writeErrorResponse(w, ErrMissingContentLength, r.URL) - return - } - // If Content-Length is greater than maximum allowed policy size. - if r.ContentLength > maxAccessPolicySize { - writeErrorResponse(w, ErrEntityTooLarge, r.URL) - return - } + // request. PutBucketPolicy always needs a Content-Length. + if r.ContentLength == -1 || r.ContentLength == 0 { + writeErrorResponse(w, ErrMissingContentLength, r.URL) + return + } + // If Content-Length is greater than maximum allowed policy size. + if r.ContentLength > maxAccessPolicySize { + writeErrorResponse(w, ErrEntityTooLarge, r.URL) + return } // Read access policy up to maxAccessPolicySize. diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 6a486757a..6db9c5635 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -542,6 +542,12 @@ func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) { // Additionally writes `fs.json` which carries the necessary metadata // for future object operations. 
func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) { + // This is a special case with size as '0' and object ends with + // a slash separator, we treat it like a valid operation and + // return success. + if isObjectDir(object, size) { + return dirObjectInfo(bucket, object, size, metadata), nil + } if err = checkPutObjectArgs(bucket, object, fs); err != nil { return ObjectInfo{}, err } diff --git a/cmd/object-api-common.go b/cmd/object-api-common.go index a371e3553..0f0c4a414 100644 --- a/cmd/object-api-common.go +++ b/cmd/object-api-common.go @@ -22,6 +22,7 @@ import ( "runtime" "strings" "sync" + "time" humanize "github.com/dustin/go-humanize" ) @@ -54,6 +55,32 @@ func isRemoteDisk(disk StorageAPI) bool { return ok } +// Checks if the object is a directory, this logic uses +// if size == 0 and object ends with slashSeparator then +// returns true. +func isObjectDir(object string, size int64) bool { + return strings.HasSuffix(object, slashSeparator) && size == 0 +} + +// Converts just bucket, object metadata into ObjectInfo datatype. +func dirObjectInfo(bucket, object string, size int64, metadata map[string]string) ObjectInfo { + // This is a special case with size as '0' and object ends with + // a slash separator, we treat it like a valid operation and + // return success. + md5Sum := metadata["md5Sum"] + delete(metadata, "md5Sum") + return ObjectInfo{ + Bucket: bucket, + Name: object, + ModTime: time.Now().UTC(), + ContentType: "application/octet-stream", + IsDir: true, + Size: size, + MD5Sum: md5Sum, + UserDefined: metadata, + } +} + // House keeping code for FS/XL and distributed Minio setup. 
func houseKeeping(storageDisks []StorageAPI) error { var wg = &sync.WaitGroup{} diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 3c9100ec7..73b10b32e 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -409,7 +409,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req return } } - if size == -1 && !contains(r.TransferEncoding, "chunked") { + if size == -1 { writeErrorResponse(w, ErrMissingContentLength, r.URL) return } diff --git a/cmd/server_test.go b/cmd/server_test.go index 9709f3ba6..edfdcafa0 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -130,6 +130,49 @@ func (s *TestSuiteCommon) TestBucketSQSNotificationWebHook(c *C) { verifyError(c, response, "InvalidArgument", "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.", http.StatusBadRequest) } +func (s *TestSuiteCommon) TestObjectDir(c *C) { + bucketName := getRandomBucketName() + // HTTP request to create the bucket. + request, err := newTestSignedRequest("PUT", getMakeBucketURL(s.endPoint, bucketName), + 0, nil, s.accessKey, s.secretKey, s.signer) + c.Assert(err, IsNil) + + client := http.Client{Transport: s.transport} + // execute the request. + response, err := client.Do(request) + c.Assert(err, IsNil) + + // assert the http response status code. + c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = newTestSignedRequest("PUT", getPutObjectURL(s.endPoint, bucketName, "my-object-directory/"), + 0, nil, s.accessKey, s.secretKey, s.signer) + c.Assert(err, IsNil) + + client = http.Client{Transport: s.transport} + // execute the HTTP request. + response, err = client.Do(request) + + c.Assert(err, IsNil) + // assert the http response status code. 
+ c.Assert(response.StatusCode, Equals, http.StatusOK) + + request, err = newTestSignedRequest("PUT", getPutObjectURL(s.endPoint, bucketName, "my-object-directory/"), + 0, nil, s.accessKey, s.secretKey, s.signer) + c.Assert(err, IsNil) + + helloReader := bytes.NewReader([]byte("Hello, World")) + request.ContentLength = helloReader.Size() + request.Body = ioutil.NopCloser(helloReader) + + client = http.Client{Transport: s.transport} + // execute the HTTP request. + response, err = client.Do(request) + + c.Assert(err, IsNil) + verifyError(c, response, "XMinioInvalidObjectName", "Object name contains unsupported characters. Unsupported characters are `^*|\\\"", http.StatusBadRequest) +} + func (s *TestSuiteCommon) TestBucketSQSNotificationAMQP(c *C) { // Sample bucket notification. bucketNotificationBuf := `s3:ObjectCreated:Putprefiximages/1arn:minio:sqs:us-east-1:444455556666:amqp` @@ -1135,7 +1178,9 @@ func (s *TestSuiteCommon) TestSHA256Mismatch(c *C) { c.Assert(request.Header.Get("x-amz-content-sha256"), Equals, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") } // Set the body to generate signature mismatch. - request.Body = ioutil.NopCloser(bytes.NewReader([]byte("Hello, World"))) + helloReader := bytes.NewReader([]byte("Hello, World")) + request.ContentLength = helloReader.Size() + request.Body = ioutil.NopCloser(helloReader) c.Assert(err, IsNil) // execute the HTTP request. response, err = client.Do(request) diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go index 8fa41ca77..2e4c632ca 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/xl-v1-object.go @@ -440,6 +440,12 @@ func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject // writes `xl.json` which carries the necessary metadata for future // object operations. 
func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) { + // This is a special case with size as '0' and object ends with + // a slash separator, we treat it like a valid operation and + // return success. + if isObjectDir(object, size) { + return dirObjectInfo(bucket, object, size, metadata), nil + } if err = checkPutObjectArgs(bucket, object, xl); err != nil { return ObjectInfo{}, err } From 3640c632895fa201ce5ba7f0d7dcdf79b5170a98 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Sun, 22 Jan 2017 12:14:00 -0800 Subject: [PATCH 087/100] server/mux: PeekProtocol() should return error and connection be closed. (#3608) For TLS peekProtocol do not assume the incoming request to be a TLS connection perform a handshake() instead and validate. Also add some security related defaults to `tls.Config`. --- cmd/server-mux.go | 102 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 75 insertions(+), 27 deletions(-) diff --git a/cmd/server-mux.go b/cmd/server-mux.go index bf79bfc2e..1d7458d2c 100644 --- a/cmd/server-mux.go +++ b/cmd/server-mux.go @@ -71,6 +71,7 @@ func NewConnMux(c net.Conn) *ConnMux { } } +// List of protocols to be detected by PeekProtocol function. const ( protocolTLS = "tls" protocolHTTP1 = "http" @@ -78,26 +79,33 @@ const ( ) // PeekProtocol - reads the first bytes, then checks if it is similar -// to one of the default http methods -func (c *ConnMux) PeekProtocol() string { +// to one of the default http methods. Returns error if there are any +// errors in peeking over the connection. +func (c *ConnMux) PeekProtocol() (string, error) { + // Peek for HTTP verbs. 
buf, err := c.bufrw.Peek(maxHTTPVerbLen) if err != nil { - if err != io.EOF { - errorIf(err, "Unable to peek into the protocol") - } - return protocolHTTP1 - } - for _, m := range defaultHTTP1Methods { - if strings.HasPrefix(string(buf), m) { - return protocolHTTP1 - } + return "", err } + + // Check for HTTP2 methods first. for _, m := range defaultHTTP2Methods { if strings.HasPrefix(string(buf), m) { - return protocolHTTP2 + return protocolHTTP2, nil } } - return protocolTLS + + // Check for HTTP1 methods. + for _, m := range defaultHTTP1Methods { + if strings.HasPrefix(string(buf), m) { + return protocolHTTP1, nil + } + } + + // Default to TLS, this is not a real indication + // that the connection is TLS but that will be + // validated later by doing a handshake. + return protocolTLS, nil } // Read - streams the ConnMux buffer when reset flag is activated, otherwise @@ -165,8 +173,8 @@ type ListenerMuxAcceptRes struct { // $ cat /proc/sys/net/ipv4/tcp_keepalive_probes // 9 // -// Effective value of total keep alive comes upto 9 x 3 * time.Minute = 27 Minutes. -var defaultKeepAliveTimeout = 3 * time.Minute // 3 minutes. +// Effective value of total keep alive comes upto 9 x 10 * time.Second = 1.5 Minutes. +var defaultKeepAliveTimeout = 10 * time.Second // 10 seconds. // newListenerMux listens and wraps accepted connections with tls after protocol peeking func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux { @@ -198,20 +206,42 @@ func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux { conn.SetKeepAlive(true) conn.SetKeepAlivePeriod(defaultKeepAliveTimeout) + // Allocate new conn muxer. 
+ connMux := NewConnMux(conn) + // Wrap the connection with ConnMux to be able to peek the data in the incoming connection // and decide if we need to wrap the connection itself with a TLS or not - go func(conn net.Conn) { - connMux := NewConnMux(conn) - if connMux.PeekProtocol() == protocolTLS { - l.acceptResCh <- ListenerMuxAcceptRes{ - conn: tls.Server(connMux, l.config), + go func(connMux *ConnMux) { + protocol, err := connMux.PeekProtocol() + if err != nil { + // io.EOF is usually returned by non-http clients, + // just close the connection to avoid any leak. + if err != io.EOF { + errorIf(err, "Unable to peek into incoming protocol") } - } else { + connMux.Close() + return + } + switch protocol { + case protocolTLS: + tlsConn := tls.Server(connMux, l.config) + // Make sure to handshake so that we know that this + // is a TLS connection, if not we should close and reject + // such a connection. + if err = tlsConn.Handshake(); err != nil { + errorIf(err, "TLS handshake failed") + tlsConn.Close() + return + } + l.acceptResCh <- ListenerMuxAcceptRes{ + conn: tlsConn, + } + default: l.acceptResCh <- ListenerMuxAcceptRes{ conn: connMux, } } - }(conn) + }(connMux) } }() return &l @@ -284,10 +314,7 @@ type ServerMux struct { func NewServerMux(addr string, handler http.Handler) *ServerMux { m := &ServerMux{ Server: &http.Server{ - Addr: addr, - // Do not add any timeouts Golang net.Conn - // closes connections right after 10mins even - // if they are not idle. + Addr: addr, Handler: handler, MaxHeaderBytes: 1 << 20, }, @@ -349,7 +376,28 @@ func (m *ServerMux) ListenAndServe(certFile, keyFile string) (err error) { tlsEnabled := certFile != "" && keyFile != "" - config := &tls.Config{} // Always instantiate. + config := &tls.Config{ + // Causes servers to use Go's default ciphersuite preferences, + // which are tuned to avoid attacks. Does nothing on clients. 
+ PreferServerCipherSuites: true, + // Only use curves which have assembly implementations + CurvePreferences: []tls.CurveID{ + tls.CurveP256, + }, + // Set minimum version to TLS 1.2 + MinVersion: tls.VersionTLS12, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + + // Best disabled, as they don't provide Forward Secrecy, + // but might be necessary for some clients + // tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + // tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + }, + } // Always instantiate. if tlsEnabled { // Configure TLS in the server From 47358e31049eed047f861cd1bb54d38b8413e92e Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Sun, 22 Jan 2017 23:48:27 +0100 Subject: [PATCH 088/100] server-mux: Add tcp idle read timeout (#3607) Avoid many idle client connections i.e client didn't send any data until a given stipulated amount of time. Default chosen here is 30 seconds. --- cmd/server-mux.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cmd/server-mux.go b/cmd/server-mux.go index 1d7458d2c..e98741bb6 100644 --- a/cmd/server-mux.go +++ b/cmd/server-mux.go @@ -111,6 +111,8 @@ func (c *ConnMux) PeekProtocol() (string, error) { // Read - streams the ConnMux buffer when reset flag is activated, otherwise // streams from the incoming network connection func (c *ConnMux) Read(b []byte) (int, error) { + // Push read deadline + c.Conn.SetReadDeadline(time.Now().Add(defaultTCPReadTimeout)) return c.bufrw.Read(b) } @@ -176,6 +178,9 @@ type ListenerMuxAcceptRes struct { // Effective value of total keep alive comes upto 9 x 10 * time.Second = 1.5 Minutes. var defaultKeepAliveTimeout = 10 * time.Second // 10 seconds. 
+// Timeout to close connection when a client is not sending any data +var defaultTCPReadTimeout = 30 * time.Second + // newListenerMux listens and wraps accepted connections with tls after protocol peeking func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux { l := ListenerMux{ @@ -202,6 +207,9 @@ func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux { return } + // Enable Read timeout + conn.SetReadDeadline(time.Now().Add(defaultTCPReadTimeout)) + // Enable keep alive for each connection. conn.SetKeepAlive(true) conn.SetKeepAlivePeriod(defaultKeepAliveTimeout) From 4e926b292f840ea747ed06e7cecac134ad64533f Mon Sep 17 00:00:00 2001 From: Krishna Srinivas Date: Sun, 22 Jan 2017 21:02:09 -0800 Subject: [PATCH 089/100] vendor-update: Minio Browser (#3609) --- .../minio/miniobrowser/ui-assets.go | 93 +++++++++---------- vendor/vendor.json | 6 +- 2 files changed, 49 insertions(+), 50 deletions(-) diff --git a/vendor/github.com/minio/miniobrowser/ui-assets.go b/vendor/github.com/minio/miniobrowser/ui-assets.go index 379c21770..2ddddb6f1 100644 --- a/vendor/github.com/minio/miniobrowser/ui-assets.go +++ b/vendor/github.com/minio/miniobrowser/ui-assets.go @@ -4,7 +4,7 @@ // production/favicon.ico // production/firefox.png // production/index.html -// production/index_bundle-2016-11-24T00-39-02Z.js +// production/index_bundle-2017-01-23T04-16-49Z.js // production/loader.css // production/logo.svg // production/safari.png @@ -65,7 +65,7 @@ func productionChromePng() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "production/chrome.png", size: 3726, mode: os.FileMode(436), modTime: time.Unix(1479947956, 0)} + info := bindataFileInfo{name: "production/chrome.png", size: 3726, mode: os.FileMode(436), modTime: time.Unix(1485145028, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -82,7 +82,7 @@ func productionFaviconIco() (*asset, error) { return nil, err } - info := bindataFileInfo{name: 
"production/favicon.ico", size: 1340, mode: os.FileMode(436), modTime: time.Unix(1479947956, 0)} + info := bindataFileInfo{name: "production/favicon.ico", size: 1340, mode: os.FileMode(436), modTime: time.Unix(1485145028, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -99,7 +99,7 @@ func productionFirefoxPng() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "production/firefox.png", size: 4795, mode: os.FileMode(436), modTime: time.Unix(1479947956, 0)} + info := bindataFileInfo{name: "production/firefox.png", size: 4795, mode: os.FileMode(436), modTime: time.Unix(1485145028, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -156,8 +156,8 @@ var _productionIndexHTML = []byte(` - - + + `) @@ -172,21 +172,21 @@ func productionIndexHTML() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "production/index.html", size: 1996, mode: os.FileMode(436), modTime: time.Unix(1479947956, 0)} + info := bindataFileInfo{name: "production/index.html", size: 1996, mode: os.FileMode(436), modTime: time.Unix(1485145028, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _productionIndex_bundle20161124t003902zJs = []byte(`!function(A){function M(I){if(t[I])return t[I].exports;var g=t[I]={exports:{},id:I,loaded:!1};return A[I].call(g.exports,g,g.exports,M),g.loaded=!0,g.exports}var t={};return M.m=A,M.c=t,M.p="",M(0)}([function(A,M,t){A.exports=t(238)},function(A,M,t){"use strict";A.exports=t(431)},function(A,M,t){"use strict";function I(A,M,t,I,g,e,i,T){if(!A){var E;if(void 0===M)E=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var N=[t,I,g,e,i,T],o=0;E=new Error(M.replace(/%s/g,function(){return N[o++]})),E.name="Invariant Violation"}throw E.framesToPop=1,E}}A.exports=I},function(A,M,t){"use strict";var I=t(20),g=I;A.exports=g},function(A,M){"use strict";function t(A,M){if(null==A)throw new TypeError("Object.assign 
target cannot be null or undefined");for(var t=Object(A),I=Object.prototype.hasOwnProperty,g=1;g2?t-2:0),g=2;g1){for(var c=Array(C),D=0;D1){for(var a=Array(D),B=0;B, "+('or explicitly pass "store" as a prop to "'+t+'".'));var E=i.store.getState();return i.state={storeState:E},i.clearCache(),i}return i(T,I),T.prototype.shouldComponentUpdate=function(){return!s||this.haveOwnPropsChanged||this.hasStoreStateChanged},T.prototype.computeStateProps=function(A,M){if(!this.finalMapStateToProps)return this.configureFinalMapState(A,M);var t=A.getState(),I=this.doStatePropsDependOnOwnProps?this.finalMapStateToProps(t,M):this.finalMapStateToProps(t);return I},T.prototype.configureFinalMapState=function(A,M){var t=C(A.getState(),M),I="function"==typeof t;return this.finalMapStateToProps=I?t:C,this.doStatePropsDependOnOwnProps=1!==this.finalMapStateToProps.length,I?this.computeStateProps(A,M):t},T.prototype.computeDispatchProps=function(A,M){if(!this.finalMapDispatchToProps)return this.configureFinalMapDispatch(A,M);var t=A.dispatch,I=this.doDispatchPropsDependOnOwnProps?this.finalMapDispatchToProps(t,M):this.finalMapDispatchToProps(t);return I},T.prototype.configureFinalMapDispatch=function(A,M){var t=D(A.dispatch,M),I="function"==typeof t;return this.finalMapDispatchToProps=I?t:D,this.doDispatchPropsDependOnOwnProps=1!==this.finalMapDispatchToProps.length,I?this.computeDispatchProps(A,M):t},T.prototype.updateStatePropsIfNeeded=function(){var A=this.computeStateProps(this.store,this.props);return(!this.stateProps||!(0,a["default"])(A,this.stateProps))&&(this.stateProps=A,!0)},T.prototype.updateDispatchPropsIfNeeded=function(){var A=this.computeDispatchProps(this.store,this.props);return(!this.dispatchProps||!(0,a["default"])(A,this.dispatchProps))&&(this.dispatchProps=A,!0)},T.prototype.updateMergedPropsIfNeeded=function(){var 
A=M(this.stateProps,this.dispatchProps,this.props);return!(this.mergedProps&&h&&(0,a["default"])(A,this.mergedProps))&&(this.mergedProps=A,!0)},T.prototype.isSubscribed=function(){return"function"==typeof this.unsubscribe},T.prototype.trySubscribe=function(){N&&!this.unsubscribe&&(this.unsubscribe=this.store.subscribe(this.handleChange.bind(this)),this.handleChange())},T.prototype.tryUnsubscribe=function(){this.unsubscribe&&(this.unsubscribe(),this.unsubscribe=null)},T.prototype.componentDidMount=function(){this.trySubscribe()},T.prototype.componentWillReceiveProps=function(A){s&&(0,a["default"])(A,this.props)||(this.haveOwnPropsChanged=!0)},T.prototype.componentWillUnmount=function(){this.tryUnsubscribe(),this.clearCache()},T.prototype.clearCache=function(){this.dispatchProps=null,this.stateProps=null,this.mergedProps=null,this.haveOwnPropsChanged=!0,this.hasStoreStateChanged=!0,this.haveStatePropsBeenPrecalculated=!1,this.statePropsPrecalculationError=null,this.renderedElement=null,this.finalMapDispatchToProps=null,this.finalMapStateToProps=null},T.prototype.handleChange=function(){if(this.unsubscribe){var A=this.store.getState(),M=this.state.storeState;if(!s||M!==A){if(s&&!this.doStatePropsDependOnOwnProps){var t=E(this.updateStatePropsIfNeeded,this);if(!t)return;t===l&&(this.statePropsPrecalculationError=l.value),this.haveStatePropsBeenPrecalculated=!0}this.hasStoreStateChanged=!0,this.setState({storeState:A})}}},T.prototype.getWrappedInstance=function(){return(0,w["default"])(y,"To access the wrapped instance, you need to specify { withRef: true } as the fourth argument of the connect() call."),this.refs.wrappedInstance},T.prototype.render=function(){var M=this.haveOwnPropsChanged,t=this.hasStoreStateChanged,I=this.haveStatePropsBeenPrecalculated,g=this.statePropsPrecalculationError,e=this.renderedElement;if(this.haveOwnPropsChanged=!1,this.hasStoreStateChanged=!1,this.haveStatePropsBeenPrecalculated=!1,this.statePropsPrecalculationError=null,g)throw g;var 
i=!0,T=!0;s&&e&&(i=t||M&&this.doStatePropsDependOnOwnProps,T=M&&this.doDispatchPropsDependOnOwnProps);var E=!1,N=!1;I?E=!0:i&&(E=this.updateStatePropsIfNeeded()),T&&(N=this.updateDispatchPropsIfNeeded());var C=!0;return C=!!(E||N||M)&&this.updateMergedPropsIfNeeded(),!C&&e?e:(y?this.renderedElement=(0,n.createElement)(A,o({},this.mergedProps,{ref:"wrappedInstance"})):this.renderedElement=(0,n.createElement)(A,this.mergedProps),this.renderedElement)},T}(n.Component);return I.displayName=t,I.WrappedComponent=A,I.contextTypes={store:c["default"]},I.propTypes={store:c["default"]},(0,j["default"])(I,A)}}var o=Object.assign||function(A){for(var M=1;M should not have a "'+M+'" prop')}M.__esModule=!0,M.routes=M.route=M.components=M.component=M.history=void 0,M.falsy=I;var g=t(1),e=g.PropTypes.func,i=g.PropTypes.object,T=g.PropTypes.arrayOf,E=g.PropTypes.oneOfType,N=g.PropTypes.element,o=g.PropTypes.shape,n=g.PropTypes.string,C=(M.history=o({listen:e.isRequired,push:e.isRequired,replace:e.isRequired,go:e.isRequired,goBack:e.isRequired,goForward:e.isRequired}),M.component=E([e,n])),c=(M.components=E([C,i]),M.route=E([i,N]));M.routes=E([c,T(c)])},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A){var M=A.match(/^https?:\/\/[^\/]*/);return null==M?A:A.substring(M[0].length)}function e(A){var M=g(A),t="",I="",e=M.indexOf("#");e!==-1&&(I=M.substring(e),M=M.substring(0,e));var i=M.indexOf("?");return i!==-1&&(t=M.substring(i),M=M.substring(0,i)),""===M&&(M="/"),{pathname:M,search:t,hash:I}}M.__esModule=!0,M.extractPath=g, -M.parsePath=e;var i=t(22);I(i)},function(A,M,t){"use strict";function I(){g.attachRefs(this,this._currentElement)}var g=t(450),e={mountComponent:function(A,M,t,g){var e=A.mountComponent(M,t,g);return A._currentElement&&null!=A._currentElement.ref&&t.getReactMountReady().enqueue(I,A),e},unmountComponent:function(A){g.detachRefs(A,A._currentElement),A.unmountComponent()},receiveComponent:function(A,M,t,e){var 
i=A._currentElement;if(M!==i||e!==A._context){var T=g.shouldUpdateRefs(i,M);T&&g.detachRefs(A,i),A.receiveComponent(M,t,e),T&&A._currentElement&&null!=A._currentElement.ref&&t.getReactMountReady().enqueue(I,A)}},performUpdateIfNecessary:function(A,M){A.performUpdateIfNecessary(M)}};A.exports=e},function(A,M,t){"use strict";function I(A,M,t,I){this.dispatchConfig=A,this.dispatchMarker=M,this.nativeEvent=t;var g=this.constructor.Interface;for(var e in g)if(g.hasOwnProperty(e)){var T=g[e];T?this[e]=T(t):"target"===e?this.target=I:this[e]=t[e]}var E=null!=t.defaultPrevented?t.defaultPrevented:t.returnValue===!1;E?this.isDefaultPrevented=i.thatReturnsTrue:this.isDefaultPrevented=i.thatReturnsFalse,this.isPropagationStopped=i.thatReturnsFalse}var g=t(30),e=t(4),i=t(20),T=(t(3),{type:null,target:null,currentTarget:i.thatReturnsNull,eventPhase:null,bubbles:null,cancelable:null,timeStamp:function(A){return A.timeStamp||Date.now()},defaultPrevented:null,isTrusted:null});e(I.prototype,{preventDefault:function(){this.defaultPrevented=!0;var A=this.nativeEvent;A&&(A.preventDefault?A.preventDefault():A.returnValue=!1,this.isDefaultPrevented=i.thatReturnsTrue)},stopPropagation:function(){var A=this.nativeEvent;A&&(A.stopPropagation?A.stopPropagation():A.cancelBubble=!0,this.isPropagationStopped=i.thatReturnsTrue)},persist:function(){this.isPersistent=i.thatReturnsTrue},isPersistent:i.thatReturnsFalse,destructor:function(){var A=this.constructor.Interface;for(var M in A)this[M]=null;this.dispatchConfig=null,this.dispatchMarker=null,this.nativeEvent=null}}),I.Interface=T,I.augmentClass=function(A,M){var t=this,I=Object.create(t.prototype);e(I,A.prototype),A.prototype=I,A.prototype.constructor=A,A.Interface=e({},t.Interface,M),A.augmentClass=t.augmentClass,g.addPoolingTo(A,g.fourArgumentPooler)},g.addPoolingTo(I,g.fourArgumentPooler),A.exports=I},function(A,M){"use strict";M["default"]=function(A,M){var t={};for(var I in 
A)M.indexOf(I)>=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t},M.__esModule=!0},function(A,M){"use strict";function t(A){return A&&A.ownerDocument||document}M.__esModule=!0,M["default"]=t,A.exports=M["default"]},function(A,M,t){"use strict";var I=t(32),g=function(){var A=I&&document.documentElement;return A&&A.contains?function(A,M){return A.contains(M)}:A&&A.compareDocumentPosition?function(A,M){return A===M||!!(16&A.compareDocumentPosition(M))}:function(A,M){if(M)do if(M===A)return!0;while(M=M.parentNode);return!1}}();A.exports=g},function(A,M){function t(A){return"number"==typeof A&&A>-1&&A%1==0&&A<=I}var I=9007199254740991;A.exports=t},function(A,M,t){(function(A){!function(M,t){A.exports=t()}(this,function(){"use strict";function M(){return cI.apply(null,arguments)}function t(A){cI=A}function I(A){return A instanceof Array||"[object Array]"===Object.prototype.toString.call(A)}function g(A){return null!=A&&"[object Object]"===Object.prototype.toString.call(A)}function e(A){var M;for(M in A)return!1;return!0}function i(A){return A instanceof Date||"[object Date]"===Object.prototype.toString.call(A)}function T(A,M){var t,I=[];for(t=0;t0)for(t in aI)I=aI[t],g=M[I],a(g)||(A[I]=g);return A}function Q(A){B(this,A),this._d=new Date(null!=A._d?A._d.getTime():NaN),BI===!1&&(BI=!0,M.updateOffset(this),BI=!1)}function r(A){return A instanceof Q||null!=A&&null!=A._isAMomentObject}function s(A){return A<0?Math.ceil(A)||0:Math.floor(A)}function x(A){var M=+A,t=0;return 0!==M&&isFinite(M)&&(t=s(M)),t}function j(A,M,t){var I,g=Math.min(A.length,M.length),e=Math.abs(A.length-M.length),i=0;for(I=0;I0?"future":"past"];return L(t)?t(M):t.replace(/%s/i,M)}function m(A,M){var t=A.toLowerCase();YI[t]=YI[t+"s"]=YI[M]=A}function F(A){return"string"==typeof A?YI[A]||YI[A.toLowerCase()]:void 0}function f(A){var M,t,I={};for(t in A)E(A,t)&&(M=F(t),M&&(I[M]=A[t]));return I}function k(A,M){lI[A]=M}function R(A){var M=[];for(var t in 
A)M.push({unit:t,priority:lI[t]});return M.sort(function(A,M){return A.priority-M.priority}),M}function J(A,t){return function(I){return null!=I?(H(this,A,I),M.updateOffset(this,t),this):G(this,A)}}function G(A,M){return A.isValid()?A._d["get"+(A._isUTC?"UTC":"")+M]():NaN}function H(A,M,t){A.isValid()&&A._d["set"+(A._isUTC?"UTC":"")+M](t)}function b(A){return A=F(A),L(this[A])?this[A]():this}function X(A,M){if("object"==typeof A){A=f(A);for(var t=R(A),I=0;I=0;return(e?t?"+":"":"-")+Math.pow(10,Math.max(0,g)).toString().substr(1)+I}function W(A,M,t,I){var g=I;"string"==typeof I&&(g=function(){return this[I]()}),A&&(zI[A]=g),M&&(zI[M[0]]=function(){return v(g.apply(this,arguments),M[1],M[2])}),t&&(zI[t]=function(){return this.localeData().ordinal(g.apply(this,arguments),A)})}function V(A){return A.match(/\[[\s\S]/)?A.replace(/^\[|\]$/g,""):A.replace(/\\/g,"")}function P(A){var M,t,I=A.match(dI);for(M=0,t=I.length;M=0&&hI.test(A);)A=A.replace(hI,t),hI.lastIndex=0,I-=1;return A}function q(A,M,t){ZI[A]=L(M)?M:function(A,I){return A&&t?t:M}}function _(A,M){return E(ZI,A)?ZI[A](M._strict,M._locale):new RegExp($(A))}function $(A){return AA(A.replace("\\","").replace(/\\(\[)|\\(\])|\[([^\]\[]*)\]|\\(.)/g,function(A,M,t,I,g){return M||t||I||g}))}function AA(A){return A.replace(/[-\/\\^$*+?.()|[\]{}]/g,"\\$&")}function MA(A,M){var t,I=M;for("string"==typeof A&&(A=[A]),"number"==typeof M&&(I=function(A,t){t[M]=x(A)}),t=0;t=0&&isFinite(T.getFullYear())&&T.setFullYear(A),T}function sA(A){var M=new Date(Date.UTC.apply(null,arguments));return A<100&&A>=0&&isFinite(M.getUTCFullYear())&&M.setUTCFullYear(A),M}function xA(A,M,t){var I=7+M-t,g=(7+sA(A,0,I).getUTCDay()-M)%7;return-g+I-1}function jA(A,M,t,I,g){var e,i,T=(7+t-I)%7,E=xA(A,I,g),N=1+7*(M-1)+T+E;return N<=0?(e=A-1,i=aA(e)+N):N>aA(A)?(e=A+1,i=N-aA(A)):(e=A,i=N),{year:e,dayOfYear:i}}function yA(A,M,t){var I,g,e=xA(A.year(),M,t),i=Math.floor((A.dayOfYear()-e-1)/7)+1;return 
i<1?(g=A.year()-1,I=i+wA(g,M,t)):i>wA(A.year(),M,t)?(I=i-wA(A.year(),M,t),g=A.year()+1):(g=A.year(),I=i),{week:I,year:g}}function wA(A,M,t){var I=xA(A,M,t),g=xA(A+1,M,t);return(aA(A)-I+g)/7}function uA(A){return yA(A,this._week.dow,this._week.doy).week}function LA(){return this._week.dow}function YA(){return this._week.doy}function lA(A){var M=this.localeData().week(this);return null==A?M:this.add(7*(A-M),"d")}function dA(A){var M=yA(this,1,4).week;return null==A?M:this.add(7*(A-M),"d")}function hA(A,M){return"string"!=typeof A?A:isNaN(A)?(A=M.weekdaysParse(A),"number"==typeof A?A:null):parseInt(A,10)}function SA(A,M){return"string"==typeof A?M.weekdaysParse(A)%7||7:isNaN(A)?null:A}function zA(A,M){return A?I(this._weekdays)?this._weekdays[A.day()]:this._weekdays[this._weekdays.isFormat.test(M)?"format":"standalone"][A.day()]:this._weekdays}function UA(A){return A?this._weekdaysShort[A.day()]:this._weekdaysShort}function pA(A){return A?this._weekdaysMin[A.day()]:this._weekdaysMin}function OA(A,M,t){var I,g,e,i=A.toLocaleLowerCase();if(!this._weekdaysParse)for(this._weekdaysParse=[],this._shortWeekdaysParse=[],this._minWeekdaysParse=[],I=0;I<7;++I)e=o([2e3,1]).day(I),this._minWeekdaysParse[I]=this.weekdaysMin(e,"").toLocaleLowerCase(),this._shortWeekdaysParse[I]=this.weekdaysShort(e,"").toLocaleLowerCase(),this._weekdaysParse[I]=this.weekdays(e,"").toLocaleLowerCase();return 
t?"dddd"===M?(g=sI.call(this._weekdaysParse,i),g!==-1?g:null):"ddd"===M?(g=sI.call(this._shortWeekdaysParse,i),g!==-1?g:null):(g=sI.call(this._minWeekdaysParse,i),g!==-1?g:null):"dddd"===M?(g=sI.call(this._weekdaysParse,i),g!==-1?g:(g=sI.call(this._shortWeekdaysParse,i),g!==-1?g:(g=sI.call(this._minWeekdaysParse,i),g!==-1?g:null))):"ddd"===M?(g=sI.call(this._shortWeekdaysParse,i),g!==-1?g:(g=sI.call(this._weekdaysParse,i),g!==-1?g:(g=sI.call(this._minWeekdaysParse,i),g!==-1?g:null))):(g=sI.call(this._minWeekdaysParse,i),g!==-1?g:(g=sI.call(this._weekdaysParse,i),g!==-1?g:(g=sI.call(this._shortWeekdaysParse,i),g!==-1?g:null)))}function mA(A,M,t){var I,g,e;if(this._weekdaysParseExact)return OA.call(this,A,M,t);for(this._weekdaysParse||(this._weekdaysParse=[],this._minWeekdaysParse=[],this._shortWeekdaysParse=[],this._fullWeekdaysParse=[]),I=0;I<7;I++){if(g=o([2e3,1]).day(I),t&&!this._fullWeekdaysParse[I]&&(this._fullWeekdaysParse[I]=new RegExp("^"+this.weekdays(g,"").replace(".",".?")+"$","i"),this._shortWeekdaysParse[I]=new RegExp("^"+this.weekdaysShort(g,"").replace(".",".?")+"$","i"),this._minWeekdaysParse[I]=new RegExp("^"+this.weekdaysMin(g,"").replace(".",".?")+"$","i")),this._weekdaysParse[I]||(e="^"+this.weekdays(g,"")+"|^"+this.weekdaysShort(g,"")+"|^"+this.weekdaysMin(g,""),this._weekdaysParse[I]=new RegExp(e.replace(".",""),"i")),t&&"dddd"===M&&this._fullWeekdaysParse[I].test(A))return I;if(t&&"ddd"===M&&this._shortWeekdaysParse[I].test(A))return I;if(t&&"dd"===M&&this._minWeekdaysParse[I].test(A))return I;if(!t&&this._weekdaysParse[I].test(A))return I}}function FA(A){if(!this.isValid())return null!=A?this:NaN;var M=this._isUTC?this._d.getUTCDay():this._d.getDay();return null!=A?(A=hA(A,this.localeData()),this.add(A-M,"d")):M}function fA(A){if(!this.isValid())return null!=A?this:NaN;var M=(this.day()+7-this.localeData()._week.dow)%7;return null==A?M:this.add(A-M,"d")}function kA(A){if(!this.isValid())return null!=A?this:NaN;if(null!=A){var 
M=SA(A,this.localeData());return this.day(this.day()%7?M:M-7)}return this.day()||7}function RA(A){return this._weekdaysParseExact?(E(this,"_weekdaysRegex")||HA.call(this),A?this._weekdaysStrictRegex:this._weekdaysRegex):(E(this,"_weekdaysRegex")||(this._weekdaysRegex=Bg),this._weekdaysStrictRegex&&A?this._weekdaysStrictRegex:this._weekdaysRegex)}function JA(A){return this._weekdaysParseExact?(E(this,"_weekdaysRegex")||HA.call(this),A?this._weekdaysShortStrictRegex:this._weekdaysShortRegex):(E(this,"_weekdaysShortRegex")||(this._weekdaysShortRegex=Qg),this._weekdaysShortStrictRegex&&A?this._weekdaysShortStrictRegex:this._weekdaysShortRegex)}function GA(A){return this._weekdaysParseExact?(E(this,"_weekdaysRegex")||HA.call(this),A?this._weekdaysMinStrictRegex:this._weekdaysMinRegex):(E(this,"_weekdaysMinRegex")||(this._weekdaysMinRegex=rg),this._weekdaysMinStrictRegex&&A?this._weekdaysMinStrictRegex:this._weekdaysMinRegex)}function HA(){function A(A,M){return M.length-A.length}var M,t,I,g,e,i=[],T=[],E=[],N=[];for(M=0;M<7;M++)t=o([2e3,1]).day(M),I=this.weekdaysMin(t,""),g=this.weekdaysShort(t,""),e=this.weekdays(t,""),i.push(I),T.push(g),E.push(e),N.push(I),N.push(g),N.push(e);for(i.sort(A),T.sort(A),E.sort(A),N.sort(A),M=0;M<7;M++)T[M]=AA(T[M]),E[M]=AA(E[M]),N[M]=AA(N[M]);this._weekdaysRegex=new RegExp("^("+N.join("|")+")","i"),this._weekdaysShortRegex=this._weekdaysRegex,this._weekdaysMinRegex=this._weekdaysRegex,this._weekdaysStrictRegex=new RegExp("^("+E.join("|")+")","i"),this._weekdaysShortStrictRegex=new RegExp("^("+T.join("|")+")","i"),this._weekdaysMinStrictRegex=new RegExp("^("+i.join("|")+")","i")}function bA(){return this.hours()%12||12}function XA(){return this.hours()||24}function vA(A,M){W(A,0,0,function(){return this.localeData().meridiem(this.hours(),this.minutes(),M)})}function WA(A,M){return M._meridiemParse}function VA(A){return"p"===(A+"").toLowerCase().charAt(0)}function PA(A,M,t){return A>11?t?"pm":"PM":t?"am":"AM"}function ZA(A){return 
A?A.toLowerCase().replace("_","-"):A}function KA(A){for(var M,t,I,g,e=0;e0;){if(I=qA(g.slice(0,M).join("-")))return I;if(t&&t.length>=M&&j(g,t,!0)>=M-1)break;M--}e++}return null}function qA(M){var t=null;if(!wg[M]&&"undefined"!=typeof A&&A&&A.exports)try{t=sg._abbr,!function(){var A=new Error('Cannot find module "./locale"');throw A.code="MODULE_NOT_FOUND",A}(),_A(t)}catch(I){}return wg[M]}function _A(A,M){var t;return A&&(t=a(M)?MM(A):$A(A,M),t&&(sg=t)),sg._abbr}function $A(A,M){if(null!==M){var t=yg;return M.abbr=A,null!=wg[A]?(u("defineLocaleOverride","use moment.updateLocale(localeName, config) to change an existing locale. moment.defineLocale(localeName, config) should only be used for creating a new locale See http://momentjs.com/guides/#/warnings/define-locale/ for more info."),t=wg[A]._config):null!=M.parentLocale&&(null!=wg[M.parentLocale]?t=wg[M.parentLocale]._config:u("parentLocaleUndefined","specified parentLocale is not defined yet. See http://momentjs.com/guides/#/warnings/parent-locale/")),wg[A]=new d(l(t,M)),_A(A),wg[A]}return delete wg[A],null}function AM(A,M){if(null!=M){var t,I=yg;null!=wg[A]&&(I=wg[A]._config),M=l(I,M),t=new d(M),t.parentLocale=wg[A],wg[A]=t,_A(A)}else null!=wg[A]&&(null!=wg[A].parentLocale?wg[A]=wg[A].parentLocale:null!=wg[A]&&delete wg[A]);return wg[A]}function MM(A){var M;if(A&&A._locale&&A._locale._abbr&&(A=A._locale._abbr),!A)return sg;if(!I(A)){if(M=qA(A))return M;A=[A]}return KA(A)}function tM(){return rI(wg)}function IM(A){var M,t=A._a;return t&&C(A).overflow===-2&&(M=t[_I]<0||t[_I]>11?_I:t[$I]<1||t[$I]>gA(t[qI],t[_I])?$I:t[Ag]<0||t[Ag]>24||24===t[Ag]&&(0!==t[Mg]||0!==t[tg]||0!==t[Ig])?Ag:t[Mg]<0||t[Mg]>59?Mg:t[tg]<0||t[tg]>59?tg:t[Ig]<0||t[Ig]>999?Ig:-1,C(A)._overflowDayOfYear&&(M$I)&&(M=$I),C(A)._overflowWeeks&&M===-1&&(M=gg),C(A)._overflowWeekday&&M===-1&&(M=eg),C(A).overflow=M),A}function gM(A){var 
M,t,I,g,e,i,T=A._i,E=ug.exec(T)||Lg.exec(T);if(E){for(C(A).iso=!0,M=0,t=lg.length;MaA(g)&&(C(A)._overflowDayOfYear=!0),t=sA(g,0,A._dayOfYear),A._a[_I]=t.getUTCMonth(),A._a[$I]=t.getUTCDate()),M=0;M<3&&null==A._a[M];++M)A._a[M]=e[M]=I[M];for(;M<7;M++)A._a[M]=e[M]=null==A._a[M]?2===M?1:0:A._a[M];24===A._a[Ag]&&0===A._a[Mg]&&0===A._a[tg]&&0===A._a[Ig]&&(A._nextDay=!0,A._a[Ag]=0),A._d=(A._useUTC?sA:rA).apply(null,e),null!=A._tzm&&A._d.setUTCMinutes(A._d.getUTCMinutes()-A._tzm),A._nextDay&&(A._a[Ag]=24)}}function NM(A){var M,t,I,g,e,i,T,E;M=A._w,null!=M.GG||null!=M.W||null!=M.E?(e=1,i=4,t=iM(M.GG,A._a[qI],yA(rM(),1,4).year),I=iM(M.W,1),g=iM(M.E,1),(g<1||g>7)&&(E=!0)):(e=A._locale._week.dow,i=A._locale._week.doy,t=iM(M.gg,A._a[qI],yA(rM(),e,i).year),I=iM(M.w,1),null!=M.d?(g=M.d,(g<0||g>6)&&(E=!0)):null!=M.e?(g=M.e+e,(M.e<0||M.e>6)&&(E=!0)):g=e),I<1||I>wA(t,e,i)?C(A)._overflowWeeks=!0:null!=E?C(A)._overflowWeekday=!0:(T=jA(t,I,g,e,i),A._a[qI]=T.year,A._dayOfYear=T.dayOfYear)}function oM(A){if(A._f===M.ISO_8601)return void gM(A);A._a=[],C(A).empty=!0;var t,I,g,e,i,T=""+A._i,E=T.length,N=0;for(g=K(A._f,A._locale).match(dI)||[],t=0;t0&&C(A).unusedInput.push(i),T=T.slice(T.indexOf(I)+I.length),N+=I.length),zI[e]?(I?C(A).empty=!1:C(A).unusedTokens.push(e),IA(e,I,A)):A._strict&&!I&&C(A).unusedTokens.push(e);C(A).charsLeftOver=E-N,T.length>0&&C(A).unusedInput.push(T),A._a[Ag]<=12&&C(A).bigHour===!0&&A._a[Ag]>0&&(C(A).bigHour=void 0),C(A).parsedDateParts=A._a.slice(0),C(A).meridiem=A._meridiem,A._a[Ag]=nM(A._locale,A._a[Ag],A._meridiem),EM(A),IM(A)}function nM(A,M,t){var I;return null==t?M:null!=A.meridiemHour?A.meridiemHour(M,t):null!=A.isPM?(I=A.isPM(t),I&&M<12&&(M+=12),I||12!==M||(M=0),M):M}function CM(A){var M,t,I,g,e;if(0===A._f.length)return C(A).invalidFormat=!0,void(A._d=new Date(NaN));for(g=0;gthis.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()}function FM(){if(!a(this._isDSTShifted))return this._isDSTShifted;var 
A={};if(B(A,this),A=aM(A),A._a){var M=A._isUTC?o(A._a):rM(A._a);this._isDSTShifted=this.isValid()&&j(A._a,M.toArray())>0}else this._isDSTShifted=!1;return this._isDSTShifted}function fM(){return!!this.isValid()&&!this._isUTC}function kM(){return!!this.isValid()&&this._isUTC}function RM(){return!!this.isValid()&&(this._isUTC&&0===this._offset)}function JM(A,M){var t,I,g,e=A,i=null;return wM(A)?e={ms:A._milliseconds,d:A._days,M:A._months}:"number"==typeof A?(e={},M?e[M]=A:e.milliseconds=A):(i=Og.exec(A))?(t="-"===i[1]?-1:1,e={y:0,d:x(i[$I])*t,h:x(i[Ag])*t,m:x(i[Mg])*t,s:x(i[tg])*t,ms:x(uM(1e3*i[Ig]))*t}):(i=mg.exec(A))?(t="-"===i[1]?-1:1,e={y:GM(i[2],t),M:GM(i[3],t),w:GM(i[4],t),d:GM(i[5],t),h:GM(i[6],t),m:GM(i[7],t),s:GM(i[8],t)}):null==e?e={}:"object"==typeof e&&("from"in e||"to"in e)&&(g=bM(rM(e.from),rM(e.to)),e={},e.ms=g.milliseconds,e.M=g.months),I=new yM(e),wM(A)&&E(A,"_locale")&&(I._locale=A._locale),I}function GM(A,M){var t=A&&parseFloat(A.replace(",","."));return(isNaN(t)?0:t)*M}function HM(A,M){var t={milliseconds:0,months:0};return t.months=M.month()-A.month()+12*(M.year()-A.year()),A.clone().add(t.months,"M").isAfter(M)&&--t.months,t.milliseconds=+M-+A.clone().add(t.months,"M"),t}function bM(A,M){var t;return A.isValid()&&M.isValid()?(M=lM(M,A),A.isBefore(M)?t=HM(A,M):(t=HM(M,A),t.milliseconds=-t.milliseconds,t.months=-t.months),t):{milliseconds:0,months:0}}function XM(A,M){return function(t,I){var g,e;return null===I||isNaN(+I)||(u(M,"moment()."+M+"(period, number) is deprecated. Please use moment()."+M+"(number, period). 
See http://momentjs.com/guides/#/warnings/add-inverted-param/ for more info."),e=t,t=I,I=e),t="string"==typeof t?+t:t,g=JM(t,I),vM(this,g,A),this}}function vM(A,t,I,g){var e=t._milliseconds,i=uM(t._days),T=uM(t._months);A.isValid()&&(g=null==g||g,e&&A._d.setTime(A._d.valueOf()+e*I),i&&H(A,"Date",G(A,"Date")+i*I),T&&NA(A,G(A,"Month")+T*I),g&&M.updateOffset(A,i||T))}function WM(A,M){var t=A.diff(M,"days",!0);return t<-6?"sameElse":t<-1?"lastWeek":t<0?"lastDay":t<1?"sameDay":t<2?"nextDay":t<7?"nextWeek":"sameElse"}function VM(A,t){var I=A||rM(),g=lM(I,this).startOf("day"),e=M.calendarFormat(this,g)||"sameElse",i=t&&(L(t[e])?t[e].call(this,I):t[e]);return this.format(i||this.localeData().calendar(e,this,rM(I)))}function PM(){return new Q(this)}function ZM(A,M){var t=r(A)?A:rM(A);return!(!this.isValid()||!t.isValid())&&(M=F(a(M)?"millisecond":M),"millisecond"===M?this.valueOf()>t.valueOf():t.valueOf()e&&(M=e),St.call(this,A,M,t,I,g))}function St(A,M,t,I,g){var e=jA(A,M,t,I,g),i=sA(e.year,0,e.dayOfYear);return this.year(i.getUTCFullYear()),this.month(i.getUTCMonth()),this.date(i.getUTCDate()),this}function zt(A){return null==A?Math.ceil((this.month()+1)/3):this.month(3*(A-1)+this.month()%3)}function Ut(A){var M=Math.round((this.clone().startOf("day")-this.clone().startOf("year"))/864e5)+1;return null==A?M:this.add(A-M,"d")}function pt(A,M){M[Ig]=x(1e3*("0."+A))}function Ot(){return this._isUTC?"UTC":""}function mt(){return this._isUTC?"Coordinated Universal Time":""}function Ft(A){return rM(1e3*A)}function ft(){return rM.apply(null,arguments).parseZone()}function kt(A){return A}function Rt(A,M,t,I){var g=MM(),e=o().set(I,M);return g[t](e,A)}function Jt(A,M,t){if("number"==typeof A&&(M=A,A=void 0),A=A||"",null!=M)return Rt(A,M,t,"month");var I,g=[];for(I=0;I<12;I++)g[I]=Rt(A,I,t,"month");return g}function Gt(A,M,t,I){"boolean"==typeof A?("number"==typeof M&&(t=M,M=void 0),M=M||""):(M=A,t=M,A=!1,"number"==typeof M&&(t=M,M=void 0),M=M||"");var 
g=MM(),e=A?g._week.dow:0;if(null!=t)return Rt(M,(t+e)%7,I,"day");var i,T=[];for(i=0;i<7;i++)T[i]=Rt(M,(i+e)%7,I,"day");return T}function Ht(A,M){return Jt(A,M,"months")}function bt(A,M){return Jt(A,M,"monthsShort")}function Xt(A,M,t){return Gt(A,M,t,"weekdays")}function vt(A,M,t){return Gt(A,M,t,"weekdaysShort")}function Wt(A,M,t){return Gt(A,M,t,"weekdaysMin")}function Vt(){var A=this._data;return this._milliseconds=Vg(this._milliseconds),this._days=Vg(this._days),this._months=Vg(this._months),A.milliseconds=Vg(A.milliseconds),A.seconds=Vg(A.seconds),A.minutes=Vg(A.minutes),A.hours=Vg(A.hours),A.months=Vg(A.months),A.years=Vg(A.years),this}function Pt(A,M,t,I){var g=JM(M,t);return A._milliseconds+=I*g._milliseconds,A._days+=I*g._days,A._months+=I*g._months,A._bubble()}function Zt(A,M){return Pt(this,A,M,1)}function Kt(A,M){return Pt(this,A,M,-1)}function qt(A){return A<0?Math.floor(A):Math.ceil(A)}function _t(){var A,M,t,I,g,e=this._milliseconds,i=this._days,T=this._months,E=this._data;return e>=0&&i>=0&&T>=0||e<=0&&i<=0&&T<=0||(e+=864e5*qt(AI(T)+i),i=0,T=0),E.milliseconds=e%1e3,A=s(e/1e3),E.seconds=A%60,M=s(A/60),E.minutes=M%60,t=s(M/60),E.hours=t%24,i+=s(t/24),g=s($t(i)),T+=g,i-=qt(AI(g)),I=s(T/12),T%=12,E.days=i,E.months=T,E.years=I,this}function $t(A){return 4800*A/146097}function AI(A){return 146097*A/4800}function MI(A){var M,t,I=this._milliseconds;if(A=F(A),"month"===A||"year"===A)return M=this._days+I/864e5,t=this._months+$t(M),"month"===A?t:t/12;switch(M=this._days+Math.round(AI(this._months)),A){case"week":return M/7+I/6048e5;case"day":return M+I/864e5;case"hour":return 24*M+I/36e5;case"minute":return 1440*M+I/6e4;case"second":return 86400*M+I/1e3;case"millisecond":return Math.floor(864e5*M)+I;default:throw new Error("Unknown unit "+A)}}function tI(){return this._milliseconds+864e5*this._days+this._months%12*2592e6+31536e6*x(this._months/12)}function II(A){return function(){return this.as(A)}}function gI(A){return A=F(A),this[A+"s"]()}function 
eI(A){return function(){return this._data[A]}}function iI(){return s(this.days()/7)}function TI(A,M,t,I,g){return g.relativeTime(M||1,!!t,A,I)}function EI(A,M,t){var I=JM(A).abs(),g=Ne(I.as("s")),e=Ne(I.as("m")),i=Ne(I.as("h")),T=Ne(I.as("d")),E=Ne(I.as("M")),N=Ne(I.as("y")),o=g0,o[4]=t,TI.apply(null,o)}function NI(A){return void 0===A?Ne:"function"==typeof A&&(Ne=A,!0)}function oI(A,M){return void 0!==oe[A]&&(void 0===M?oe[A]:(oe[A]=M,!0))}function nI(A){var M=this.localeData(),t=EI(this,!A,M);return A&&(t=M.pastFuture(+this,t)),M.postformat(t)}function CI(){var A,M,t,I=ne(this._milliseconds)/1e3,g=ne(this._days),e=ne(this._months);A=s(I/60),M=s(A/60),I%=60,A%=60,t=s(e/12),e%=12;var i=t,T=e,E=g,N=M,o=A,n=I,C=this.asSeconds();return C?(C<0?"-":"")+"P"+(i?i+"Y":"")+(T?T+"M":"")+(E?E+"D":"")+(N||o||n?"T":"")+(N?N+"H":"")+(o?o+"M":"")+(n?n+"S":""):"P0D"}var cI,DI;DI=Array.prototype.some?Array.prototype.some:function(A){for(var M=Object(this),t=M.length>>>0,I=0;I68?1900:2e3)};var ng=J("FullYear",!0);W("w",["ww",2],"wo","week"),W("W",["WW",2],"Wo","isoWeek"),m("week","w"),m("isoWeek","W"),k("week",5),k("isoWeek",5),q("w",fI),q("ww",fI,pI),q("W",fI),q("WW",fI,pI),tA(["w","ww","W","WW"],function(A,M,t,I){M[I.substr(0,1)]=x(A)});var Cg={dow:0,doy:6};W("d",0,"do","day"),W("dd",0,0,function(A){return this.localeData().weekdaysMin(this,A)}),W("ddd",0,0,function(A){return this.localeData().weekdaysShort(this,A)}),W("dddd",0,0,function(A){return this.localeData().weekdays(this,A)}),W("e",0,0,"weekday"),W("E",0,0,"isoWeekday"),m("day","d"),m("weekday","e"),m("isoWeekday","E"),k("day",11),k("weekday",11),k("isoWeekday",11),q("d",fI),q("e",fI),q("E",fI),q("dd",function(A,M){return M.weekdaysMinRegex(A)}),q("ddd",function(A,M){return M.weekdaysShortRegex(A)}),q("dddd",function(A,M){return M.weekdaysRegex(A)}),tA(["dd","ddd","dddd"],function(A,M,t,I){var g=t._locale.weekdaysParse(A,I,t._strict);null!=g?M.d=g:C(t).invalidWeekday=A}),tA(["d","e","E"],function(A,M,t,I){M[I]=x(A)});var 
cg="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),Dg="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),ag="Su_Mo_Tu_We_Th_Fr_Sa".split("_"),Bg=PI,Qg=PI,rg=PI;W("H",["HH",2],0,"hour"),W("h",["hh",2],0,bA),W("k",["kk",2],0,XA),W("hmm",0,0,function(){return""+bA.apply(this)+v(this.minutes(),2)}),W("hmmss",0,0,function(){return""+bA.apply(this)+v(this.minutes(),2)+v(this.seconds(),2)}),W("Hmm",0,0,function(){return""+this.hours()+v(this.minutes(),2)}),W("Hmmss",0,0,function(){return""+this.hours()+v(this.minutes(),2)+v(this.seconds(),2)}),vA("a",!0),vA("A",!1),m("hour","h"),k("hour",13),q("a",WA),q("A",WA),q("H",fI),q("h",fI),q("HH",fI,pI),q("hh",fI,pI),q("hmm",kI),q("hmmss",RI),q("Hmm",kI),q("Hmmss",RI),MA(["H","HH"],Ag),MA(["a","A"],function(A,M,t){t._isPm=t._locale.isPM(A),t._meridiem=A}),MA(["h","hh"],function(A,M,t){M[Ag]=x(A),C(t).bigHour=!0}),MA("hmm",function(A,M,t){var I=A.length-2;M[Ag]=x(A.substr(0,I)),M[Mg]=x(A.substr(I)),C(t).bigHour=!0}),MA("hmmss",function(A,M,t){var I=A.length-4,g=A.length-2;M[Ag]=x(A.substr(0,I)),M[Mg]=x(A.substr(I,2)),M[tg]=x(A.substr(g)),C(t).bigHour=!0}),MA("Hmm",function(A,M,t){var I=A.length-2;M[Ag]=x(A.substr(0,I)),M[Mg]=x(A.substr(I))}),MA("Hmmss",function(A,M,t){var I=A.length-4,g=A.length-2;M[Ag]=x(A.substr(0,I)),M[Mg]=x(A.substr(I,2)),M[tg]=x(A.substr(g))});var sg,xg=/[ap]\.?m?\.?/i,jg=J("Hours",!0),yg={calendar:xI,longDateFormat:jI,invalidDate:yI,ordinal:wI,ordinalParse:uI,relativeTime:LI,months:Tg,monthsShort:Eg,week:Cg,weekdays:cg,weekdaysMin:ag,weekdaysShort:Dg,meridiemParse:xg},wg={},ug=/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?/,Lg=/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d))(?:(T| 
)(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?/,Yg=/Z|[+-]\d\d(?::?\d\d)?/,lg=[["YYYYYY-MM-DD",/[+-]\d{6}-\d\d-\d\d/],["YYYY-MM-DD",/\d{4}-\d\d-\d\d/],["GGGG-[W]WW-E",/\d{4}-W\d\d-\d/],["GGGG-[W]WW",/\d{4}-W\d\d/,!1],["YYYY-DDD",/\d{4}-\d{3}/],["YYYY-MM",/\d{4}-\d\d/,!1],["YYYYYYMMDD",/[+-]\d{10}/],["YYYYMMDD",/\d{8}/],["GGGG[W]WWE",/\d{4}W\d{3}/],["GGGG[W]WW",/\d{4}W\d{2}/,!1],["YYYYDDD",/\d{7}/]],dg=[["HH:mm:ss.SSSS",/\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss,SSSS",/\d\d:\d\d:\d\d,\d+/],["HH:mm:ss",/\d\d:\d\d:\d\d/],["HH:mm",/\d\d:\d\d/],["HHmmss.SSSS",/\d\d\d\d\d\d\.\d+/],["HHmmss,SSSS",/\d\d\d\d\d\d,\d+/],["HHmmss",/\d\d\d\d\d\d/],["HHmm",/\d\d\d\d/],["HH",/\d\d/]],hg=/^\/?Date\((\-?\d+)/i;M.createFromInputFallback=w("value provided is not in a recognized ISO format. moment construction falls back to js Date(), which is not reliable across all browsers and versions. Non ISO date formats are discouraged and will be removed in an upcoming major release. Please refer to http://momentjs.com/guides/#/warnings/js-date/ for more info.",function(A){A._d=new Date(A._i+(A._useUTC?" UTC":""))}),M.ISO_8601=function(){};var Sg=w("moment().min is deprecated, use moment.max instead. http://momentjs.com/guides/#/warnings/min-max/",function(){var A=rM.apply(null,arguments);return this.isValid()&&A.isValid()?Athis?this:A:D()}),Ug=function(){return Date.now?Date.now():+new Date};LM("Z",":"),LM("ZZ",""),q("Z",WI),q("ZZ",WI),MA(["Z","ZZ"],function(A,M,t){t._useUTC=!0,t._tzm=YM(WI,A)});var pg=/([\+\-]|\d\d)/gi;M.updateOffset=function(){};var Og=/^(\-)?(?:(\d*)[. ])?(\d+)\:(\d+)(?:\:(\d+)(\.\d*)?)?$/,mg=/^(-)?P(?:(-?[0-9,.]*)Y)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)W)?(?:(-?[0-9,.]*)D)?(?:T(?:(-?[0-9,.]*)H)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)S)?)?$/;JM.fn=yM.prototype;var Fg=XM(1,"add"),fg=XM(-1,"subtract");M.defaultFormat="YYYY-MM-DDTHH:mm:ssZ",M.defaultFormatUtc="YYYY-MM-DDTHH:mm:ss[Z]";var kg=w("moment().lang() is deprecated. 
Instead, use moment().localeData() to get the language configuration. Use moment().locale() to change languages.",function(A){return void 0===A?this.localeData():this.locale(A)});W(0,["gg",2],0,function(){return this.weekYear()%100}),W(0,["GG",2],0,function(){return this.isoWeekYear()%100}),ut("gggg","weekYear"),ut("ggggg","weekYear"),ut("GGGG","isoWeekYear"),ut("GGGGG","isoWeekYear"),m("weekYear","gg"),m("isoWeekYear","GG"),k("weekYear",1),k("isoWeekYear",1),q("G",XI),q("g",XI),q("GG",fI,pI),q("gg",fI,pI),q("GGGG",GI,mI),q("gggg",GI,mI),q("GGGGG",HI,FI),q("ggggg",HI,FI),tA(["gggg","ggggg","GGGG","GGGGG"],function(A,M,t,I){M[I.substr(0,2)]=x(A)}),tA(["gg","GG"],function(A,t,I,g){t[g]=M.parseTwoDigitYear(A)}),W("Q",0,"Qo","quarter"),m("quarter","Q"),k("quarter",7),q("Q",UI),MA("Q",function(A,M){M[_I]=3*(x(A)-1)}),W("D",["DD",2],"Do","date"),m("date","D"),k("date",9),q("D",fI),q("DD",fI,pI),q("Do",function(A,M){return A?M._ordinalParse:M._ordinalParseLenient}),MA(["D","DD"],$I),MA("Do",function(A,M){M[$I]=x(A.match(fI)[0],10)});var Rg=J("Date",!0);W("DDD",["DDDD",3],"DDDo","dayOfYear"),m("dayOfYear","DDD"),k("dayOfYear",4),q("DDD",JI),q("DDDD",OI),MA(["DDD","DDDD"],function(A,M,t){t._dayOfYear=x(A)}),W("m",["mm",2],0,"minute"),m("minute","m"),k("minute",14),q("m",fI),q("mm",fI,pI),MA(["m","mm"],Mg);var Jg=J("Minutes",!1);W("s",["ss",2],0,"second"),m("second","s"),k("second",15),q("s",fI),q("ss",fI,pI),MA(["s","ss"],tg);var Gg=J("Seconds",!1);W("S",0,0,function(){return~~(this.millisecond()/100)}),W(0,["SS",2],0,function(){return~~(this.millisecond()/10)}),W(0,["SSS",3],0,"millisecond"),W(0,["SSSS",4],0,function(){return 10*this.millisecond()}),W(0,["SSSSS",5],0,function(){return 100*this.millisecond()}),W(0,["SSSSSS",6],0,function(){return 1e3*this.millisecond()}),W(0,["SSSSSSS",7],0,function(){return 1e4*this.millisecond()}),W(0,["SSSSSSSS",8],0,function(){return 1e5*this.millisecond()}),W(0,["SSSSSSSSS",9],0,function(){return 
1e6*this.millisecond()}),m("millisecond","ms"),k("millisecond",16),q("S",JI,UI),q("SS",JI,pI),q("SSS",JI,OI);var Hg;for(Hg="SSSS";Hg.length<=9;Hg+="S")q(Hg,bI);for(Hg="S";Hg.length<=9;Hg+="S")MA(Hg,pt);var bg=J("Milliseconds",!1);W("z",0,0,"zoneAbbr"),W("zz",0,0,"zoneName");var Xg=Q.prototype;Xg.add=Fg,Xg.calendar=VM,Xg.clone=PM,Xg.diff=Mt,Xg.endOf=ct,Xg.format=et,Xg.from=it,Xg.fromNow=Tt,Xg.to=Et,Xg.toNow=Nt,Xg.get=b,Xg.invalidAt=yt,Xg.isAfter=ZM,Xg.isBefore=KM,Xg.isBetween=qM,Xg.isSame=_M,Xg.isSameOrAfter=$M,Xg.isSameOrBefore=At,Xg.isValid=xt,Xg.lang=kg,Xg.locale=ot,Xg.localeData=nt,Xg.max=zg,Xg.min=Sg,Xg.parsingFlags=jt,Xg.set=X,Xg.startOf=Ct,Xg.subtract=fg,Xg.toArray=Qt,Xg.toObject=rt,Xg.toDate=Bt,Xg.toISOString=gt,Xg.toJSON=st,Xg.toString=It,Xg.unix=at,Xg.valueOf=Dt,Xg.creationData=wt,Xg.year=ng,Xg.isLeapYear=QA,Xg.weekYear=Lt,Xg.isoWeekYear=Yt,Xg.quarter=Xg.quarters=zt,Xg.month=oA,Xg.daysInMonth=nA,Xg.week=Xg.weeks=lA,Xg.isoWeek=Xg.isoWeeks=dA,Xg.weeksInYear=dt,Xg.isoWeeksInYear=lt,Xg.date=Rg,Xg.day=Xg.days=FA,Xg.weekday=fA,Xg.isoWeekday=kA,Xg.dayOfYear=Ut,Xg.hour=Xg.hours=jg,Xg.minute=Xg.minutes=Jg,Xg.second=Xg.seconds=Gg,Xg.millisecond=Xg.milliseconds=bg,Xg.utcOffset=hM,Xg.utc=zM,Xg.local=UM,Xg.parseZone=pM,Xg.hasAlignedHourOffset=OM,Xg.isDST=mM,Xg.isLocal=fM,Xg.isUtcOffset=kM,Xg.isUtc=RM,Xg.isUTC=RM,Xg.zoneAbbr=Ot,Xg.zoneName=mt,Xg.dates=w("dates accessor is deprecated. Use date instead.",Rg),Xg.months=w("months accessor is deprecated. Use month instead",oA),Xg.years=w("years accessor is deprecated. Use year instead",ng),Xg.zone=w("moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/",SM),Xg.isDSTShifted=w("isDSTShifted is deprecated. 
See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information",FM);var vg=Xg,Wg=d.prototype;Wg.calendar=h,Wg.longDateFormat=S,Wg.invalidDate=z,Wg.ordinal=U,Wg.preparse=kt,Wg.postformat=kt,Wg.relativeTime=p,Wg.pastFuture=O,Wg.set=Y,Wg.months=eA,Wg.monthsShort=iA,Wg.monthsParse=EA,Wg.monthsRegex=cA,Wg.monthsShortRegex=CA,Wg.week=uA,Wg.firstDayOfYear=YA,Wg.firstDayOfWeek=LA,Wg.weekdays=zA,Wg.weekdaysMin=pA,Wg.weekdaysShort=UA,Wg.weekdaysParse=mA,Wg.weekdaysRegex=RA,Wg.weekdaysShortRegex=JA,Wg.weekdaysMinRegex=GA,Wg.isPM=VA,Wg.meridiem=PA,_A("en",{ordinalParse:/\d{1,2}(th|st|nd|rd)/,ordinal:function(A){var M=A%10,t=1===x(A%100/10)?"th":1===M?"st":2===M?"nd":3===M?"rd":"th";return A+t}}),M.lang=w("moment.lang is deprecated. Use moment.locale instead.",_A),M.langData=w("moment.langData is deprecated. Use moment.localeData instead.",MM);var Vg=Math.abs,Pg=II("ms"),Zg=II("s"),Kg=II("m"),qg=II("h"),_g=II("d"),$g=II("w"),Ae=II("M"),Me=II("y"),te=eI("milliseconds"),Ie=eI("seconds"),ge=eI("minutes"),ee=eI("hours"),ie=eI("days"),Te=eI("months"),Ee=eI("years"),Ne=Math.round,oe={s:45,m:45,h:22,d:26,M:11},ne=Math.abs,Ce=yM.prototype;Ce.abs=Vt,Ce.add=Zt,Ce.subtract=Kt,Ce.as=MI,Ce.asMilliseconds=Pg,Ce.asSeconds=Zg,Ce.asMinutes=Kg,Ce.asHours=qg,Ce.asDays=_g,Ce.asWeeks=$g,Ce.asMonths=Ae,Ce.asYears=Me,Ce.valueOf=tI,Ce._bubble=_t,Ce.get=gI,Ce.milliseconds=te,Ce.seconds=Ie,Ce.minutes=ge,Ce.hours=ee,Ce.days=ie,Ce.weeks=iI,Ce.months=Te,Ce.years=Ee,Ce.humanize=nI,Ce.toISOString=CI,Ce.toString=CI,Ce.toJSON=CI,Ce.locale=ot,Ce.localeData=nt,Ce.toIsoString=w("toIsoString() is deprecated. 
Please use toISOString() instead (notice the capitals)",CI),Ce.lang=kg,W("X",0,0,"unix"),W("x",0,0,"valueOf"),q("x",XI),q("X",VI),MA("X",function(A,M,t){t._d=new Date(1e3*parseFloat(A,10))}),MA("x",function(A,M,t){t._d=new Date(x(A))}),M.version="2.15.1",t(rM),M.fn=vg,M.min=xM,M.max=jM,M.now=Ug,M.utc=o,M.unix=Ft,M.months=Ht,M.isDate=i,M.locale=_A,M.invalid=D,M.duration=JM,M.isMoment=r,M.weekdays=Xt,M.parseZone=ft,M.localeData=MM,M.isDuration=wM,M.monthsShort=bt,M.weekdaysMin=Wt,M.defineLocale=$A,M.updateLocale=AM,M.locales=tM,M.weekdaysShort=vt,M.normalizeUnits=F,M.relativeTimeRounding=NI,M.relativeTimeThreshold=oI,M.calendarFormat=WM,M.prototype=vg;var ce=M;return ce})}).call(M,t(127)(A))},function(A,M,t){"use strict";var I=t(131)["default"],g=t(132)["default"],e=t(77)["default"];M.__esModule=!0;var i=function(A){return I(g({values:function(){var A=this;return e(this).map(function(M){return A[M]})}}),A)},T={SIZES:{large:"lg",medium:"md",small:"sm",xsmall:"xs",lg:"lg",md:"md",sm:"sm",xs:"xs"},GRID_COLUMNS:12},E=i({LARGE:"large",MEDIUM:"medium",SMALL:"small",XSMALL:"xsmall"});M.Sizes=E;var N=i({SUCCESS:"success",WARNING:"warning",DANGER:"danger",INFO:"info"});M.State=N;var o="default";M.DEFAULT=o;var n="primary";M.PRIMARY=n;var C="link";M.LINK=C;var c="inverse";M.INVERSE=c,M["default"]=T},function(A,M){"use strict";function t(){for(var A=arguments.length,M=Array(A),t=0;t0?void 0:(0,C["default"])(!1),null!=o&&(e+=encodeURI(o))):"("===E?g+=1:")"===E?g-=1:":"===E.charAt(0)?(N=E.substring(1),o=M[N],null!=o||g>0?void 0:(0,C["default"])(!1),null!=o&&(e+=encodeURIComponent(o))):e+=E;return e.replace(/\/+/g,"/")}M.__esModule=!0,M.compilePattern=i,M.matchPattern=T,M.getParamNames=E,M.getParams=N,M.formatPattern=o;var n=t(8),C=I(n),c=Object.create(null)},function(A,M){"use strict";M.__esModule=!0;var t="PUSH";M.PUSH=t;var I="REPLACE";M.REPLACE=I;var g="POP";M.POP=g,M["default"]={PUSH:t,REPLACE:I,POP:g}},function(A,M,t){"use strict";function I(A,M){return(A&M)===M}var 
g=t(2),e={MUST_USE_ATTRIBUTE:1,MUST_USE_PROPERTY:2,HAS_SIDE_EFFECTS:4,HAS_BOOLEAN_VALUE:8,HAS_NUMERIC_VALUE:16,HAS_POSITIVE_NUMERIC_VALUE:48,HAS_OVERLOADED_BOOLEAN_VALUE:64,injectDOMPropertyConfig:function(A){var M=e,t=A.Properties||{},i=A.DOMAttributeNamespaces||{},E=A.DOMAttributeNames||{},N=A.DOMPropertyNames||{},o=A.DOMMutationMethods||{};A.isCustomAttribute&&T._isCustomAttributeFunctions.push(A.isCustomAttribute);for(var n in t){T.properties.hasOwnProperty(n)?g(!1):void 0;var C=n.toLowerCase(),c=t[n],D={attributeName:C,attributeNamespace:null,propertyName:n,mutationMethod:null,mustUseAttribute:I(c,M.MUST_USE_ATTRIBUTE),mustUseProperty:I(c,M.MUST_USE_PROPERTY),hasSideEffects:I(c,M.HAS_SIDE_EFFECTS),hasBooleanValue:I(c,M.HAS_BOOLEAN_VALUE),hasNumericValue:I(c,M.HAS_NUMERIC_VALUE),hasPositiveNumericValue:I(c,M.HAS_POSITIVE_NUMERIC_VALUE),hasOverloadedBooleanValue:I(c,M.HAS_OVERLOADED_BOOLEAN_VALUE)};if(D.mustUseAttribute&&D.mustUseProperty?g(!1):void 0,!D.mustUseProperty&&D.hasSideEffects?g(!1):void 0,D.hasBooleanValue+D.hasNumericValue+D.hasOverloadedBooleanValue<=1?void 0:g(!1),E.hasOwnProperty(n)){var a=E[n];D.attributeName=a}i.hasOwnProperty(n)&&(D.attributeNamespace=i[n]),N.hasOwnProperty(n)&&(D.propertyName=N[n]),o.hasOwnProperty(n)&&(D.mutationMethod=o[n]),T.properties[n]=D}}},i={},T={ID_ATTRIBUTE_NAME:"data-reactid",properties:{},getPossibleStandardName:null,_isCustomAttributeFunctions:[],isCustomAttribute:function(A){for(var M=0;M1){var M=A.indexOf(c,1);return M>-1?A.substr(0,M):A}return null},traverseEnterLeave:function(A,M,t,I,g){var e=N(A,M);e!==A&&o(A,e,t,I,!1,!0),e!==M&&o(e,M,t,g,!0,!1)},traverseTwoPhase:function(A,M,t){A&&(o("",A,M,t,!0,!1),o(A,"",M,t,!1,!0))},traverseTwoPhaseSkipTarget:function(A,M,t){A&&(o("",A,M,t,!0,!0),o(A,"",M,t,!0,!0))},traverseAncestors:function(A,M,t){o("",A,M,t,!0,!1)},getFirstCommonAncestorID:N,_getNextDescendantID:E,isAncestorIDOf:i,SEPARATOR:c};A.exports=B},function(A,M){var 
t=A.exports={version:"1.2.6"};"number"==typeof __e&&(__e=t)},function(A,M,t){"use strict";var I={};A.exports=I},function(A,M,t){"use strict";function I(A,M,t){var I=0;return n["default"].Children.map(A,function(A){if(n["default"].isValidElement(A)){var g=I;return I++,M.call(t,A,g)}return A})}function g(A,M,t){var I=0;return n["default"].Children.forEach(A,function(A){n["default"].isValidElement(A)&&(M.call(t,A,I),I++)})}function e(A){var M=0;return n["default"].Children.forEach(A,function(A){n["default"].isValidElement(A)&&M++}),M}function i(A){var M=!1;return n["default"].Children.forEach(A,function(A){!M&&n["default"].isValidElement(A)&&(M=!0)}),M}function T(A,M){var t=void 0;return g(A,function(I,g){!t&&M(I,g,A)&&(t=I)}),t}function E(A,M,t){var I=0,g=[];return n["default"].Children.forEach(A,function(A){n["default"].isValidElement(A)&&(M.call(t,A,I)&&g.push(A),I++)}),g}var N=t(6)["default"];M.__esModule=!0;var o=t(1),n=N(o);M["default"]={map:I,forEach:g,numberOf:e,find:T,findValidComponents:E,hasValidComponent:i},A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}Object.defineProperty(M,"__esModule",{value:!0}),M["default"]=function(A){return(0,T["default"])(e["default"].findDOMNode(A))};var g=t(16),e=I(g),i=t(39),T=I(i);A.exports=M["default"]},function(A,M,t){"use strict";var I=t(203),g=t(428),e=t(216),i=t(225),T=t(226),E=t(2),N=(t(3),{}),o=null,n=function(A,M){A&&(g.executeDispatchesInOrder(A,M),A.isPersistent()||A.constructor.release(A))},C=function(A){return n(A,!0)},c=function(A){return n(A,!1)},D=null,a={injection:{injectMount:g.injection.injectMount,injectInstanceHandle:function(A){D=A},getInstanceHandle:function(){return D},injectEventPluginOrder:I.injectEventPluginOrder,injectEventPluginsByName:I.injectEventPluginsByName},eventNameDispatchConfigs:I.eventNameDispatchConfigs,registrationNameModules:I.registrationNameModules,putListener:function(A,M,t){"function"!=typeof t?E(!1):void 0;var 
g=N[M]||(N[M]={});g[A]=t;var e=I.registrationNameModules[M];e&&e.didPutListener&&e.didPutListener(A,M,t)},getListener:function(A,M){var t=N[M];return t&&t[A]},deleteListener:function(A,M){var t=I.registrationNameModules[M];t&&t.willDeleteListener&&t.willDeleteListener(A,M);var g=N[M];g&&delete g[A]},deleteAllListeners:function(A){for(var M in N)if(N[M][A]){var t=I.registrationNameModules[M];t&&t.willDeleteListener&&t.willDeleteListener(A,M),delete N[M][A]}},extractEvents:function(A,M,t,g,e){for(var T,E=I.plugins,N=0;N1?I-1:0),e=1;e":">","<":"<",'"':""","'":"'"},e=/[&><"']/g;A.exports=I},function(A,M,t){"use strict";var I=t(10),g=/^[ \r\n\t\f]/,e=/<(!--|link|noscript|meta|script|style)[ \r\n\t\f\/>]/,i=function(A,M){A.innerHTML=M};if("undefined"!=typeof MSApp&&MSApp.execUnsafeLocalFunction&&(i=function(A,M){MSApp.execUnsafeLocalFunction(function(){A.innerHTML=M})}),I.canUseDOM){var T=document.createElement("div");T.innerHTML=" ",""===T.innerHTML&&(i=function(A,M){if(A.parentNode&&A.parentNode.replaceChild(A,A),g.test(M)||"<"===M[0]&&e.test(M)){A.innerHTML=String.fromCharCode(65279)+M;var t=A.firstChild;1===t.data.length?A.removeChild(t):t.deleteData(0,1)}else A.innerHTML=M})}A.exports=i},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(1),e=I(g),i=function(A){var M=A.label,t=A.id,I=A.name,g=A.value,i=A.onChange,T=A.type,E=A.spellCheck,N=A.required,o=A.readonly,n=A.autoComplete,C=A.align,c=A.className,D=e["default"].createElement("input",{id:t,name:I,value:g,onChange:i,className:"ig-text",type:T,spellCheck:E,required:N,autoComplete:n});return o&&(D=e["default"].createElement("input",{id:t,name:I,value:g,onChange:i,className:"ig-text",type:T,spellCheck:E,required:N,autoComplete:n,disabled:!0})),e["default"].createElement("div",{className:"input-group "+C+" 
"+c},D,e["default"].createElement("i",{className:"ig-helpers"}),e["default"].createElement("label",{className:"ig-label"},M))};M["default"]=i},function(A,M,t){A.exports={"default":t(256),__esModule:!0}},function(A,M,t){var I=t(262),g=t(49),e=t(133),i="prototype",T=function(A,M,t){var E,N,o,n=A&T.F,C=A&T.G,c=A&T.S,D=A&T.P,a=A&T.B,B=A&T.W,Q=C?g:g[M]||(g[M]={}),r=C?I:c?I[M]:(I[M]||{})[i];C&&(t=M);for(E in t)N=!n&&r&&E in r,N&&E in Q||(o=N?r[E]:t[E],Q[E]=C&&"function"!=typeof r[E]?t[E]:a&&N?e(o,I):B&&r[E]==o?function(A){var M=function(M){return this instanceof A?new A(M):A(M)};return M[i]=A[i],M}(o):D&&"function"==typeof o?e(Function.call,o):o,D&&((Q[i]||(Q[i]={}))[E]=o))};T.F=1,T.G=2,T.S=4,T.P=8,T.B=16,T.W=32,A.exports=T},function(A,M){var t=Object;A.exports={create:t.create,getProto:t.getPrototypeOf,isEnum:{}.propertyIsEnumerable,getDesc:t.getOwnPropertyDescriptor,setDesc:t.defineProperty,setDescs:t.defineProperties,getKeys:t.keys,getNames:t.getOwnPropertyNames,getSymbols:t.getOwnPropertySymbols,each:[].forEach}},function(A,M,t){"use strict";var I=t(32),g=function(){};I&&(g=function(){return document.addEventListener?function(A,M,t,I){return A.addEventListener(M,t,I||!1)}:document.attachEvent?function(A,M,t){return A.attachEvent("on"+M,t)}:void 0}()),A.exports=g},function(A,M,t){"use strict";var I=t(144),g=t(292),e=t(287),i=t(288),T=Object.prototype.hasOwnProperty;A.exports=function(A,M,t){var E="",N=M;if("string"==typeof M){if(void 0===t)return A.style[I(M)]||e(A).getPropertyValue(g(M));(N={})[M]=t}for(var o in N)T.call(N,o)&&(N[o]||0===N[o]?E+=g(o)+":"+N[o]+";":i(A,g(o)));A.style.cssText+=";"+E}},function(A,M,t){(function(){var t=this,I=t.humanize,g={};"undefined"!=typeof A&&A.exports&&(M=A.exports=g),M.humanize=g,g.noConflict=function(){return t.humanize=I,this},g.pad=function(A,M,t,I){if(A+="",t?t.length>1&&(t=t.charAt(0)):t=" ",I=void 0===I?"left":"right","right"===I)for(;A.length4&&A<21?"th":{1:"st",2:"nd",3:"rd"}[A%10]||"th"},w:function(){return 
t.getDay()},z:function(){return(o.L()?i[o.n()]:e[o.n()])+o.j()-1},W:function(){var A=o.z()-o.N()+1.5;return g.pad(1+Math.floor(Math.abs(A)/7)+(A%7>3.5?1:0),2,"0")},F:function(){return N[t.getMonth()]},m:function(){return g.pad(o.n(),2,"0")},M:function(){return o.F().slice(0,3)},n:function(){return t.getMonth()+1},t:function(){return new Date(o.Y(),o.n(),0).getDate()},L:function(){return 1===new Date(o.Y(),1,29).getMonth()?1:0},o:function(){var A=o.n(),M=o.W();return o.Y()+(12===A&&M<9?-1:1===A&&M>9)},Y:function(){return t.getFullYear()},y:function(){return String(o.Y()).slice(-2)},a:function(){return t.getHours()>11?"pm":"am"},A:function(){return o.a().toUpperCase()},B:function(){var A=t.getTime()/1e3,M=A%86400+3600;M<0&&(M+=86400);var I=M/86.4%1e3;return A<0?Math.ceil(I):Math.floor(I)},g:function(){return o.G()%12||12},G:function(){return t.getHours()},h:function(){return g.pad(o.g(),2,"0")},H:function(){return g.pad(o.G(),2,"0")},i:function(){return g.pad(t.getMinutes(),2,"0")},s:function(){return g.pad(t.getSeconds(),2,"0")},u:function(){return g.pad(1e3*t.getMilliseconds(),6,"0")},O:function(){var A=t.getTimezoneOffset(),M=Math.abs(A);return(A>0?"-":"+")+g.pad(100*Math.floor(M/60)+M%60,4,"0")},P:function(){var A=o.O();return A.substr(0,3)+":"+A.substr(3,2)},Z:function(){return 60*-t.getTimezoneOffset()},c:function(){return"Y-m-d\\TH:i:sP".replace(I,T)},r:function(){return"D, d M Y H:i:s O".replace(I,T)},U:function(){return t.getTime()/1e3||0}};return A.replace(I,T)},g.numberFormat=function(A,M,t,I){M=isNaN(M)?2:Math.abs(M),t=void 0===t?".":t,I=void 0===I?",":I;var g=A<0?"-":"";A=Math.abs(+A||0);var e=parseInt(A.toFixed(M),10)+"",i=e.length>3?e.length%3:0;return g+(i?e.substr(0,i)+I:"")+e.substr(i).replace(/(\d{3})(?=\d)/g,"$1"+I)+(M?t+Math.abs(A-e).toFixed(M).slice(2):"")},g.naturalDay=function(A,M){A=void 0===A?g.time():A,M=void 0===M?"Y-m-d":M;var t=86400,I=new Date,e=new Date(I.getFullYear(),I.getMonth(),I.getDate()).getTime()/1e3;return 
A=e-t?"yesterday":A>=e&&A=e+t&&A-2)return(t>=0?"just ":"")+"now";if(t<60&&t>-60)return t>=0?Math.floor(t)+" seconds ago":"in "+Math.floor(-t)+" seconds";if(t<120&&t>-120)return t>=0?"about a minute ago":"in about a minute";if(t<3600&&t>-3600)return t>=0?Math.floor(t/60)+" minutes ago":"in "+Math.floor(-t/60)+" minutes";if(t<7200&&t>-7200)return t>=0?"about an hour ago":"in about an hour";if(t<86400&&t>-86400)return t>=0?Math.floor(t/3600)+" hours ago":"in "+Math.floor(-t/3600)+" hours";var I=172800;if(t-I)return t>=0?"1 day ago":"in 1 day";var e=2505600;if(t-e)return t>=0?Math.floor(t/86400)+" days ago":"in "+Math.floor(-t/86400)+" days";var i=5184e3;if(t-i)return t>=0?"about a month ago":"in about a month";var T=parseInt(g.date("Y",M),10),E=parseInt(g.date("Y",A),10),N=12*T+parseInt(g.date("n",M),10),o=12*E+parseInt(g.date("n",A),10),n=N-o;if(n<12&&n>-12)return n>=0?n+" months ago":"in "+-n+" months";var C=T-E;return C<2&&C>-2?C>=0?"a year ago":"in a year":C>=0?C+" years ago":"in "+-C+" years"},g.ordinal=function(A){A=parseInt(A,10),A=isNaN(A)?0:A;var M=A<0?"-":"";A=Math.abs(A);var t=A%100;return M+A+(t>4&&t<21?"th":{1:"st",2:"nd",3:"rd"}[A%10]||"th")},g.filesize=function(A,M,t,I,e,i){return M=void 0===M?1024:M,A<=0?"0 bytes":(A

"),A=A.replace(/\n/g,"
"),"

"+A+"

"},g.nl2br=function(A){return A.replace(/(\r\n|\n|\r)/g,"
")},g.truncatechars=function(A,M){return A.length<=M?A:A.substr(0,M)+"…"},g.truncatewords=function(A,M){var t=A.split(" ");return t.length0,Q=C.enumErrorProps&&(A===w||A instanceof Error),r=C.enumPrototypes&&T(A);++IM.documentElement.clientHeight;return{modalStyles:{paddingRight:I&&!g?Q["default"]():void 0,paddingLeft:!I&&g?Q["default"]():void 0}}}});X.Body=z["default"],X.Header=p["default"],X.Title=m["default"],X.Footer=f["default"],X.Dialog=h["default"],X.TRANSITION_DURATION=300,X.BACKDROP_TRANSITION_DURATION=150,M["default"]=c.bsSizes([a.Sizes.LARGE,a.Sizes.SMALL],c.bsClass("modal",X)),A.exports=M["default"]},function(A,M,t){"use strict";var I=t(15)["default"],g=t(14)["default"],e=t(38)["default"],i=t(7)["default"],T=t(6)["default"];M.__esModule=!0;var E=t(1),N=T(E),o=t(5),n=T(o),C=t(11),c=T(C),D=t(44),a=T(D),B=function(A){function M(){g(this,M),A.apply(this,arguments)}return I(M,A),M.prototype.render=function(){var A=this.props,M=A["aria-label"],t=e(A,["aria-label"]),I=a["default"](this.context.$bs_onModalHide,this.props.onHide);return N["default"].createElement("div",i({},t,{className:n["default"](this.props.className,c["default"].prefix(this.props,"header"))}),this.props.closeButton&&N["default"].createElement("button",{type:"button",className:"close","aria-label":M,onClick:I},N["default"].createElement("span",{"aria-hidden":"true"},"×")),this.props.children)},M}(N["default"].Component);B.propTypes={"aria-label":N["default"].PropTypes.string,bsClass:N["default"].PropTypes.string,closeButton:N["default"].PropTypes.bool,onHide:N["default"].PropTypes.func},B.contextTypes={$bs_onModalHide:N["default"].PropTypes.func},B.defaultProps={"aria-label":"Close",closeButton:!1},M["default"]=C.bsClass("modal",B),A.exports=M["default"]},function(A,M,t){"use strict";function I(A,M){return Array.isArray(M)?M.indexOf(A)>=0:A===M}var g=t(7)["default"],e=t(77)["default"],i=t(6)["default"];M.__esModule=!0;var 
T=t(40),E=i(T),N=t(169),o=i(N),n=t(1),C=i(n),c=t(16),D=i(c),a=t(126),B=(i(a),t(367)),Q=i(B),r=t(44),s=i(r),x=C["default"].createClass({displayName:"OverlayTrigger",propTypes:g({},Q["default"].propTypes,{trigger:C["default"].PropTypes.oneOfType([C["default"].PropTypes.oneOf(["click","hover","focus"]),C["default"].PropTypes.arrayOf(C["default"].PropTypes.oneOf(["click","hover","focus"]))]),delay:C["default"].PropTypes.number,delayShow:C["default"].PropTypes.number,delayHide:C["default"].PropTypes.number,defaultOverlayShown:C["default"].PropTypes.bool,overlay:C["default"].PropTypes.node.isRequired,onBlur:C["default"].PropTypes.func,onClick:C["default"].PropTypes.func,onFocus:C["default"].PropTypes.func,onMouseEnter:C["default"].PropTypes.func,onMouseLeave:C["default"].PropTypes.func,target:function(){},onHide:function(){},show:function(){}}),getDefaultProps:function(){return{defaultOverlayShown:!1,trigger:["hover","focus"]}},getInitialState:function(){return{isOverlayShown:this.props.defaultOverlayShown}},show:function(){this.setState({isOverlayShown:!0})},hide:function(){this.setState({isOverlayShown:!1})},toggle:function(){this.state.isOverlayShown?this.hide():this.show()},componentWillMount:function(){this.handleMouseOver=this.handleMouseOverOut.bind(null,this.handleDelayedShow),this.handleMouseOut=this.handleMouseOverOut.bind(null,this.handleDelayedHide)},componentDidMount:function(){this._mountNode=document.createElement("div"),this.renderOverlay()},renderOverlay:function(){D["default"].unstable_renderSubtreeIntoContainer(this,this._overlay,this._mountNode)},componentWillUnmount:function(){D["default"].unmountComponentAtNode(this._mountNode),this._mountNode=null,clearTimeout(this._hoverShowDelay),clearTimeout(this._hoverHideDelay)},componentDidUpdate:function(){this._mountNode&&this.renderOverlay()},getOverlayTarget:function(){return D["default"].findDOMNode(this)},getOverlay:function(){var 
A=g({},o["default"](this.props,e(Q["default"].propTypes)),{show:this.state.isOverlayShown,onHide:this.hide,target:this.getOverlayTarget,onExit:this.props.onExit,onExiting:this.props.onExiting,onExited:this.props.onExited,onEnter:this.props.onEnter,onEntering:this.props.onEntering,onEntered:this.props.onEntered}),M=n.cloneElement(this.props.overlay,{placement:A.placement,container:A.container});return C["default"].createElement(Q["default"],A,M)},render:function(){var A=C["default"].Children.only(this.props.children),M=A.props,t={"aria-describedby":this.props.overlay.props.id};return this._overlay=this.getOverlay(),t.onClick=s["default"](M.onClick,this.props.onClick),I("click",this.props.trigger)&&(t.onClick=s["default"](this.toggle,t.onClick)),I("hover",this.props.trigger)&&(t.onMouseOver=s["default"](this.handleMouseOver,this.props.onMouseOver,M.onMouseOver),t.onMouseOut=s["default"](this.handleMouseOut,this.props.onMouseOut,M.onMouseOut)),I("focus",this.props.trigger)&&(t.onFocus=s["default"](this.handleDelayedShow,this.props.onFocus,M.onFocus),t.onBlur=s["default"](this.handleDelayedHide,this.props.onBlur,M.onBlur)),n.cloneElement(A,t)},handleDelayedShow:function(){var A=this;if(null!=this._hoverHideDelay)return clearTimeout(this._hoverHideDelay),void(this._hoverHideDelay=null);if(!this.state.isOverlayShown&&null==this._hoverShowDelay){var M=null!=this.props.delayShow?this.props.delayShow:this.props.delay;return M?void(this._hoverShowDelay=setTimeout(function(){A._hoverShowDelay=null,A.show()},M)):void this.show()}},handleDelayedHide:function(){var A=this;if(null!=this._hoverShowDelay)return clearTimeout(this._hoverShowDelay),void(this._hoverShowDelay=null);if(this.state.isOverlayShown&&null==this._hoverHideDelay){var M=null!=this.props.delayHide?this.props.delayHide:this.props.delay;return M?void(this._hoverHideDelay=setTimeout(function(){A._hoverHideDelay=null,A.hide()},M)):void this.hide()}},handleMouseOverOut:function(A,M){var 
t=M.currentTarget,I=M.relatedTarget||M.nativeEvent.toElement;I&&(I===t||E["default"](t,I))||A(M)}});M["default"]=x,A.exports=M["default"]},function(A,M,t){"use strict";var I=t(7)["default"],g=t(6)["default"];M.__esModule=!0;var e=t(1),i=g(e),T=t(5),E=g(T),N=t(11),o=g(N),n=t(183),C=g(n),c=i["default"].createClass({displayName:"Tooltip",propTypes:{id:C["default"](i["default"].PropTypes.oneOfType([i["default"].PropTypes.string,i["default"].PropTypes.number])),placement:i["default"].PropTypes.oneOf(["top","right","bottom","left"]),positionLeft:i["default"].PropTypes.number,positionTop:i["default"].PropTypes.number,arrowOffsetLeft:i["default"].PropTypes.oneOfType([i["default"].PropTypes.number,i["default"].PropTypes.string]),arrowOffsetTop:i["default"].PropTypes.oneOfType([i["default"].PropTypes.number,i["default"].PropTypes.string]),title:i["default"].PropTypes.node},getDefaultProps:function(){return{bsClass:"tooltip",placement:"right"}},render:function(){var A,M=(A={},A[o["default"].prefix(this.props)]=!0,A[this.props.placement]=!0,A),t=I({left:this.props.positionLeft,top:this.props.positionTop},this.props.style),g={left:this.props.arrowOffsetLeft,top:this.props.arrowOffsetTop};return i["default"].createElement("div",I({ -role:"tooltip"},this.props,{className:E["default"](this.props.className,M),style:t}),i["default"].createElement("div",{className:o["default"].prefix(this.props,"arrow"),style:g}),i["default"].createElement("div",{className:o["default"].prefix(this.props,"inner")},this.props.children))}});M["default"]=c,A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A,M){return A="function"==typeof A?A():A,i["default"].findDOMNode(A)||M}Object.defineProperty(M,"__esModule",{value:!0}),M["default"]=g;var e=t(16),i=I(e);A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A,M,t,I,g){var i=A[M],E="undefined"==typeof 
i?"undefined":e(i);return T["default"].isValidElement(i)?new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of type ReactElement "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected a ReactComponent or a ")+"DOMElement. You can usually obtain a ReactComponent or DOMElement from a ReactElement by attaching a ref to it."):"object"===E&&"function"==typeof i.render||1===i.nodeType?null:new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of value ` + "`" + `"+i+"` + "`" + ` "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected a ReactComponent or a ")+"DOMElement.")}M.__esModule=!0;var e="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(A){return typeof A}:function(A){return A&&"function"==typeof Symbol&&A.constructor===Symbol?"symbol":typeof A},i=t(1),T=I(i),E=t(182),N=I(E);M["default"]=(0,N["default"])(g)},function(A,M,t){"use strict";function I(){function A(A,M,I){for(var g=0;g>",null!=t[I]?A(t,I,g):M?new Error("Required prop '"+I+"' was not specified in '"+g+"'."):void 0}var t=M.bind(null,!1);return t.isRequired=M.bind(null,!0),t}M.__esModule=!0,M.errMsg=t,M.createChainableTypeChecker=I},function(A,M){"use strict";function t(A,M,t){function I(){return i=!0,T?void(N=[].concat(Array.prototype.slice.call(arguments))):void t.apply(this,arguments)}function g(){if(!i&&(E=!0,!T)){for(T=!0;!i&&e=A&&E&&(i=!0,t()))}}var e=0,i=!1,T=!1,E=!1,N=void 0;g()}function I(A,M,t){function I(A,M,I){i||(M?(i=!0,t(M)):(e[A]=I,i=++T===g,i&&t(null,e)))}var g=A.length,e=[];if(0===g)return t(null,e);var i=!1,T=0;A.forEach(function(A,t){M(A,t,function(A,M){I(t,A,M)})})}M.__esModule=!0,M.loopAsync=t,M.mapAsync=I},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M["default"]=A,M}function g(A){return 
A&&A.__esModule?A:{"default":A}}M.__esModule=!0,M.router=M.routes=M.route=M.components=M.component=M.location=M.history=M.falsy=M.locationShape=M.routerShape=void 0;var e=t(1),i=t(65),T=(g(i),t(34)),E=I(T),N=t(9),o=(g(N),e.PropTypes.func),n=e.PropTypes.object,C=e.PropTypes.shape,c=e.PropTypes.string,D=M.routerShape=C({push:o.isRequired,replace:o.isRequired,go:o.isRequired,goBack:o.isRequired,goForward:o.isRequired,setRouteLeaveHook:o.isRequired,isActive:o.isRequired}),a=M.locationShape=C({pathname:c.isRequired,search:c.isRequired,state:n,action:c.isRequired,key:c}),B=M.falsy=E.falsy,Q=M.history=E.history,r=M.location=a,s=M.component=E.component,x=M.components=E.components,j=M.route=E.route,y=(M.routes=E.routes,M.router=D),w={falsy:B,history:Q,location:r,component:s,components:x,route:j,router:y};M["default"]=w},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}M.__esModule=!0;var g=t(416),e=I(g),i=t(193),T=I(i);M["default"]=(0,T["default"])(e["default"]),A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A){for(var M in A)if(Object.prototype.hasOwnProperty.call(A,M))return!0;return!1}function e(A,M){function t(M){var t=!(arguments.length<=1||void 0===arguments[1])&&arguments[1],I=arguments.length<=2||void 0===arguments[2]?null:arguments[2],g=void 0;return t&&t!==!0||null!==I?(M={pathname:M,query:t},g=I||!1):(M=A.createLocation(M),g=t),(0,C["default"])(M,g,s.location,s.routes,s.params)}function I(A,t){x&&x.location===A?e(x,t):(0,B["default"])(M,A,function(M,I){M?t(M):I?e(i({},I,{location:A}),t):t()})}function e(A,M){function t(t,g){return t||g?I(t,g):void(0,D["default"])(A,function(t,I){t?M(t):M(null,null,s=i({},A,{components:I}))})}function I(A,t){A?M(A):M(null,t)}var g=(0,N["default"])(s,A),e=g.leaveRoutes,T=g.changeRoutes,E=g.enterRoutes;(0,o.runLeaveHooks)(e,s),e.filter(function(A){return E.indexOf(A)===-1}).forEach(a),(0,o.runChangeHooks)(T,s,A,function(M,g){return 
M||g?I(M,g):void(0,o.runEnterHooks)(E,A,t)})}function T(A){var M=arguments.length<=1||void 0===arguments[1]||arguments[1];return A.__id__||M&&(A.__id__=j++)}function E(A){return A.reduce(function(A,M){return A.push.apply(A,y[T(M)]),A},[])}function n(A,t){(0,B["default"])(M,A,function(M,I){if(null==I)return void t();x=i({},I,{location:A});for(var g=E((0,N["default"])(s,x).leaveRoutes),e=void 0,T=0,o=g.length;null==e&&T=32||13===M?M:0}A.exports=t},function(A,M){"use strict";function t(A){var M=this,t=M.nativeEvent;if(t.getModifierState)return t.getModifierState(A);var I=g[A];return!!I&&!!t[I]}function I(A){return t}var g={Alt:"altKey",Control:"ctrlKey",Meta:"metaKey",Shift:"shiftKey"};A.exports=I},function(A,M){"use strict";function t(A){var M=A.target||A.srcElement||window;return 3===M.nodeType?M.parentNode:M}A.exports=t},function(A,M){"use strict";function t(A){var M=A&&(I&&A[I]||A[g]);if("function"==typeof M)return M}var I="function"==typeof Symbol&&Symbol.iterator,g="@@iterator";A.exports=t},function(A,M,t){"use strict";function I(A){return"function"==typeof A&&"undefined"!=typeof A.prototype&&"function"==typeof A.prototype.mountComponent&&"function"==typeof A.prototype.receiveComponent}function g(A){var M;if(null===A||A===!1)M=new i(g);else if("object"==typeof A){var t=A;!t||"function"!=typeof t.type&&"string"!=typeof t.type?N(!1):void 0,M="string"==typeof t.type?T.createInternalComponent(t):I(t.type)?new t.type(t):new o}else"string"==typeof A||"number"==typeof A?M=T.createInstanceForText(A):N(!1);return M.construct(A),M._mountIndex=0,M._mountImage=null,M}var e=t(434),i=t(214),T=t(220),E=t(4),N=t(2),o=(t(3),function(){});E(o.prototype,e.Mixin,{_instantiateReactComponent:g}),A.exports=g},function(A,M,t){"use strict";/** +!function(){"use strict";function t(){for(var A=[],M=0;M2?t-2:0),g=2;g1){for(var c=Array(C),D=0;D1){for(var a=Array(D),B=0;B3&&void 0!==arguments[3]?arguments[3]:{},N=Boolean(A),C=A||w,D=void 0;D="function"==typeof M?M:M?(0,Q.default)(M):L;var 
B=t||l,r=I.pure,s=void 0===r||r,x=I.withRef,y=void 0!==x&&x,h=s&&B!==l,S=d++;return function(A){function M(A,M,t){var I=B(A,M,t);return I}var t="Connect("+T(A)+")",I=function(I){function T(A,M){g(this,T);var i=e(this,I.call(this,A,M));i.version=S,i.store=A.store||M.store,(0,u.default)(i.store,'Could not find "store" in either the context or '+('props of "'+t+'". ')+"Either wrap the root component in a , "+('or explicitly pass "store" as a prop to "'+t+'".'));var E=i.store.getState();return i.state={storeState:E},i.clearCache(),i}return i(T,I),T.prototype.shouldComponentUpdate=function(){return!s||this.haveOwnPropsChanged||this.hasStoreStateChanged},T.prototype.computeStateProps=function(A,M){if(!this.finalMapStateToProps)return this.configureFinalMapState(A,M);var t=A.getState(),I=this.doStatePropsDependOnOwnProps?this.finalMapStateToProps(t,M):this.finalMapStateToProps(t);return I},T.prototype.configureFinalMapState=function(A,M){var t=C(A.getState(),M),I="function"==typeof t;return this.finalMapStateToProps=I?t:C,this.doStatePropsDependOnOwnProps=1!==this.finalMapStateToProps.length,I?this.computeStateProps(A,M):t},T.prototype.computeDispatchProps=function(A,M){if(!this.finalMapDispatchToProps)return this.configureFinalMapDispatch(A,M);var t=A.dispatch,I=this.doDispatchPropsDependOnOwnProps?this.finalMapDispatchToProps(t,M):this.finalMapDispatchToProps(t);return I},T.prototype.configureFinalMapDispatch=function(A,M){var t=D(A.dispatch,M),I="function"==typeof t;return this.finalMapDispatchToProps=I?t:D,this.doDispatchPropsDependOnOwnProps=1!==this.finalMapDispatchToProps.length,I?this.computeDispatchProps(A,M):t},T.prototype.updateStatePropsIfNeeded=function(){var A=this.computeStateProps(this.store,this.props);return(!this.stateProps||!(0,a.default)(A,this.stateProps))&&(this.stateProps=A,!0)},T.prototype.updateDispatchPropsIfNeeded=function(){var 
A=this.computeDispatchProps(this.store,this.props);return(!this.dispatchProps||!(0,a.default)(A,this.dispatchProps))&&(this.dispatchProps=A,!0)},T.prototype.updateMergedPropsIfNeeded=function(){var A=M(this.stateProps,this.dispatchProps,this.props);return!(this.mergedProps&&h&&(0,a.default)(A,this.mergedProps))&&(this.mergedProps=A,!0)},T.prototype.isSubscribed=function(){return"function"==typeof this.unsubscribe},T.prototype.trySubscribe=function(){N&&!this.unsubscribe&&(this.unsubscribe=this.store.subscribe(this.handleChange.bind(this)),this.handleChange())},T.prototype.tryUnsubscribe=function(){this.unsubscribe&&(this.unsubscribe(),this.unsubscribe=null)},T.prototype.componentDidMount=function(){this.trySubscribe()},T.prototype.componentWillReceiveProps=function(A){s&&(0,a.default)(A,this.props)||(this.haveOwnPropsChanged=!0)},T.prototype.componentWillUnmount=function(){this.tryUnsubscribe(),this.clearCache()},T.prototype.clearCache=function(){this.dispatchProps=null,this.stateProps=null,this.mergedProps=null,this.haveOwnPropsChanged=!0,this.hasStoreStateChanged=!0,this.haveStatePropsBeenPrecalculated=!1,this.statePropsPrecalculationError=null,this.renderedElement=null,this.finalMapDispatchToProps=null,this.finalMapStateToProps=null},T.prototype.handleChange=function(){if(this.unsubscribe){var A=this.store.getState(),M=this.state.storeState;if(!s||M!==A){if(s&&!this.doStatePropsDependOnOwnProps){var t=E(this.updateStatePropsIfNeeded,this);if(!t)return;t===Y&&(this.statePropsPrecalculationError=Y.value),this.haveStatePropsBeenPrecalculated=!0}this.hasStoreStateChanged=!0,this.setState({storeState:A})}}},T.prototype.getWrappedInstance=function(){return(0,u.default)(y,"To access the wrapped instance, you need to specify { withRef: true } as the fourth argument of the connect() call."),this.refs.wrappedInstance},T.prototype.render=function(){var 
M=this.haveOwnPropsChanged,t=this.hasStoreStateChanged,I=this.haveStatePropsBeenPrecalculated,g=this.statePropsPrecalculationError,e=this.renderedElement;if(this.haveOwnPropsChanged=!1,this.hasStoreStateChanged=!1,this.haveStatePropsBeenPrecalculated=!1,this.statePropsPrecalculationError=null,g)throw g;var i=!0,T=!0;s&&e&&(i=t||M&&this.doStatePropsDependOnOwnProps,T=M&&this.doDispatchPropsDependOnOwnProps);var E=!1,N=!1;I?E=!0:i&&(E=this.updateStatePropsIfNeeded()),T&&(N=this.updateDispatchPropsIfNeeded());var C=!0;return C=!!(E||N||M)&&this.updateMergedPropsIfNeeded(),!C&&e?e:(y?this.renderedElement=(0,n.createElement)(A,o({},this.mergedProps,{ref:"wrappedInstance"})):this.renderedElement=(0,n.createElement)(A,this.mergedProps),this.renderedElement)},T}(n.Component);return I.displayName=t,I.WrappedComponent=A,I.contextTypes={store:c.default},I.propTypes={store:c.default},(0,j.default)(I,A)}}M.__esModule=!0;var o=Object.assign||function(A){for(var M=1;M should not have a "'+M+'" prop')}M.__esModule=!0,M.routes=M.route=M.components=M.component=M.history=void 0,M.falsy=I;var g=t(1),e=g.PropTypes.func,i=g.PropTypes.object,T=g.PropTypes.arrayOf,E=g.PropTypes.oneOfType,N=g.PropTypes.element,o=g.PropTypes.shape,n=g.PropTypes.string,C=(M.history=o({listen:e.isRequired,push:e.isRequired,replace:e.isRequired,go:e.isRequired,goBack:e.isRequired,goForward:e.isRequired}),M.component=E([e,n])),c=(M.components=E([C,i]),M.route=E([i,N]));M.routes=E([c,T(c)])},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){var M=A.match(/^https?:\/\/[^\/]*/);return null==M?A:A.substring(M[0].length)}function e(A){var M=g(A),t="",I="",e=M.indexOf("#"); +e!==-1&&(I=M.substring(e),M=M.substring(0,e));var i=M.indexOf("?");return i!==-1&&(t=M.substring(i),M=M.substring(0,i)),""===M&&(M="/"),{pathname:M,search:t,hash:I}}M.__esModule=!0,M.extractPath=g,M.parsePath=e;var i=t(22);I(i)},function(A,M,t){"use strict";function 
I(){g.attachRefs(this,this._currentElement)}var g=t(456),e={mountComponent:function(A,M,t,g){var e=A.mountComponent(M,t,g);return A._currentElement&&null!=A._currentElement.ref&&t.getReactMountReady().enqueue(I,A),e},unmountComponent:function(A){g.detachRefs(A,A._currentElement),A.unmountComponent()},receiveComponent:function(A,M,t,e){var i=A._currentElement;if(M!==i||e!==A._context){var T=g.shouldUpdateRefs(i,M);T&&g.detachRefs(A,i),A.receiveComponent(M,t,e),T&&A._currentElement&&null!=A._currentElement.ref&&t.getReactMountReady().enqueue(I,A)}},performUpdateIfNecessary:function(A,M){A.performUpdateIfNecessary(M)}};A.exports=e},function(A,M,t){"use strict";function I(A,M,t,I){this.dispatchConfig=A,this.dispatchMarker=M,this.nativeEvent=t;var g=this.constructor.Interface;for(var e in g)if(g.hasOwnProperty(e)){var T=g[e];T?this[e]=T(t):"target"===e?this.target=I:this[e]=t[e]}var E=null!=t.defaultPrevented?t.defaultPrevented:t.returnValue===!1;E?this.isDefaultPrevented=i.thatReturnsTrue:this.isDefaultPrevented=i.thatReturnsFalse,this.isPropagationStopped=i.thatReturnsFalse}var g=t(29),e=t(3),i=t(23),T=(t(4),{type:null,target:null,currentTarget:i.thatReturnsNull,eventPhase:null,bubbles:null,cancelable:null,timeStamp:function(A){return A.timeStamp||Date.now()},defaultPrevented:null,isTrusted:null});e(I.prototype,{preventDefault:function(){this.defaultPrevented=!0;var A=this.nativeEvent;A&&(A.preventDefault?A.preventDefault():A.returnValue=!1,this.isDefaultPrevented=i.thatReturnsTrue)},stopPropagation:function(){var A=this.nativeEvent;A&&(A.stopPropagation?A.stopPropagation():A.cancelBubble=!0,this.isPropagationStopped=i.thatReturnsTrue)},persist:function(){this.isPersistent=i.thatReturnsTrue},isPersistent:i.thatReturnsFalse,destructor:function(){var A=this.constructor.Interface;for(var M in A)this[M]=null;this.dispatchConfig=null,this.dispatchMarker=null,this.nativeEvent=null}}),I.Interface=T,I.augmentClass=function(A,M){var 
t=this,I=Object.create(t.prototype);e(I,A.prototype),A.prototype=I,A.prototype.constructor=A,A.Interface=e({},t.Interface,M),A.augmentClass=t.augmentClass,g.addPoolingTo(A,g.fourArgumentPooler)},g.addPoolingTo(I,g.fourArgumentPooler),A.exports=I},function(A,M,t){(function(A){!function(M,t){A.exports=t()}(this,function(){"use strict";function M(){return aI.apply(null,arguments)}function t(A){aI=A}function I(A){return A instanceof Array||"[object Array]"===Object.prototype.toString.call(A)}function g(A){return null!=A&&"[object Object]"===Object.prototype.toString.call(A)}function e(A){var M;for(M in A)return!1;return!0}function i(A){return"number"==typeof value||"[object Number]"===Object.prototype.toString.call(A)}function T(A){return A instanceof Date||"[object Date]"===Object.prototype.toString.call(A)}function E(A,M){var t,I=[];for(t=0;t0)for(t in rI)I=rI[t],g=M[I],B(g)||(A[I]=g);return A}function r(A){Q(this,A),this._d=new Date(null!=A._d?A._d.getTime():NaN),sI===!1&&(sI=!0,M.updateOffset(this),sI=!1)}function s(A){return A instanceof r||null!=A&&null!=A._isAMomentObject}function x(A){return A<0?Math.ceil(A)||0:Math.floor(A)}function j(A){var M=+A,t=0;return 0!==M&&isFinite(M)&&(t=x(M)),t}function y(A,M,t){var I,g=Math.min(A.length,M.length),e=Math.abs(A.length-M.length),i=0;for(I=0;I0?"future":"past"];return l(t)?t(M):t.replace(/%s/i,M)}function F(A,M){var t=A.toLowerCase();SI[t]=SI[t+"s"]=SI[M]=A}function f(A){return"string"==typeof A?SI[A]||SI[A.toLowerCase()]:void 0}function k(A){var M,t,I={};for(t in A)N(A,t)&&(M=f(t),M&&(I[M]=A[t]));return I}function R(A,M){zI[A]=M}function J(A){var M=[];for(var t in A)M.push({unit:t,priority:zI[t]});return M.sort(function(A,M){return A.priority-M.priority}),M}function G(A,t){return function(I){return null!=I?(b(this,A,I),M.updateOffset(this,t),this):H(this,A)}}function H(A,M){return A.isValid()?A._d["get"+(A._isUTC?"UTC":"")+M]():NaN}function b(A,M,t){A.isValid()&&A._d["set"+(A._isUTC?"UTC":"")+M](t)}function X(A){return 
A=f(A),l(this[A])?this[A]():this}function v(A,M){if("object"==typeof A){A=k(A);for(var t=J(A),I=0;I=0;return(e?t?"+":"":"-")+Math.pow(10,Math.max(0,g)).toString().substr(1)+I}function V(A,M,t,I){var g=I;"string"==typeof I&&(g=function(){return this[I]()}),A&&(mI[A]=g),M&&(mI[M[0]]=function(){return W(g.apply(this,arguments),M[1],M[2])}),t&&(mI[t]=function(){return this.localeData().ordinal(g.apply(this,arguments),A)})}function P(A){return A.match(/\[[\s\S]/)?A.replace(/^\[|\]$/g,""):A.replace(/\\/g,"")}function Z(A){var M,t,I=A.match(UI);for(M=0,t=I.length;M=0&&pI.test(A);)A=A.replace(pI,t),pI.lastIndex=0,I-=1;return A}function _(A,M,t){$I[A]=l(M)?M:function(A,I){return A&&t?t:M}}function $(A,M){return N($I,A)?$I[A](M._strict,M._locale):new RegExp(AA(A))}function AA(A){return MA(A.replace("\\","").replace(/\\(\[)|\\(\])|\[([^\]\[]*)\]|\\(.)/g,function(A,M,t,I,g){return M||t||I||g}))}function MA(A){return A.replace(/[-\/\\^$*+?.()|[\]{}]/g,"\\$&")}function tA(A,M){var t,I=M;for("string"==typeof A&&(A=[A]),i(M)&&(I=function(A,t){t[M]=j(A)}),t=0;t=0&&isFinite(T.getFullYear())&&T.setFullYear(A),T}function xA(A){var M=new Date(Date.UTC.apply(null,arguments));return A<100&&A>=0&&isFinite(M.getUTCFullYear())&&M.setUTCFullYear(A),M}function jA(A,M,t){var I=7+M-t,g=(7+xA(A,0,I).getUTCDay()-M)%7;return-g+I-1}function yA(A,M,t,I,g){var e,i,T=(7+t-I)%7,E=jA(A,I,g),N=1+7*(M-1)+T+E;return N<=0?(e=A-1,i=BA(e)+N):N>BA(A)?(e=A+1,i=N-BA(A)):(e=A,i=N),{year:e,dayOfYear:i}}function uA(A,M,t){var I,g,e=jA(A.year(),M,t),i=Math.floor((A.dayOfYear()-e-1)/7)+1;return i<1?(g=A.year()-1,I=i+wA(g,M,t)):i>wA(A.year(),M,t)?(I=i-wA(A.year(),M,t),g=A.year()+1):(g=A.year(),I=i),{week:I,year:g}}function wA(A,M,t){var I=jA(A,M,t),g=jA(A+1,M,t);return(BA(A)-I+g)/7}function LA(A){return uA(A,this._week.dow,this._week.doy).week}function lA(){return this._week.dow}function YA(){return this._week.doy}function dA(A){var M=this.localeData().week(this);return null==A?M:this.add(7*(A-M),"d")}function 
hA(A){var M=uA(this,1,4).week;return null==A?M:this.add(7*(A-M),"d")}function SA(A,M){return"string"!=typeof A?A:isNaN(A)?(A=M.weekdaysParse(A),"number"==typeof A?A:null):parseInt(A,10)}function zA(A,M){return"string"==typeof A?M.weekdaysParse(A)%7||7:isNaN(A)?null:A}function UA(A,M){return A?I(this._weekdays)?this._weekdays[A.day()]:this._weekdays[this._weekdays.isFormat.test(M)?"format":"standalone"][A.day()]:this._weekdays}function pA(A){return A?this._weekdaysShort[A.day()]:this._weekdaysShort}function OA(A){return A?this._weekdaysMin[A.day()]:this._weekdaysMin}function mA(A,M,t){var I,g,e,i=A.toLocaleLowerCase();if(!this._weekdaysParse)for(this._weekdaysParse=[],this._shortWeekdaysParse=[],this._minWeekdaysParse=[],I=0;I<7;++I)e=n([2e3,1]).day(I),this._minWeekdaysParse[I]=this.weekdaysMin(e,"").toLocaleLowerCase(),this._shortWeekdaysParse[I]=this.weekdaysShort(e,"").toLocaleLowerCase(),this._weekdaysParse[I]=this.weekdays(e,"").toLocaleLowerCase();return t?"dddd"===M?(g=og.call(this._weekdaysParse,i),g!==-1?g:null):"ddd"===M?(g=og.call(this._shortWeekdaysParse,i),g!==-1?g:null):(g=og.call(this._minWeekdaysParse,i),g!==-1?g:null):"dddd"===M?(g=og.call(this._weekdaysParse,i),g!==-1?g:(g=og.call(this._shortWeekdaysParse,i),g!==-1?g:(g=og.call(this._minWeekdaysParse,i),g!==-1?g:null))):"ddd"===M?(g=og.call(this._shortWeekdaysParse,i),g!==-1?g:(g=og.call(this._weekdaysParse,i),g!==-1?g:(g=og.call(this._minWeekdaysParse,i),g!==-1?g:null))):(g=og.call(this._minWeekdaysParse,i),g!==-1?g:(g=og.call(this._weekdaysParse,i),g!==-1?g:(g=og.call(this._shortWeekdaysParse,i),g!==-1?g:null)))}function FA(A,M,t){var I,g,e;if(this._weekdaysParseExact)return mA.call(this,A,M,t);for(this._weekdaysParse||(this._weekdaysParse=[],this._minWeekdaysParse=[],this._shortWeekdaysParse=[],this._fullWeekdaysParse=[]),I=0;I<7;I++){if(g=n([2e3,1]).day(I),t&&!this._fullWeekdaysParse[I]&&(this._fullWeekdaysParse[I]=new 
RegExp("^"+this.weekdays(g,"").replace(".",".?")+"$","i"),this._shortWeekdaysParse[I]=new RegExp("^"+this.weekdaysShort(g,"").replace(".",".?")+"$","i"),this._minWeekdaysParse[I]=new RegExp("^"+this.weekdaysMin(g,"").replace(".",".?")+"$","i")),this._weekdaysParse[I]||(e="^"+this.weekdays(g,"")+"|^"+this.weekdaysShort(g,"")+"|^"+this.weekdaysMin(g,""),this._weekdaysParse[I]=new RegExp(e.replace(".",""),"i")),t&&"dddd"===M&&this._fullWeekdaysParse[I].test(A))return I;if(t&&"ddd"===M&&this._shortWeekdaysParse[I].test(A))return I;if(t&&"dd"===M&&this._minWeekdaysParse[I].test(A))return I;if(!t&&this._weekdaysParse[I].test(A))return I}}function fA(A){if(!this.isValid())return null!=A?this:NaN;var M=this._isUTC?this._d.getUTCDay():this._d.getDay();return null!=A?(A=SA(A,this.localeData()),this.add(A-M,"d")):M}function kA(A){if(!this.isValid())return null!=A?this:NaN;var M=(this.day()+7-this.localeData()._week.dow)%7;return null==A?M:this.add(A-M,"d")}function RA(A){if(!this.isValid())return null!=A?this:NaN;if(null!=A){var M=zA(A,this.localeData());return this.day(this.day()%7?M:M-7)}return this.day()||7}function JA(A){return this._weekdaysParseExact?(N(this,"_weekdaysRegex")||bA.call(this),A?this._weekdaysStrictRegex:this._weekdaysRegex):(N(this,"_weekdaysRegex")||(this._weekdaysRegex=jg),this._weekdaysStrictRegex&&A?this._weekdaysStrictRegex:this._weekdaysRegex)}function GA(A){return this._weekdaysParseExact?(N(this,"_weekdaysRegex")||bA.call(this),A?this._weekdaysShortStrictRegex:this._weekdaysShortRegex):(N(this,"_weekdaysShortRegex")||(this._weekdaysShortRegex=yg),this._weekdaysShortStrictRegex&&A?this._weekdaysShortStrictRegex:this._weekdaysShortRegex)}function HA(A){return this._weekdaysParseExact?(N(this,"_weekdaysRegex")||bA.call(this),A?this._weekdaysMinStrictRegex:this._weekdaysMinRegex):(N(this,"_weekdaysMinRegex")||(this._weekdaysMinRegex=ug),this._weekdaysMinStrictRegex&&A?this._weekdaysMinStrictRegex:this._weekdaysMinRegex)}function bA(){function 
A(A,M){return M.length-A.length}var M,t,I,g,e,i=[],T=[],E=[],N=[];for(M=0;M<7;M++)t=n([2e3,1]).day(M),I=this.weekdaysMin(t,""),g=this.weekdaysShort(t,""),e=this.weekdays(t,""),i.push(I),T.push(g),E.push(e),N.push(I),N.push(g),N.push(e);for(i.sort(A),T.sort(A),E.sort(A),N.sort(A),M=0;M<7;M++)T[M]=MA(T[M]),E[M]=MA(E[M]),N[M]=MA(N[M]);this._weekdaysRegex=new RegExp("^("+N.join("|")+")","i"),this._weekdaysShortRegex=this._weekdaysRegex,this._weekdaysMinRegex=this._weekdaysRegex,this._weekdaysStrictRegex=new RegExp("^("+E.join("|")+")","i"),this._weekdaysShortStrictRegex=new RegExp("^("+T.join("|")+")","i"),this._weekdaysMinStrictRegex=new RegExp("^("+i.join("|")+")","i")}function XA(){return this.hours()%12||12}function vA(){return this.hours()||24}function WA(A,M){V(A,0,0,function(){return this.localeData().meridiem(this.hours(),this.minutes(),M)})}function VA(A,M){return M._meridiemParse}function PA(A){return"p"===(A+"").toLowerCase().charAt(0)}function ZA(A,M,t){return A>11?t?"pm":"PM":t?"am":"AM"}function KA(A){return A?A.toLowerCase().replace("_","-"):A}function qA(A){for(var M,t,I,g,e=0;e0;){if(I=_A(g.slice(0,M).join("-")))return I;if(t&&t.length>=M&&y(g,t,!0)>=M-1)break;M--}e++}return null}function _A(M){var t=null;if(!dg[M]&&"undefined"!=typeof A&&A&&A.exports)try{t=wg._abbr,!function(){var A=new Error('Cannot find module "./locale"');throw A.code="MODULE_NOT_FOUND",A}(),$A(t)}catch(A){}return dg[M]}function $A(A,M){var t;return A&&(t=B(M)?tM(A):AM(A,M),t&&(wg=t)),wg._abbr}function AM(A,M){if(null!==M){var t=Yg;if(M.abbr=A,null!=dg[A])L("defineLocaleOverride","use moment.updateLocale(localeName, config) to change an existing locale. 
moment.defineLocale(localeName, config) should only be used for creating a new locale See http://momentjs.com/guides/#/warnings/define-locale/ for more info."),t=dg[A]._config;else if(null!=M.parentLocale){if(null==dg[M.parentLocale])return hg[M.parentLocale]||(hg[M.parentLocale]=[]),hg[M.parentLocale].push({name:A,config:M}),null;t=dg[M.parentLocale]._config}return dg[A]=new h(d(t,M)),hg[A]&&hg[A].forEach(function(A){AM(A.name,A.config)}),$A(A),dg[A]}return delete dg[A],null}function MM(A,M){if(null!=M){var t,I=Yg;null!=dg[A]&&(I=dg[A]._config),M=d(I,M),t=new h(M),t.parentLocale=dg[A],dg[A]=t,$A(A)}else null!=dg[A]&&(null!=dg[A].parentLocale?dg[A]=dg[A].parentLocale:null!=dg[A]&&delete dg[A]);return dg[A]}function tM(A){var M;if(A&&A._locale&&A._locale._abbr&&(A=A._locale._abbr),!A)return wg;if(!I(A)){if(M=_A(A))return M;A=[A]}return qA(A)}function IM(){return uI(dg)}function gM(A){var M,t=A._a;return t&&c(A).overflow===-2&&(M=t[tg]<0||t[tg]>11?tg:t[Ig]<1||t[Ig]>eA(t[Mg],t[tg])?Ig:t[gg]<0||t[gg]>24||24===t[gg]&&(0!==t[eg]||0!==t[ig]||0!==t[Tg])?gg:t[eg]<0||t[eg]>59?eg:t[ig]<0||t[ig]>59?ig:t[Tg]<0||t[Tg]>999?Tg:-1,c(A)._overflowDayOfYear&&(MIg)&&(M=Ig),c(A)._overflowWeeks&&M===-1&&(M=Eg),c(A)._overflowWeekday&&M===-1&&(M=Ng),c(A).overflow=M),A}function eM(A){var M,t,I,g,e,i,T=A._i,E=Sg.exec(T)||zg.exec(T);if(E){for(c(A).iso=!0,M=0,t=pg.length;MBA(g)&&(c(A)._overflowDayOfYear=!0),t=xA(g,0,A._dayOfYear),A._a[tg]=t.getUTCMonth(),A._a[Ig]=t.getUTCDate()),M=0;M<3&&null==A._a[M];++M)A._a[M]=e[M]=I[M];for(;M<7;M++)A._a[M]=e[M]=null==A._a[M]?2===M?1:0:A._a[M];24===A._a[gg]&&0===A._a[eg]&&0===A._a[ig]&&0===A._a[Tg]&&(A._nextDay=!0,A._a[gg]=0),A._d=(A._useUTC?xA:sA).apply(null,e),null!=A._tzm&&A._d.setUTCMinutes(A._d.getUTCMinutes()-A._tzm),A._nextDay&&(A._a[gg]=24)}}function oM(A){var 
M,t,I,g,e,i,T,E;if(M=A._w,null!=M.GG||null!=M.W||null!=M.E)e=1,i=4,t=TM(M.GG,A._a[Mg],uA(sM(),1,4).year),I=TM(M.W,1),g=TM(M.E,1),(g<1||g>7)&&(E=!0);else{e=A._locale._week.dow,i=A._locale._week.doy;var N=uA(sM(),e,i);t=TM(M.gg,A._a[Mg],N.year),I=TM(M.w,N.week),null!=M.d?(g=M.d,(g<0||g>6)&&(E=!0)):null!=M.e?(g=M.e+e,(M.e<0||M.e>6)&&(E=!0)):g=e}I<1||I>wA(t,e,i)?c(A)._overflowWeeks=!0:null!=E?c(A)._overflowWeekday=!0:(T=yA(t,I,g,e,i),A._a[Mg]=T.year,A._dayOfYear=T.dayOfYear)}function nM(A){if(A._f===M.ISO_8601)return void eM(A);A._a=[],c(A).empty=!0;var t,I,g,e,i,T=""+A._i,E=T.length,N=0;for(g=q(A._f,A._locale).match(UI)||[],t=0;t0&&c(A).unusedInput.push(i),T=T.slice(T.indexOf(I)+I.length),N+=I.length),mI[e]?(I?c(A).empty=!1:c(A).unusedTokens.push(e),gA(e,I,A)):A._strict&&!I&&c(A).unusedTokens.push(e);c(A).charsLeftOver=E-N,T.length>0&&c(A).unusedInput.push(T),A._a[gg]<=12&&c(A).bigHour===!0&&A._a[gg]>0&&(c(A).bigHour=void 0),c(A).parsedDateParts=A._a.slice(0),c(A).meridiem=A._meridiem,A._a[gg]=CM(A._locale,A._a[gg],A._meridiem),NM(A),gM(A)}function CM(A,M,t){var I;return null==t?M:null!=A.meridiemHour?A.meridiemHour(M,t):null!=A.isPM?(I=A.isPM(t),I&&M<12&&(M+=12),I||12!==M||(M=0),M):M}function cM(A){var M,t,I,g,e;if(0===A._f.length)return c(A).invalidFormat=!0,void(A._d=new Date(NaN));for(g=0;gthis.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()}function fM(){if(!B(this._isDSTShifted))return this._isDSTShifted;var A={};if(Q(A,this),A=BM(A),A._a){var M=A._isUTC?n(A._a):sM(A._a);this._isDSTShifted=this.isValid()&&y(A._a,M.toArray())>0}else this._isDSTShifted=!1;return this._isDSTShifted}function kM(){return!!this.isValid()&&!this._isUTC}function RM(){return!!this.isValid()&&this._isUTC}function JM(){return!!this.isValid()&&(this._isUTC&&0===this._offset)}function GM(A,M){var t,I,g,e=A,T=null;return 
wM(A)?e={ms:A._milliseconds,d:A._days,M:A._months}:i(A)?(e={},M?e[M]=A:e.milliseconds=A):(T=Jg.exec(A))?(t="-"===T[1]?-1:1,e={y:0,d:j(T[Ig])*t,h:j(T[gg])*t,m:j(T[eg])*t,s:j(T[ig])*t,ms:j(LM(1e3*T[Tg]))*t}):(T=Gg.exec(A))?(t="-"===T[1]?-1:1,e={y:HM(T[2],t),M:HM(T[3],t),w:HM(T[4],t),d:HM(T[5],t),h:HM(T[6],t),m:HM(T[7],t),s:HM(T[8],t)}):null==e?e={}:"object"==typeof e&&("from"in e||"to"in e)&&(g=XM(sM(e.from),sM(e.to)),e={},e.ms=g.milliseconds,e.M=g.months),I=new uM(e),wM(A)&&N(A,"_locale")&&(I._locale=A._locale),I}function HM(A,M){var t=A&&parseFloat(A.replace(",","."));return(isNaN(t)?0:t)*M}function bM(A,M){var t={milliseconds:0,months:0};return t.months=M.month()-A.month()+12*(M.year()-A.year()),A.clone().add(t.months,"M").isAfter(M)&&--t.months,t.milliseconds=+M-+A.clone().add(t.months,"M"),t}function XM(A,M){var t;return A.isValid()&&M.isValid()?(M=dM(M,A),A.isBefore(M)?t=bM(A,M):(t=bM(M,A),t.milliseconds=-t.milliseconds,t.months=-t.months),t):{milliseconds:0,months:0}}function vM(A,M){return function(t,I){var g,e;return null===I||isNaN(+I)||(L(M,"moment()."+M+"(period, number) is deprecated. Please use moment()."+M+"(number, period). 
See http://momentjs.com/guides/#/warnings/add-inverted-param/ for more info."),e=t,t=I,I=e),t="string"==typeof t?+t:t,g=GM(t,I),WM(this,g,A),this}}function WM(A,t,I,g){var e=t._milliseconds,i=LM(t._days),T=LM(t._months);A.isValid()&&(g=null==g||g,e&&A._d.setTime(A._d.valueOf()+e*I),i&&b(A,"Date",H(A,"Date")+i*I),T&&oA(A,H(A,"Month")+T*I),g&&M.updateOffset(A,i||T))}function VM(A,M){var t=A.diff(M,"days",!0);return t<-6?"sameElse":t<-1?"lastWeek":t<0?"lastDay":t<1?"sameDay":t<2?"nextDay":t<7?"nextWeek":"sameElse"}function PM(A,t){var I=A||sM(),g=dM(I,this).startOf("day"),e=M.calendarFormat(this,g)||"sameElse",i=t&&(l(t[e])?t[e].call(this,I):t[e]);return this.format(i||this.localeData().calendar(e,this,sM(I)))}function ZM(){return new r(this)}function KM(A,M){var t=s(A)?A:sM(A);return!(!this.isValid()||!t.isValid())&&(M=f(B(M)?"millisecond":M),"millisecond"===M?this.valueOf()>t.valueOf():t.valueOf()e&&(M=e),Ut.call(this,A,M,t,I,g))}function Ut(A,M,t,I,g){var e=yA(A,M,t,I,g),i=xA(e.year,0,e.dayOfYear);return this.year(i.getUTCFullYear()),this.month(i.getUTCMonth()),this.date(i.getUTCDate()),this}function pt(A){return null==A?Math.ceil((this.month()+1)/3):this.month(3*(A-1)+this.month()%3)}function Ot(A){var M=Math.round((this.clone().startOf("day")-this.clone().startOf("year"))/864e5)+1;return null==A?M:this.add(A-M,"d")}function mt(A,M){M[Tg]=j(1e3*("0."+A))}function Ft(){return this._isUTC?"UTC":""}function ft(){return this._isUTC?"Coordinated Universal Time":""}function kt(A){return sM(1e3*A)}function Rt(){return sM.apply(null,arguments).parseZone()}function Jt(A){return A}function Gt(A,M,t,I){var g=tM(),e=n().set(I,M);return g[t](e,A)}function Ht(A,M,t){if(i(A)&&(M=A,A=void 0),A=A||"",null!=M)return Gt(A,M,t,"month");var I,g=[];for(I=0;I<12;I++)g[I]=Gt(A,I,t,"month");return g}function bt(A,M,t,I){"boolean"==typeof A?(i(M)&&(t=M,M=void 0),M=M||""):(M=A,t=M,A=!1,i(M)&&(t=M,M=void 0),M=M||"");var g=tM(),e=A?g._week.dow:0;if(null!=t)return Gt(M,(t+e)%7,I,"day");var 
T,E=[];for(T=0;T<7;T++)E[T]=Gt(M,(T+e)%7,I,"day");return E}function Xt(A,M){return Ht(A,M,"months")}function vt(A,M){return Ht(A,M,"monthsShort")}function Wt(A,M,t){return bt(A,M,t,"weekdays")}function Vt(A,M,t){return bt(A,M,t,"weekdaysShort")}function Pt(A,M,t){return bt(A,M,t,"weekdaysMin")}function Zt(){var A=this._data;return this._milliseconds=_g(this._milliseconds),this._days=_g(this._days),this._months=_g(this._months),A.milliseconds=_g(A.milliseconds),A.seconds=_g(A.seconds),A.minutes=_g(A.minutes),A.hours=_g(A.hours),A.months=_g(A.months),A.years=_g(A.years),this}function Kt(A,M,t,I){var g=GM(M,t);return A._milliseconds+=I*g._milliseconds,A._days+=I*g._days,A._months+=I*g._months,A._bubble()}function qt(A,M){return Kt(this,A,M,1)}function _t(A,M){return Kt(this,A,M,-1)}function $t(A){return A<0?Math.floor(A):Math.ceil(A)}function AI(){var A,M,t,I,g,e=this._milliseconds,i=this._days,T=this._months,E=this._data;return e>=0&&i>=0&&T>=0||e<=0&&i<=0&&T<=0||(e+=864e5*$t(tI(T)+i),i=0,T=0),E.milliseconds=e%1e3,A=x(e/1e3),E.seconds=A%60,M=x(A/60),E.minutes=M%60,t=x(M/60),E.hours=t%24,i+=x(t/24),g=x(MI(i)),T+=g,i-=$t(tI(g)),I=x(T/12),T%=12,E.days=i,E.months=T,E.years=I,this}function MI(A){return 4800*A/146097}function tI(A){return 146097*A/4800}function II(A){var M,t,I=this._milliseconds;if(A=f(A),"month"===A||"year"===A)return M=this._days+I/864e5,t=this._months+MI(M),"month"===A?t:t/12;switch(M=this._days+Math.round(tI(this._months)),A){case"week":return M/7+I/6048e5;case"day":return M+I/864e5;case"hour":return 24*M+I/36e5;case"minute":return 1440*M+I/6e4;case"second":return 86400*M+I/1e3;case"millisecond":return Math.floor(864e5*M)+I;default:throw new Error("Unknown unit "+A)}}function gI(){return this._milliseconds+864e5*this._days+this._months%12*2592e6+31536e6*j(this._months/12)}function eI(A){return function(){return this.as(A)}}function iI(A){return A=f(A),this[A+"s"]()}function TI(A){return function(){return this._data[A]}}function EI(){return 
x(this.days()/7)}function NI(A,M,t,I,g){return g.relativeTime(M||1,!!t,A,I)}function oI(A,M,t){var I=GM(A).abs(),g=De(I.as("s")),e=De(I.as("m")),i=De(I.as("h")),T=De(I.as("d")),E=De(I.as("M")),N=De(I.as("y")),o=g0,o[4]=t,NI.apply(null,o)}function nI(A){return void 0===A?De:"function"==typeof A&&(De=A,!0)}function CI(A,M){return void 0!==ae[A]&&(void 0===M?ae[A]:(ae[A]=M,!0))}function cI(A){var M=this.localeData(),t=oI(this,!A,M);return A&&(t=M.pastFuture(+this,t)),M.postformat(t)}function DI(){var A,M,t,I=Be(this._milliseconds)/1e3,g=Be(this._days),e=Be(this._months);A=x(I/60),M=x(A/60),I%=60,A%=60,t=x(e/12),e%=12;var i=t,T=e,E=g,N=M,o=A,n=I,C=this.asSeconds();return C?(C<0?"-":"")+"P"+(i?i+"Y":"")+(T?T+"M":"")+(E?E+"D":"")+(N||o||n?"T":"")+(N?N+"H":"")+(o?o+"M":"")+(n?n+"S":""):"P0D"}var aI,BI;BI=Array.prototype.some?Array.prototype.some:function(A){for(var M=Object(this),t=M.length>>>0,I=0;I68?1900:2e3)};var Bg=G("FullYear",!0);V("w",["ww",2],"wo","week"),V("W",["WW",2],"Wo","isoWeek"),F("week","w"),F("isoWeek","W"),R("week",5),R("isoWeek",5),_("w",GI),_("ww",GI,fI),_("W",GI),_("WW",GI,fI),IA(["w","ww","W","WW"],function(A,M,t,I){M[I.substr(0,1)]=j(A)});var Qg={dow:0,doy:6};V("d",0,"do","day"),V("dd",0,0,function(A){return this.localeData().weekdaysMin(this,A)}),V("ddd",0,0,function(A){return this.localeData().weekdaysShort(this,A)}),V("dddd",0,0,function(A){return this.localeData().weekdays(this,A)}),V("e",0,0,"weekday"),V("E",0,0,"isoWeekday"),F("day","d"),F("weekday","e"),F("isoWeekday","E"),R("day",11),R("weekday",11),R("isoWeekday",11),_("d",GI),_("e",GI),_("E",GI),_("dd",function(A,M){return M.weekdaysMinRegex(A)}),_("ddd",function(A,M){return M.weekdaysShortRegex(A)}),_("dddd",function(A,M){return M.weekdaysRegex(A)}),IA(["dd","ddd","dddd"],function(A,M,t,I){var g=t._locale.weekdaysParse(A,I,t._strict);null!=g?M.d=g:c(t).invalidWeekday=A}),IA(["d","e","E"],function(A,M,t,I){M[I]=j(A)});var 
rg="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),sg="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),xg="Su_Mo_Tu_We_Th_Fr_Sa".split("_"),jg=_I,yg=_I,ug=_I;V("H",["HH",2],0,"hour"),V("h",["hh",2],0,XA),V("k",["kk",2],0,vA),V("hmm",0,0,function(){return""+XA.apply(this)+W(this.minutes(),2)}),V("hmmss",0,0,function(){return""+XA.apply(this)+W(this.minutes(),2)+W(this.seconds(),2)}),V("Hmm",0,0,function(){return""+this.hours()+W(this.minutes(),2)}),V("Hmmss",0,0,function(){return""+this.hours()+W(this.minutes(),2)+W(this.seconds(),2)}),WA("a",!0),WA("A",!1),F("hour","h"),R("hour",13),_("a",VA),_("A",VA),_("H",GI),_("h",GI),_("HH",GI,fI),_("hh",GI,fI),_("hmm",HI),_("hmmss",bI),_("Hmm",HI),_("Hmmss",bI),tA(["H","HH"],gg),tA(["a","A"],function(A,M,t){t._isPm=t._locale.isPM(A),t._meridiem=A}),tA(["h","hh"],function(A,M,t){M[gg]=j(A),c(t).bigHour=!0}),tA("hmm",function(A,M,t){var I=A.length-2;M[gg]=j(A.substr(0,I)),M[eg]=j(A.substr(I)),c(t).bigHour=!0}),tA("hmmss",function(A,M,t){var I=A.length-4,g=A.length-2;M[gg]=j(A.substr(0,I)),M[eg]=j(A.substr(I,2)),M[ig]=j(A.substr(g)),c(t).bigHour=!0}),tA("Hmm",function(A,M,t){var I=A.length-2;M[gg]=j(A.substr(0,I)),M[eg]=j(A.substr(I))}),tA("Hmmss",function(A,M,t){var I=A.length-4,g=A.length-2;M[gg]=j(A.substr(0,I)),M[eg]=j(A.substr(I,2)),M[ig]=j(A.substr(g))});var wg,Lg=/[ap]\.?m?\.?/i,lg=G("Hours",!0),Yg={calendar:wI,longDateFormat:LI,invalidDate:lI,ordinal:YI,ordinalParse:dI,relativeTime:hI,months:Cg,monthsShort:cg,week:Qg,weekdays:rg,weekdaysMin:xg,weekdaysShort:sg,meridiemParse:Lg},dg={},hg={},Sg=/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,zg=/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d))(?:(T| 
)(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,Ug=/Z|[+-]\d\d(?::?\d\d)?/,pg=[["YYYYYY-MM-DD",/[+-]\d{6}-\d\d-\d\d/],["YYYY-MM-DD",/\d{4}-\d\d-\d\d/],["GGGG-[W]WW-E",/\d{4}-W\d\d-\d/],["GGGG-[W]WW",/\d{4}-W\d\d/,!1],["YYYY-DDD",/\d{4}-\d{3}/],["YYYY-MM",/\d{4}-\d\d/,!1],["YYYYYYMMDD",/[+-]\d{10}/],["YYYYMMDD",/\d{8}/],["GGGG[W]WWE",/\d{4}W\d{3}/],["GGGG[W]WW",/\d{4}W\d{2}/,!1],["YYYYDDD",/\d{7}/]],Og=[["HH:mm:ss.SSSS",/\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss,SSSS",/\d\d:\d\d:\d\d,\d+/],["HH:mm:ss",/\d\d:\d\d:\d\d/],["HH:mm",/\d\d:\d\d/],["HHmmss.SSSS",/\d\d\d\d\d\d\.\d+/],["HHmmss,SSSS",/\d\d\d\d\d\d,\d+/],["HHmmss",/\d\d\d\d\d\d/],["HHmm",/\d\d\d\d/],["HH",/\d\d/]],mg=/^\/?Date\((\-?\d+)/i;M.createFromInputFallback=w("value provided is not in a recognized ISO format. moment construction falls back to js Date(), which is not reliable across all browsers and versions. Non ISO date formats are discouraged and will be removed in an upcoming major release. Please refer to http://momentjs.com/guides/#/warnings/js-date/ for more info.",function(A){A._d=new Date(A._i+(A._useUTC?" UTC":""))}),M.ISO_8601=function(){};var Fg=w("moment().min is deprecated, use moment.max instead. http://momentjs.com/guides/#/warnings/min-max/",function(){var A=sM.apply(null,arguments);return this.isValid()&&A.isValid()?Athis?this:A:a()}),kg=function(){return Date.now?Date.now():+new Date};lM("Z",":"),lM("ZZ",""),_("Z",KI),_("ZZ",KI),tA(["Z","ZZ"],function(A,M,t){t._useUTC=!0,t._tzm=YM(KI,A)});var Rg=/([\+\-]|\d\d)/gi;M.updateOffset=function(){};var Jg=/^(\-)?(?:(\d*)[. ])?(\d+)\:(\d+)(?:\:(\d+)(\.\d*)?)?$/,Gg=/^(-)?P(?:(-?[0-9,.]*)Y)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)W)?(?:(-?[0-9,.]*)D)?(?:T(?:(-?[0-9,.]*)H)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)S)?)?$/;GM.fn=uM.prototype;var Hg=vM(1,"add"),bg=vM(-1,"subtract");M.defaultFormat="YYYY-MM-DDTHH:mm:ssZ",M.defaultFormatUtc="YYYY-MM-DDTHH:mm:ss[Z]";var Xg=w("moment().lang() is deprecated. 
Instead, use moment().localeData() to get the language configuration. Use moment().locale() to change languages.",function(A){return void 0===A?this.localeData():this.locale(A)});V(0,["gg",2],0,function(){return this.weekYear()%100}),V(0,["GG",2],0,function(){return this.isoWeekYear()%100}),lt("gggg","weekYear"),lt("ggggg","weekYear"),lt("GGGG","isoWeekYear"),lt("GGGGG","isoWeekYear"),F("weekYear","gg"),F("isoWeekYear","GG"),R("weekYear",1),R("isoWeekYear",1),_("G",PI),_("g",PI),_("GG",GI,fI),_("gg",GI,fI),_("GGGG",vI,RI),_("gggg",vI,RI),_("GGGGG",WI,JI),_("ggggg",WI,JI),IA(["gggg","ggggg","GGGG","GGGGG"],function(A,M,t,I){M[I.substr(0,2)]=j(A)}),IA(["gg","GG"],function(A,t,I,g){t[g]=M.parseTwoDigitYear(A)}),V("Q",0,"Qo","quarter"),F("quarter","Q"),R("quarter",7),_("Q",FI),tA("Q",function(A,M){M[tg]=3*(j(A)-1)}),V("D",["DD",2],"Do","date"),F("date","D"),R("date",9),_("D",GI),_("DD",GI,fI),_("Do",function(A,M){return A?M._ordinalParse:M._ordinalParseLenient}),tA(["D","DD"],Ig),tA("Do",function(A,M){M[Ig]=j(A.match(GI)[0],10)});var vg=G("Date",!0);V("DDD",["DDDD",3],"DDDo","dayOfYear"),F("dayOfYear","DDD"),R("dayOfYear",4),_("DDD",XI),_("DDDD",kI),tA(["DDD","DDDD"],function(A,M,t){t._dayOfYear=j(A)}),V("m",["mm",2],0,"minute"),F("minute","m"),R("minute",14),_("m",GI),_("mm",GI,fI),tA(["m","mm"],eg);var Wg=G("Minutes",!1);V("s",["ss",2],0,"second"),F("second","s"),R("second",15),_("s",GI),_("ss",GI,fI),tA(["s","ss"],ig);var Vg=G("Seconds",!1);V("S",0,0,function(){return~~(this.millisecond()/100)}),V(0,["SS",2],0,function(){return~~(this.millisecond()/10)}),V(0,["SSS",3],0,"millisecond"),V(0,["SSSS",4],0,function(){return 10*this.millisecond()}),V(0,["SSSSS",5],0,function(){return 100*this.millisecond()}),V(0,["SSSSSS",6],0,function(){return 1e3*this.millisecond()}),V(0,["SSSSSSS",7],0,function(){return 1e4*this.millisecond()}),V(0,["SSSSSSSS",8],0,function(){return 1e5*this.millisecond()}),V(0,["SSSSSSSSS",9],0,function(){return 
1e6*this.millisecond()}),F("millisecond","ms"),R("millisecond",16),_("S",XI,FI),_("SS",XI,fI),_("SSS",XI,kI);var Pg;for(Pg="SSSS";Pg.length<=9;Pg+="S")_(Pg,VI);for(Pg="S";Pg.length<=9;Pg+="S")tA(Pg,mt);var Zg=G("Milliseconds",!1);V("z",0,0,"zoneAbbr"),V("zz",0,0,"zoneName");var Kg=r.prototype;Kg.add=Hg,Kg.calendar=PM,Kg.clone=ZM,Kg.diff=tt,Kg.endOf=at,Kg.format=Tt,Kg.from=Et,Kg.fromNow=Nt,Kg.to=ot,Kg.toNow=nt,Kg.get=X,Kg.invalidAt=wt,Kg.isAfter=KM,Kg.isBefore=qM,Kg.isBetween=_M,Kg.isSame=$M,Kg.isSameOrAfter=At,Kg.isSameOrBefore=Mt,Kg.isValid=yt,Kg.lang=Xg,Kg.locale=Ct,Kg.localeData=ct,Kg.max=fg,Kg.min=Fg,Kg.parsingFlags=ut,Kg.set=v,Kg.startOf=Dt,Kg.subtract=bg,Kg.toArray=st,Kg.toObject=xt,Kg.toDate=rt,Kg.toISOString=et,Kg.inspect=it,Kg.toJSON=jt,Kg.toString=gt,Kg.unix=Qt,Kg.valueOf=Bt,Kg.creationData=Lt,Kg.year=Bg,Kg.isLeapYear=rA,Kg.weekYear=Yt,Kg.isoWeekYear=dt,Kg.quarter=Kg.quarters=pt,Kg.month=nA,Kg.daysInMonth=CA,Kg.week=Kg.weeks=dA,Kg.isoWeek=Kg.isoWeeks=hA,Kg.weeksInYear=St,Kg.isoWeeksInYear=ht,Kg.date=vg,Kg.day=Kg.days=fA,Kg.weekday=kA,Kg.isoWeekday=RA,Kg.dayOfYear=Ot,Kg.hour=Kg.hours=lg,Kg.minute=Kg.minutes=Wg,Kg.second=Kg.seconds=Vg,Kg.millisecond=Kg.milliseconds=Zg,Kg.utcOffset=SM,Kg.utc=UM,Kg.local=pM,Kg.parseZone=OM,Kg.hasAlignedHourOffset=mM,Kg.isDST=FM,Kg.isLocal=kM,Kg.isUtcOffset=RM,Kg.isUtc=JM,Kg.isUTC=JM,Kg.zoneAbbr=Ft,Kg.zoneName=ft,Kg.dates=w("dates accessor is deprecated. Use date instead.",vg),Kg.months=w("months accessor is deprecated. Use month instead",nA),Kg.years=w("years accessor is deprecated. Use year instead",Bg),Kg.zone=w("moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/",zM),Kg.isDSTShifted=w("isDSTShifted is deprecated. 
See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information",fM);var qg=h.prototype;qg.calendar=S,qg.longDateFormat=z,qg.invalidDate=U,qg.ordinal=p,qg.preparse=Jt,qg.postformat=Jt,qg.relativeTime=O,qg.pastFuture=m,qg.set=Y,qg.months=iA,qg.monthsShort=TA,qg.monthsParse=NA,qg.monthsRegex=DA,qg.monthsShortRegex=cA,qg.week=LA,qg.firstDayOfYear=YA,qg.firstDayOfWeek=lA,qg.weekdays=UA,qg.weekdaysMin=OA,qg.weekdaysShort=pA,qg.weekdaysParse=FA,qg.weekdaysRegex=JA,qg.weekdaysShortRegex=GA,qg.weekdaysMinRegex=HA,qg.isPM=PA,qg.meridiem=ZA,$A("en",{ordinalParse:/\d{1,2}(th|st|nd|rd)/,ordinal:function(A){var M=A%10,t=1===j(A%100/10)?"th":1===M?"st":2===M?"nd":3===M?"rd":"th";return A+t}}),M.lang=w("moment.lang is deprecated. Use moment.locale instead.",$A),M.langData=w("moment.langData is deprecated. Use moment.localeData instead.",tM);var _g=Math.abs,$g=eI("ms"),Ae=eI("s"),Me=eI("m"),te=eI("h"),Ie=eI("d"),ge=eI("w"),ee=eI("M"),ie=eI("y"),Te=TI("milliseconds"),Ee=TI("seconds"),Ne=TI("minutes"),oe=TI("hours"),ne=TI("days"),Ce=TI("months"),ce=TI("years"),De=Math.round,ae={s:45,m:45,h:22,d:26,M:11},Be=Math.abs,Qe=uM.prototype;return Qe.abs=Zt,Qe.add=qt,Qe.subtract=_t,Qe.as=II,Qe.asMilliseconds=$g,Qe.asSeconds=Ae,Qe.asMinutes=Me,Qe.asHours=te,Qe.asDays=Ie,Qe.asWeeks=ge,Qe.asMonths=ee,Qe.asYears=ie,Qe.valueOf=gI,Qe._bubble=AI,Qe.get=iI,Qe.milliseconds=Te,Qe.seconds=Ee,Qe.minutes=Ne,Qe.hours=oe,Qe.days=ne,Qe.weeks=EI,Qe.months=Ce,Qe.years=ce,Qe.humanize=cI,Qe.toISOString=DI,Qe.toString=DI,Qe.toJSON=DI,Qe.locale=Ct,Qe.localeData=ct,Qe.toIsoString=w("toIsoString() is deprecated. 
Please use toISOString() instead (notice the capitals)",DI),Qe.lang=Xg,V("X",0,0,"unix"),V("x",0,0,"valueOf"),_("x",PI),_("X",qI),tA("X",function(A,M,t){t._d=new Date(1e3*parseFloat(A,10))}),tA("x",function(A,M,t){t._d=new Date(j(A))}),M.version="2.16.0",t(sM),M.fn=Kg,M.min=jM,M.max=yM,M.now=kg,M.utc=n,M.unix=kt,M.months=Xt,M.isDate=T,M.locale=$A,M.invalid=a,M.duration=GM,M.isMoment=s,M.weekdays=Wt,M.parseZone=Rt,M.localeData=tM,M.isDuration=wM,M.monthsShort=vt,M.weekdaysMin=Pt,M.defineLocale=AM,M.updateLocale=MM,M.locales=IM,M.weekdaysShort=Vt,M.normalizeUnits=f,M.relativeTimeRounding=nI,M.relativeTimeThreshold=CI,M.calendarFormat=VM,M.prototype=Kg,M})}).call(M,t(237)(A))},function(A,M,t){"use strict";var I=t(139).default,g=t(140).default,e=t(83).default;M.__esModule=!0;var i=function(A){return I(g({values:function(){var A=this;return e(this).map(function(M){return A[M]})}}),A)},T={SIZES:{large:"lg",medium:"md",small:"sm",xsmall:"xs",lg:"lg",md:"md",sm:"sm",xs:"xs"},GRID_COLUMNS:12},E=i({LARGE:"large",MEDIUM:"medium",SMALL:"small",XSMALL:"xsmall"});M.Sizes=E;var N=i({SUCCESS:"success",WARNING:"warning",DANGER:"danger",INFO:"info"});M.State=N;var o="default";M.DEFAULT=o;var n="primary";M.PRIMARY=n;var C="link";M.LINK=C;var c="inverse";M.INVERSE=c,M.default=T},function(A,M){"use strict";function t(){for(var A=arguments.length,M=Array(A),t=0;t=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t},M.__esModule=!0},function(A,M){"use strict";function t(A){return A&&A.ownerDocument||document}M.__esModule=!0,M.default=t,A.exports=M.default},function(A,M){function t(A){return"number"==typeof A&&A>-1&&A%1==0&&A<=I}var I=9007199254740991;A.exports=t},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){return A.replace(/[.*+?^${}()|[\]\\]/g,"\\$&")}function e(A){for(var M="",t=[],I=[],e=void 
0,i=0,T=/:([a-zA-Z_$][a-zA-Z0-9_$]*)|\*\*|\*|\(|\)/g;e=T.exec(A);)e.index!==i&&(I.push(A.slice(i,e.index)),M+=g(A.slice(i,e.index))),e[1]?(M+="([^/]+)",t.push(e[1])):"**"===e[0]?(M+="(.*)",t.push("splat")):"*"===e[0]?(M+="(.*?)",t.push("splat")):"("===e[0]?M+="(?:":")"===e[0]&&(M+=")?"),I.push(e[0]),i=T.lastIndex;return i!==A.length&&(I.push(A.slice(i,A.length)),M+=g(A.slice(i,A.length))),{pattern:A,regexpSource:M,paramNames:t,tokens:I}}function i(A){return c[A]||(c[A]=e(A)),c[A]}function T(A,M){"/"!==A.charAt(0)&&(A="/"+A);var t=i(A),I=t.regexpSource,g=t.paramNames,e=t.tokens;"/"!==A.charAt(A.length-1)&&(I+="/?"),"*"===e[e.length-1]&&(I+="$");var T=M.match(new RegExp("^"+I,"i"));if(null==T)return null;var E=T[0],N=M.substr(E.length);if(N){if("/"!==E.charAt(E.length-1))return null;N="/"+N}return{remainingPathname:N,paramNames:g,paramValues:T.slice(1).map(function(A){return A&&decodeURIComponent(A)})}}function E(A){return i(A).paramNames}function N(A,M){var t=T(A,M);if(!t)return null;var I=t.paramNames,g=t.paramValues,e={};return I.forEach(function(A,M){e[A]=g[M]}),e}function o(A,M){M=M||{};for(var t=i(A),I=t.tokens,g=0,e="",T=0,E=void 0,N=void 0,o=void 0,n=0,c=I.length;n0?void 0:(0,C.default)(!1),null!=o&&(e+=encodeURI(o))):"("===E?g+=1:")"===E?g-=1:":"===E.charAt(0)?(N=E.substring(1),o=M[N],null!=o||g>0?void 0:(0,C.default)(!1),null!=o&&(e+=encodeURIComponent(o))):e+=E;return e.replace(/\/+/g,"/")}M.__esModule=!0,M.compilePattern=i,M.matchPattern=T,M.getParamNames=E,M.getParams=N,M.formatPattern=o;var n=t(9),C=I(n),c=Object.create(null)},function(A,M){"use strict";M.__esModule=!0;var t="PUSH";M.PUSH=t;var I="REPLACE";M.REPLACE=I;var g="POP";M.POP=g,M.default={PUSH:t,REPLACE:I,POP:g}},function(A,M,t){"use strict";function I(A,M){return(A&M)===M}var g=t(2),e={MUST_USE_ATTRIBUTE:1,MUST_USE_PROPERTY:2,HAS_SIDE_EFFECTS:4,HAS_BOOLEAN_VALUE:8,HAS_NUMERIC_VALUE:16,HAS_POSITIVE_NUMERIC_VALUE:48,HAS_OVERLOADED_BOOLEAN_VALUE:64,injectDOMPropertyConfig:function(A){var 
M=e,t=A.Properties||{},i=A.DOMAttributeNamespaces||{},E=A.DOMAttributeNames||{},N=A.DOMPropertyNames||{},o=A.DOMMutationMethods||{};A.isCustomAttribute&&T._isCustomAttributeFunctions.push(A.isCustomAttribute);for(var n in t){T.properties.hasOwnProperty(n)?g(!1):void 0;var C=n.toLowerCase(),c=t[n],D={attributeName:C,attributeNamespace:null,propertyName:n,mutationMethod:null,mustUseAttribute:I(c,M.MUST_USE_ATTRIBUTE),mustUseProperty:I(c,M.MUST_USE_PROPERTY),hasSideEffects:I(c,M.HAS_SIDE_EFFECTS),hasBooleanValue:I(c,M.HAS_BOOLEAN_VALUE),hasNumericValue:I(c,M.HAS_NUMERIC_VALUE),hasPositiveNumericValue:I(c,M.HAS_POSITIVE_NUMERIC_VALUE),hasOverloadedBooleanValue:I(c,M.HAS_OVERLOADED_BOOLEAN_VALUE)};if(D.mustUseAttribute&&D.mustUseProperty?g(!1):void 0,!D.mustUseProperty&&D.hasSideEffects?g(!1):void 0,D.hasBooleanValue+D.hasNumericValue+D.hasOverloadedBooleanValue<=1?void 0:g(!1),E.hasOwnProperty(n)){var a=E[n];D.attributeName=a}i.hasOwnProperty(n)&&(D.attributeNamespace=i[n]),N.hasOwnProperty(n)&&(D.propertyName=N[n]),o.hasOwnProperty(n)&&(D.mutationMethod=o[n]),T.properties[n]=D}}},i={},T={ID_ATTRIBUTE_NAME:"data-reactid",properties:{},getPossibleStandardName:null,_isCustomAttributeFunctions:[],isCustomAttribute:function(A){for(var M=0;M1){var M=A.indexOf(c,1);return M>-1?A.substr(0,M):A}return null},traverseEnterLeave:function(A,M,t,I,g){var e=N(A,M);e!==A&&o(A,e,t,I,!1,!0),e!==M&&o(e,M,t,g,!0,!1)},traverseTwoPhase:function(A,M,t){A&&(o("",A,M,t,!0,!1),o(A,"",M,t,!1,!0))},traverseTwoPhaseSkipTarget:function(A,M,t){A&&(o("",A,M,t,!0,!0),o(A,"",M,t,!0,!0))},traverseAncestors:function(A,M,t){o("",A,M,t,!0,!1)},getFirstCommonAncestorID:N,_getNextDescendantID:E,isAncestorIDOf:i,SEPARATOR:c};A.exports=B},function(A,M,t){"use strict";function I(A,M,t){var I=0;return n.default.Children.map(A,function(A){if(n.default.isValidElement(A)){var g=I;return I++,M.call(t,A,g)}return A})}function g(A,M,t){var I=0;return 
n.default.Children.forEach(A,function(A){n.default.isValidElement(A)&&(M.call(t,A,I),I++)})}function e(A){var M=0;return n.default.Children.forEach(A,function(A){n.default.isValidElement(A)&&M++}),M}function i(A){var M=!1;return n.default.Children.forEach(A,function(A){!M&&n.default.isValidElement(A)&&(M=!0)}),M}function T(A,M){var t=void 0;return g(A,function(I,g){!t&&M(I,g,A)&&(t=I)}),t}function E(A,M,t){var I=0,g=[];return n.default.Children.forEach(A,function(A){n.default.isValidElement(A)&&(M.call(t,A,I)&&g.push(A),I++)}),g}var N=t(6).default;M.__esModule=!0;var o=t(1),n=N(o);M.default={map:I,forEach:g,numberOf:e,find:T,findValidComponents:E,hasValidComponent:i},A.exports=M.default},function(A,M){var t=A.exports={version:"1.2.6"};"number"==typeof __e&&(__e=t)},function(A,M,t){"use strict";var I=t(32),g=function(){var A=I&&document.documentElement;return A&&A.contains?function(A,M){return A.contains(M)}:A&&A.compareDocumentPosition?function(A,M){return A===M||!!(16&A.compareDocumentPosition(M))}:function(A,M){if(M)do if(M===A)return!0;while(M=M.parentNode);return!1}}();A.exports=g},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0}),M.default=function(A){return(0,T.default)(e.default.findDOMNode(A))};var g=t(16),e=I(g),i=t(42),T=I(i);A.exports=M.default},function(A,M,t){"use strict";var I=t(197),g=t(434),e=t(210),i=t(219),T=t(220),E=t(2),N=(t(4),{}),o=null,n=function(A,M){A&&(g.executeDispatchesInOrder(A,M),A.isPersistent()||A.constructor.release(A))},C=function(A){return n(A,!0)},c=function(A){return n(A,!1)},D=null,a={injection:{injectMount:g.injection.injectMount,injectInstanceHandle:function(A){D=A},getInstanceHandle:function(){return 
D},injectEventPluginOrder:I.injectEventPluginOrder,injectEventPluginsByName:I.injectEventPluginsByName},eventNameDispatchConfigs:I.eventNameDispatchConfigs,registrationNameModules:I.registrationNameModules,putListener:function(A,M,t){"function"!=typeof t?E(!1):void 0;var g=N[M]||(N[M]={});g[A]=t;var e=I.registrationNameModules[M];e&&e.didPutListener&&e.didPutListener(A,M,t)},getListener:function(A,M){var t=N[M];return t&&t[A]},deleteListener:function(A,M){var t=I.registrationNameModules[M];t&&t.willDeleteListener&&t.willDeleteListener(A,M);var g=N[M];g&&delete g[A]},deleteAllListeners:function(A){for(var M in N)if(N[M][A]){var t=I.registrationNameModules[M];t&&t.willDeleteListener&&t.willDeleteListener(A,M),delete N[M][A]}},extractEvents:function(A,M,t,g,e){for(var T,E=I.plugins,N=0;N1?I-1:0),e=1;e":">","<":"<",'"':""","'":"'"},e=/[&><"']/g;A.exports=I},function(A,M,t){"use strict";var I=t(10),g=/^[ \r\n\t\f]/,e=/<(!--|link|noscript|meta|script|style)[ \r\n\t\f\/>]/,i=function(A,M){A.innerHTML=M};if("undefined"!=typeof MSApp&&MSApp.execUnsafeLocalFunction&&(i=function(A,M){MSApp.execUnsafeLocalFunction(function(){A.innerHTML=M})}),I.canUseDOM){var T=document.createElement("div");T.innerHTML=" ",""===T.innerHTML&&(i=function(A,M){if(A.parentNode&&A.parentNode.replaceChild(A,A),g.test(M)||"<"===M[0]&&e.test(M)){A.innerHTML=String.fromCharCode(65279)+M;var t=A.firstChild;1===t.data.length?A.removeChild(t):t.deleteData(0,1)}else A.innerHTML=M})}A.exports=i},function(A,M,t){"use strict";var I=t(2),g=function(A){var M,t={};A instanceof Object&&!Array.isArray(A)?void 0:I(!1);for(M in A)A.hasOwnProperty(M)&&(t[M]=M);return t};A.exports=g},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(1),e=I(g),i=function(A){var 
M=A.label,t=A.id,I=A.name,g=A.value,i=A.onChange,T=A.type,E=A.spellCheck,N=A.required,o=A.readonly,n=A.autoComplete,C=A.align,c=A.className,D=e.default.createElement("input",{id:t,name:I,value:g,onChange:i,className:"ig-text",type:T,spellCheck:E,required:N,autoComplete:n});return o&&(D=e.default.createElement("input",{id:t,name:I,value:g,onChange:i,className:"ig-text",type:T,spellCheck:E,required:N,autoComplete:n,disabled:!0})),e.default.createElement("div",{className:"input-group "+C+" "+c},D,e.default.createElement("i",{className:"ig-helpers"}),e.default.createElement("label",{className:"ig-label"},M))};M.default=i},function(A,M,t){(function(){var t=this,I=t.humanize,g={};"undefined"!=typeof A&&A.exports&&(M=A.exports=g),M.humanize=g,g.noConflict=function(){return t.humanize=I,this},g.pad=function(A,M,t,I){if(A+="",t?t.length>1&&(t=t.charAt(0)):t=" ",I=void 0===I?"left":"right","right"===I)for(;A.length4&&A<21?"th":{1:"st",2:"nd",3:"rd"}[A%10]||"th"},w:function(){return t.getDay()},z:function(){return(o.L()?i[o.n()]:e[o.n()])+o.j()-1},W:function(){var A=o.z()-o.N()+1.5;return g.pad(1+Math.floor(Math.abs(A)/7)+(A%7>3.5?1:0),2,"0")},F:function(){return N[t.getMonth()]},m:function(){return g.pad(o.n(),2,"0")},M:function(){return o.F().slice(0,3)},n:function(){return t.getMonth()+1},t:function(){return new Date(o.Y(),o.n(),0).getDate()},L:function(){return 1===new Date(o.Y(),1,29).getMonth()?1:0},o:function(){var A=o.n(),M=o.W();return o.Y()+(12===A&&M<9?-1:1===A&&M>9)},Y:function(){return t.getFullYear()},y:function(){return String(o.Y()).slice(-2)},a:function(){return t.getHours()>11?"pm":"am"},A:function(){return o.a().toUpperCase()},B:function(){var A=t.getTime()/1e3,M=A%86400+3600;M<0&&(M+=86400);var I=M/86.4%1e3;return A<0?Math.ceil(I):Math.floor(I)},g:function(){return o.G()%12||12},G:function(){return t.getHours()},h:function(){return g.pad(o.g(),2,"0")},H:function(){return g.pad(o.G(),2,"0")},i:function(){return 
g.pad(t.getMinutes(),2,"0")},s:function(){return g.pad(t.getSeconds(),2,"0")},u:function(){return g.pad(1e3*t.getMilliseconds(),6,"0")},O:function(){var A=t.getTimezoneOffset(),M=Math.abs(A);return(A>0?"-":"+")+g.pad(100*Math.floor(M/60)+M%60,4,"0")},P:function(){var A=o.O();return A.substr(0,3)+":"+A.substr(3,2)},Z:function(){return 60*-t.getTimezoneOffset()},c:function(){return"Y-m-d\\TH:i:sP".replace(I,T)},r:function(){return"D, d M Y H:i:s O".replace(I,T)},U:function(){return t.getTime()/1e3||0}};return A.replace(I,T)},g.numberFormat=function(A,M,t,I){M=isNaN(M)?2:Math.abs(M),t=void 0===t?".":t,I=void 0===I?",":I;var g=A<0?"-":"";A=Math.abs(+A||0);var e=parseInt(A.toFixed(M),10)+"",i=e.length>3?e.length%3:0;return g+(i?e.substr(0,i)+I:"")+e.substr(i).replace(/(\d{3})(?=\d)/g,"$1"+I)+(M?t+Math.abs(A-e).toFixed(M).slice(2):"")},g.naturalDay=function(A,M){A=void 0===A?g.time():A,M=void 0===M?"Y-m-d":M;var t=86400,I=new Date,e=new Date(I.getFullYear(),I.getMonth(),I.getDate()).getTime()/1e3;return A=e-t?"yesterday":A>=e&&A=e+t&&A-2)return(t>=0?"just ":"")+"now";if(t<60&&t>-60)return t>=0?Math.floor(t)+" seconds ago":"in "+Math.floor(-t)+" seconds";if(t<120&&t>-120)return t>=0?"about a minute ago":"in about a minute";if(t<3600&&t>-3600)return t>=0?Math.floor(t/60)+" minutes ago":"in "+Math.floor(-t/60)+" minutes";if(t<7200&&t>-7200)return t>=0?"about an hour ago":"in about an hour";if(t<86400&&t>-86400)return t>=0?Math.floor(t/3600)+" hours ago":"in "+Math.floor(-t/3600)+" hours";var I=172800;if(t-I)return t>=0?"1 day ago":"in 1 day";var e=2505600;if(t-e)return t>=0?Math.floor(t/86400)+" days ago":"in "+Math.floor(-t/86400)+" days";var i=5184e3;if(t-i)return t>=0?"about a month ago":"in about a month";var T=parseInt(g.date("Y",M),10),E=parseInt(g.date("Y",A),10),N=12*T+parseInt(g.date("n",M),10),o=12*E+parseInt(g.date("n",A),10),n=N-o;if(n<12&&n>-12)return n>=0?n+" months ago":"in "+-n+" months";var C=T-E;return C<2&&C>-2?C>=0?"a year ago":"in a year":C>=0?C+" years 
ago":"in "+-C+" years"},g.ordinal=function(A){A=parseInt(A,10),A=isNaN(A)?0:A;var M=A<0?"-":"";A=Math.abs(A);var t=A%100;return M+A+(t>4&&t<21?"th":{1:"st",2:"nd",3:"rd"}[A%10]||"th")},g.filesize=function(A,M,t,I,e,i){return M=void 0===M?1024:M,A<=0?"0 bytes":(A

"),A=A.replace(/\n/g,"
"),"

"+A+"

"},g.nl2br=function(A){return A.replace(/(\r\n|\n|\r)/g,"
")},g.truncatechars=function(A,M){return A.length<=M?A:A.substr(0,M)+"…"},g.truncatewords=function(A,M){var t=A.split(" ");return t.lengthM.documentElement.clientHeight;return{modalStyles:{paddingRight:I&&!g?Q.default():void 0,paddingLeft:!I&&g?Q.default():void 0}}}});X.Body=z.default,X.Header=p.default,X.Title=m.default,X.Footer=f.default,X.Dialog=h.default,X.TRANSITION_DURATION=300,X.BACKDROP_TRANSITION_DURATION=150,M.default=c.bsSizes([a.Sizes.LARGE,a.Sizes.SMALL],c.bsClass("modal",X)),A.exports=M.default},function(A,M,t){"use strict";var I=t(15).default,g=t(14).default,e=t(41).default,i=t(7).default,T=t(6).default;M.__esModule=!0;var E=t(1),N=T(E),o=t(5),n=T(o),C=t(11),c=T(C),D=t(40),a=T(D),B=function(A){function M(){g(this,M),A.apply(this,arguments)}return I(M,A),M.prototype.render=function(){var A=this.props,M=A["aria-label"],t=e(A,["aria-label"]),I=a.default(this.context.$bs_onModalHide,this.props.onHide);return N.default.createElement("div",i({},t,{className:n.default(this.props.className,c.default.prefix(this.props,"header"))}),this.props.closeButton&&N.default.createElement("button",{type:"button",className:"close","aria-label":M,onClick:I},N.default.createElement("span",{"aria-hidden":"true"},"×")),this.props.children)},M}(N.default.Component);B.propTypes={"aria-label":N.default.PropTypes.string,bsClass:N.default.PropTypes.string,closeButton:N.default.PropTypes.bool,onHide:N.default.PropTypes.func},B.contextTypes={$bs_onModalHide:N.default.PropTypes.func},B.defaultProps={"aria-label":"Close",closeButton:!1},M.default=C.bsClass("modal",B),A.exports=M.default},function(A,M,t){"use strict";function I(A,M){return Array.isArray(M)?M.indexOf(A)>=0:A===M}var g=t(7).default,e=t(83).default,i=t(6).default;M.__esModule=!0;var 
T=t(50),E=i(T),N=t(168),o=i(N),n=t(1),C=i(n),c=t(16),D=i(c),a=t(100),B=(i(a),t(287)),Q=i(B),r=t(40),s=i(r),x=C.default.createClass({displayName:"OverlayTrigger",propTypes:g({},Q.default.propTypes,{trigger:C.default.PropTypes.oneOfType([C.default.PropTypes.oneOf(["click","hover","focus"]),C.default.PropTypes.arrayOf(C.default.PropTypes.oneOf(["click","hover","focus"]))]),delay:C.default.PropTypes.number,delayShow:C.default.PropTypes.number,delayHide:C.default.PropTypes.number,defaultOverlayShown:C.default.PropTypes.bool,overlay:C.default.PropTypes.node.isRequired,onBlur:C.default.PropTypes.func,onClick:C.default.PropTypes.func,onFocus:C.default.PropTypes.func,onMouseEnter:C.default.PropTypes.func,onMouseLeave:C.default.PropTypes.func,target:function(){},onHide:function(){},show:function(){}}),getDefaultProps:function(){return{defaultOverlayShown:!1,trigger:["hover","focus"]}},getInitialState:function(){return{isOverlayShown:this.props.defaultOverlayShown}},show:function(){this.setState({isOverlayShown:!0})},hide:function(){this.setState({isOverlayShown:!1})},toggle:function(){this.state.isOverlayShown?this.hide():this.show()},componentWillMount:function(){this.handleMouseOver=this.handleMouseOverOut.bind(null,this.handleDelayedShow),this.handleMouseOut=this.handleMouseOverOut.bind(null,this.handleDelayedHide)},componentDidMount:function(){this._mountNode=document.createElement("div"),this.renderOverlay()},renderOverlay:function(){D.default.unstable_renderSubtreeIntoContainer(this,this._overlay,this._mountNode)},componentWillUnmount:function(){D.default.unmountComponentAtNode(this._mountNode),this._mountNode=null,clearTimeout(this._hoverShowDelay),clearTimeout(this._hoverHideDelay)},componentDidUpdate:function(){this._mountNode&&this.renderOverlay()},getOverlayTarget:function(){return D.default.findDOMNode(this)},getOverlay:function(){var 
A=g({},o.default(this.props,e(Q.default.propTypes)),{show:this.state.isOverlayShown,onHide:this.hide,target:this.getOverlayTarget,onExit:this.props.onExit,onExiting:this.props.onExiting,onExited:this.props.onExited,onEnter:this.props.onEnter,onEntering:this.props.onEntering,onEntered:this.props.onEntered}),M=n.cloneElement(this.props.overlay,{placement:A.placement,container:A.container});return C.default.createElement(Q.default,A,M)},render:function(){var A=C.default.Children.only(this.props.children),M=A.props,t={"aria-describedby":this.props.overlay.props.id};return this._overlay=this.getOverlay(),t.onClick=s.default(M.onClick,this.props.onClick),I("click",this.props.trigger)&&(t.onClick=s.default(this.toggle,t.onClick)),I("hover",this.props.trigger)&&(t.onMouseOver=s.default(this.handleMouseOver,this.props.onMouseOver,M.onMouseOver),t.onMouseOut=s.default(this.handleMouseOut,this.props.onMouseOut,M.onMouseOut)),I("focus",this.props.trigger)&&(t.onFocus=s.default(this.handleDelayedShow,this.props.onFocus,M.onFocus),t.onBlur=s.default(this.handleDelayedHide,this.props.onBlur,M.onBlur)),n.cloneElement(A,t)},handleDelayedShow:function(){var A=this;if(null!=this._hoverHideDelay)return clearTimeout(this._hoverHideDelay),void(this._hoverHideDelay=null);if(!this.state.isOverlayShown&&null==this._hoverShowDelay){var M=null!=this.props.delayShow?this.props.delayShow:this.props.delay;return M?void(this._hoverShowDelay=setTimeout(function(){A._hoverShowDelay=null,A.show()},M)):void this.show()}},handleDelayedHide:function(){var A=this;if(null!=this._hoverShowDelay)return clearTimeout(this._hoverShowDelay),void(this._hoverShowDelay=null);if(this.state.isOverlayShown&&null==this._hoverHideDelay){var M=null!=this.props.delayHide?this.props.delayHide:this.props.delay;return M?void(this._hoverHideDelay=setTimeout(function(){A._hoverHideDelay=null,A.hide()},M)):void this.hide()}},handleMouseOverOut:function(A,M){var 
t=M.currentTarget,I=M.relatedTarget||M.nativeEvent.toElement;I&&(I===t||E.default(t,I))||A(M)}});M.default=x,A.exports=M.default},function(A,M,t){"use strict";var I=t(7).default,g=t(6).default;M.__esModule=!0;var e=t(1),i=g(e),T=t(5),E=g(T),N=t(11),o=g(N),n=t(176),C=g(n),c=i.default.createClass({displayName:"Tooltip",propTypes:{id:C.default(i.default.PropTypes.oneOfType([i.default.PropTypes.string,i.default.PropTypes.number])),placement:i.default.PropTypes.oneOf(["top","right","bottom","left"]),positionLeft:i.default.PropTypes.number,positionTop:i.default.PropTypes.number,arrowOffsetLeft:i.default.PropTypes.oneOfType([i.default.PropTypes.number,i.default.PropTypes.string]),arrowOffsetTop:i.default.PropTypes.oneOfType([i.default.PropTypes.number,i.default.PropTypes.string]),title:i.default.PropTypes.node},getDefaultProps:function(){return{bsClass:"tooltip",placement:"right"}},render:function(){var A,M=(A={},A[o.default.prefix(this.props)]=!0,A[this.props.placement]=!0,A),t=I({left:this.props.positionLeft, +top:this.props.positionTop},this.props.style),g={left:this.props.arrowOffsetLeft,top:this.props.arrowOffsetTop};return i.default.createElement("div",I({role:"tooltip"},this.props,{className:E.default(this.props.className,M),style:t}),i.default.createElement("div",{className:o.default.prefix(this.props,"arrow"),style:g}),i.default.createElement("div",{className:o.default.prefix(this.props,"inner")},this.props.children))}});M.default=c,A.exports=M.default},function(A,M,t){A.exports={default:t(294),__esModule:!0}},function(A,M,t){var I=t(300),g=t(49),e=t(141),i="prototype",T=function(A,M,t){var E,N,o,n=A&T.F,C=A&T.G,c=A&T.S,D=A&T.P,a=A&T.B,B=A&T.W,Q=C?g:g[M]||(g[M]={}),r=C?I:c?I[M]:(I[M]||{})[i];C&&(t=M);for(E in t)N=!n&&r&&E in r,N&&E in Q||(o=N?r[E]:t[E],Q[E]=C&&"function"!=typeof r[E]?t[E]:a&&N?e(o,I):B&&r[E]==o?function(A){var M=function(M){return this instanceof A?new A(M):A(M)};return M[i]=A[i],M}(o):D&&"function"==typeof 
o?e(Function.call,o):o,D&&((Q[i]||(Q[i]={}))[E]=o))};T.F=1,T.G=2,T.S=4,T.P=8,T.B=16,T.W=32,A.exports=T},function(A,M){var t=Object;A.exports={create:t.create,getProto:t.getPrototypeOf,isEnum:{}.propertyIsEnumerable,getDesc:t.getOwnPropertyDescriptor,setDesc:t.defineProperty,setDescs:t.defineProperties,getKeys:t.keys,getNames:t.getOwnPropertyNames,getSymbols:t.getOwnPropertySymbols,each:[].forEach}},function(A,M,t){"use strict";var I=t(32),g=function(){};I&&(g=function(){return document.addEventListener?function(A,M,t,I){return A.addEventListener(M,t,I||!1)}:document.attachEvent?function(A,M,t){return A.attachEvent("on"+M,t)}:void 0}()),A.exports=g},function(A,M,t){"use strict";var I=t(150),g=t(322),e=t(317),i=t(318),T=Object.prototype.hasOwnProperty;A.exports=function(A,M,t){var E="",N=M;if("string"==typeof M){if(void 0===t)return A.style[I(M)]||e(A).getPropertyValue(g(M));(N={})[M]=t}for(var o in N)T.call(N,o)&&(N[o]||0===N[o]?E+=g(o)+":"+N[o]+";":i(A,g(o)));A.style.cssText+=";"+E}},function(A,M,t){"use strict";var I=function(A,M,t,I,g,e,i,T){if(!A){var E;if(void 0===M)E=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var N=[t,I,g,e,i,T],o=0;E=new Error(M.replace(/%s/g,function(){return N[o++]})),E.name="Invariant Violation"}throw E.framesToPop=1,E}};A.exports=I},function(A,M,t){function I(A,M,t){if("function"!=typeof A)return g;if(void 0===M)return A;switch(t){case 1:return function(t){return A.call(M,t)};case 3:return function(t,I,g){return A.call(M,t,I,g)};case 4:return function(t,I,g,e){return A.call(M,t,I,g,e)};case 5:return function(t,I,g,e,i){return A.call(M,t,I,g,e,i)}}return function(){return A.apply(M,arguments)}}var g=t(169);A.exports=I},function(A,M,t){function I(A){return null!=A&&e(g(A))}var g=t(159),e=t(43);A.exports=I},function(A,M,t){function I(A){return e(A)&&g(A)&&T.call(A,"callee")&&!E.call(A,"callee")}var 
g=t(90),e=t(33),i=Object.prototype,T=i.hasOwnProperty,E=i.propertyIsEnumerable;A.exports=I},function(A,M,t){function I(A){return"string"==typeof A||g(A)&&T.call(A)==e}var g=t(33),e="[object String]",i=Object.prototype,T=i.toString;A.exports=I},function(A,M,t){var I=t(61),g=t(90),e=t(27),i=t(356),T=t(95),E=I(Object,"keys"),N=E?function(A){var M=null==A?void 0:A.constructor;return"function"==typeof M&&M.prototype===A||("function"==typeof A?T.enumPrototypes:g(A))?i(A):e(A)?E(A):[]}:i;A.exports=N},function(A,M,t){function I(A){if(null==A)return[];o(A)||(A=Object(A));var M=A.length;M=M&&N(M)&&(i(A)||e(A)||n(A))&&M||0;for(var t=A.constructor,I=-1,g=T(t)&&t.prototype||w,c=g===A,D=Array(M),a=M>0,Q=C.enumErrorProps&&(A===u||A instanceof Error),r=C.enumPrototypes&&T(A);++I>",null!=t[I]?A(t,I,g):M?new Error("Required prop '"+I+"' was not specified in '"+g+"'."):void 0}var t=M.bind(null,!1);return t.isRequired=M.bind(null,!0),t}M.__esModule=!0,M.errMsg=t,M.createChainableTypeChecker=I},function(A,M,t){"use strict";var I=function(){};A.exports=I},function(A,M){"use strict";function t(A,M,t){function I(){return i=!0,T?void(N=[].concat(Array.prototype.slice.call(arguments))):void t.apply(this,arguments)}function g(){if(!i&&(E=!0,!T)){for(T=!0;!i&&e=A&&E&&(i=!0,t()))}}var e=0,i=!1,T=!1,E=!1,N=void 0;g()}function I(A,M,t){function I(A,M,I){i||(M?(i=!0,t(M)):(e[A]=I,i=++T===g,i&&t(null,e)))}var g=A.length,e=[];if(0===g)return t(null,e);var i=!1,T=0;A.forEach(function(A,t){M(A,t,function(A,M){I(t,A,M)})})}M.__esModule=!0,M.loopAsync=t,M.mapAsync=I},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}M.__esModule=!0,M.router=M.routes=M.route=M.components=M.component=M.location=M.history=M.falsy=M.locationShape=M.routerShape=void 0;var 
e=t(1),i=t(64),T=(g(i),t(34)),E=I(T),N=t(8),o=(g(N),e.PropTypes.func),n=e.PropTypes.object,C=e.PropTypes.shape,c=e.PropTypes.string,D=M.routerShape=C({push:o.isRequired,replace:o.isRequired,go:o.isRequired,goBack:o.isRequired,goForward:o.isRequired,setRouteLeaveHook:o.isRequired,isActive:o.isRequired}),a=M.locationShape=C({pathname:c.isRequired,search:c.isRequired,state:n,action:c.isRequired,key:c}),B=M.falsy=E.falsy,Q=M.history=E.history,r=M.location=a,s=M.component=E.component,x=M.components=E.components,j=M.route=E.route,y=(M.routes=E.routes,M.router=D),u={falsy:B,history:Q,location:r,component:s,components:x,route:j,router:y};M.default=u},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}M.__esModule=!0;var g=t(416),e=I(g),i=t(187),T=I(i);M.default=(0,T.default)(e.default),A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){for(var M in A)if(Object.prototype.hasOwnProperty.call(A,M))return!0;return!1}function e(A,M){function t(M){var t=!(arguments.length<=1||void 0===arguments[1])&&arguments[1],I=arguments.length<=2||void 0===arguments[2]?null:arguments[2],g=void 0;return t&&t!==!0||null!==I?(M={pathname:M,query:t},g=I||!1):(M=A.createLocation(M),g=t),(0,C.default)(M,g,s.location,s.routes,s.params)}function I(A,t){x&&x.location===A?e(x,t):(0,B.default)(M,A,function(M,I){M?t(M):I?e(i({},I,{location:A}),t):t()})}function e(A,M){function t(t,g){return t||g?I(t,g):void(0,D.default)(A,function(t,I){t?M(t):M(null,null,s=i({},A,{components:I}))})}function I(A,t){A?M(A):M(null,t)}var g=(0,N.default)(s,A),e=g.leaveRoutes,T=g.changeRoutes,E=g.enterRoutes;(0,o.runLeaveHooks)(e,s),e.filter(function(A){return E.indexOf(A)===-1}).forEach(a),(0,o.runChangeHooks)(T,s,A,function(M,g){return M||g?I(M,g):void(0,o.runEnterHooks)(E,A,t)})}function T(A){var M=arguments.length<=1||void 0===arguments[1]||arguments[1];return A.__id__||M&&(A.__id__=j++)}function E(A){return 
A.reduce(function(A,M){return A.push.apply(A,y[T(M)]),A},[])}function n(A,t){(0,B.default)(M,A,function(M,I){if(null==I)return void t();x=i({},I,{location:A});for(var g=E((0,N.default)(s,x).leaveRoutes),e=void 0,T=0,o=g.length;null==e&&T=32||13===M?M:0}A.exports=t},function(A,M){"use strict";function t(A){var M=this,t=M.nativeEvent;if(t.getModifierState)return t.getModifierState(A);var I=g[A];return!!I&&!!t[I]}function I(A){return t}var g={Alt:"altKey",Control:"ctrlKey",Meta:"metaKey",Shift:"shiftKey"};A.exports=I},function(A,M){"use strict";function t(A){var M=A.target||A.srcElement||window;return 3===M.nodeType?M.parentNode:M}A.exports=t},function(A,M){"use strict";function t(A){var M=A&&(I&&A[I]||A[g]);if("function"==typeof M)return M}var I="function"==typeof Symbol&&Symbol.iterator,g="@@iterator";A.exports=t},function(A,M,t){"use strict";function I(A){return"function"==typeof A&&"undefined"!=typeof A.prototype&&"function"==typeof A.prototype.mountComponent&&"function"==typeof A.prototype.receiveComponent}function g(A){var M;if(null===A||A===!1)M=new i(g);else if("object"==typeof A){var t=A;!t||"function"!=typeof t.type&&"string"!=typeof t.type?N(!1):void 0,M="string"==typeof t.type?T.createInternalComponent(t):I(t.type)?new t.type(t):new o}else"string"==typeof A||"number"==typeof A?M=T.createInstanceForText(A):N(!1);return M.construct(A),M._mountIndex=0,M._mountImage=null,M}var e=t(440),i=t(208),T=t(214),E=t(3),N=t(2),o=(t(4),function(){});E(o.prototype,e.Mixin,{_instantiateReactComponent:g}),A.exports=g},function(A,M,t){"use strict";/** * Checks if an event is supported in the current execution environment. 
* * NOTE: This will not work correctly for non-generic events such as ` + "`" + `change` + "`" + `, @@ -200,61 +200,60 @@ role:"tooltip"},this.props,{className:E["default"](this.props.className,M),style * @internal * @license Modernizr 3.0.0pre (Custom Build) | MIT */ -function I(A,M){if(!e.canUseDOM||M&&!("addEventListener"in document))return!1;var t="on"+A,I=t in document;if(!I){var i=document.createElement("div");i.setAttribute(t,"return;"),I="function"==typeof i[t]}return!I&&g&&"wheel"===A&&(I=document.implementation.hasFeature("Events.wheel","3.0")),I}var g,e=t(10);e.canUseDOM&&(g=document.implementation&&document.implementation.hasFeature&&document.implementation.hasFeature("","")!==!0),A.exports=I},function(A,M,t){"use strict";var I=t(10),g=t(74),e=t(75),i=function(A,M){A.textContent=M};I.canUseDOM&&("textContent"in document.documentElement||(i=function(A,M){e(A,g(M))})),A.exports=i},function(A,M){"use strict";function t(A,M){var t=null===A||A===!1,I=null===M||M===!1;if(t||I)return t===I;var g=typeof A,e=typeof M;return"string"===g||"number"===g?"string"===e||"number"===e:"object"===e&&A.type===M.type&&A.key===M.key}A.exports=t},function(A,M,t){"use strict";function I(A){return a[A]}function g(A,M){return A&&null!=A.key?i(A.key):M.toString(36)}function e(A){return(""+A).replace(B,I)}function i(A){return"$"+e(A)}function T(A,M,t,I){var e=typeof A;if("undefined"!==e&&"boolean"!==e||(A=null),null===A||"string"===e||"number"===e||N.isValidElement(A))return t(I,A,""===M?c+g(A,0):M),1;var E,o,a=0,B=""===M?c:M+D;if(Array.isArray(A))for(var Q=0;QM.name.toLowerCase()?1:0}),g=g.sort(function(A,M){return A.name.toLowerCase()M.name.toLowerCase()?1:0}),M&&(t=t.reverse(),g=g.reverse()),[].concat(I(t),I(g))},M.sortObjectsBySize=function(A,M){var t=A.filter(function(A){return A.name.endsWith("/")}),g=A.filter(function(A){return!A.name.endsWith("/")});return g=g.sort(function(A,M){return 
A.size-M.size}),M&&(g=g.reverse()),[].concat(I(t),I(g))},M.sortObjectsByDate=function(A,M){var t=A.filter(function(A){return A.name.endsWith("/")}),g=A.filter(function(A){return!A.name.endsWith("/")});return g=g.sort(function(A,M){return new Date(A.lastModified).getTime()-new Date(M.lastModified).getTime()}),M&&(g=g.reverse()),[].concat(I(t),I(g))},M.pathSlice=function(A){A=A.replace(g.minioBrowserPrefix,"");var M="",t="";if(!A)return{bucket:t,prefix:M};var I=A.indexOf("/",1);return I==-1?(t=A.slice(1),{bucket:t,prefix:M}):(t=A.slice(1,I),M=A.slice(I+1),{bucket:t,prefix:M})},M.pathJoin=function(A,M){return M||(M=""),g.minioBrowserPrefix+"/"+A+"/"+M}},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M["default"]=A,M}function g(A){return A&&A.__esModule?A:{"default":A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(M,"__esModule",{value:!0});var i=function(){function A(A,M){for(var t=0;t":i.innerHTML="<"+A+">",T[A]=!i.firstChild),T[A]?C[A]:null}var g=t(10),e=t(2),i=g.canUseDOM?document.createElement("div"):null,T={},E=[1,'"],N=[1,"","
"],o=[3,"","
"],n=[1,'',""],C={"*":[1,"?
","
"],area:[1,"",""],col:[2,"","
"],legend:[1,"
","
"],param:[1,"",""],tr:[2,"","
"],optgroup:E,option:E,caption:N,colgroup:N,tbody:N,tfoot:N,thead:N,td:o,th:o},c=["circle","clipPath","defs","ellipse","g","image","line","linearGradient","mask","path","pattern","polygon","polyline","radialGradient","rect","stop","text","tspan"];c.forEach(function(A){C[A]=n,T[A]=!0}),A.exports=I},function(A,M){"use strict";function t(A,M){if(A===M)return!0;if("object"!=typeof A||null===A||"object"!=typeof M||null===M)return!1;var t=Object.keys(A),g=Object.keys(M);if(t.length!==g.length)return!1;for(var e=I.bind(M),i=0;iM.name.toLowerCase()?1:0}),g=g.sort(function(A,M){return A.name.toLowerCase()M.name.toLowerCase()?1:0}),M&&(t=t.reverse(),g=g.reverse()),[].concat(I(t),I(g))},M.sortObjectsBySize=function(A,M){var t=A.filter(function(A){return A.name.endsWith("/")}),g=A.filter(function(A){return!A.name.endsWith("/")});return g=g.sort(function(A,M){return A.size-M.size}),M&&(g=g.reverse()),[].concat(I(t),I(g))},M.sortObjectsByDate=function(A,M){var t=A.filter(function(A){return A.name.endsWith("/")}),g=A.filter(function(A){return!A.name.endsWith("/")});return g=g.sort(function(A,M){return new Date(A.lastModified).getTime()-new Date(M.lastModified).getTime()}),M&&(g=g.reverse()),[].concat(I(t),I(g))},M.pathSlice=function(A){A=A.replace(g.minioBrowserPrefix,"");var M="",t="";if(!A)return{bucket:t,prefix:M};var I=A.indexOf("/",1);return I==-1?(t=A.slice(1),{bucket:t,prefix:M}):(t=A.slice(1,I),M=A.slice(I+1),{bucket:t,prefix:M})},M.pathJoin=function(A,M){return M||(M=""),g.minioBrowserPrefix+"/"+A+"/"+M}},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(M,"__esModule",{value:!0});var i=function(){function A(A,M){for(var t=0;t1)for(var t=1;t-1&&A%1==0&&A1)for(var 
t=1;tA.clientHeight}Object.defineProperty(M,"__esModule",{value:!0}),M["default"]=i;var T=t(57),E=I(T),N=t(39),o=I(N);A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A,M,t,I,g){var i=A[M],E="undefined"==typeof i?"undefined":e(i);return T["default"].isValidElement(i)?new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of type ReactElement "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected an element type (a string ")+"or a ReactClass)."):"function"!==E&&"string"!==E?new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of value ` + "`" + `"+i+"` + "`" + ` "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected an element type (a string ")+"or a ReactClass)."):null}M.__esModule=!0;var e="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(A){return typeof A}:function(A){return A&&"function"==typeof Symbol&&A.constructor===Symbol?"symbol":typeof A},i=t(1),T=I(i),E=t(182),N=I(E);M["default"]=(0,N["default"])(g)},function(A,M){"use strict";function t(A){function M(M,t,I,g,e,i){var T=g||"<>",E=i||I;if(null==t[I])return M?new Error("Required "+e+" ` + "`" + `"+E+"` + "`" + ` was not specified "+("in ` + "`" + `"+T+"` + "`" + `.")):null;for(var N=arguments.length,o=Array(N>6?N-6:0),n=6;n=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return 0===A.button}function i(A){return!!(A.metaKey||A.altKey||A.ctrlKey||A.shiftKey)}function T(A){for(var M in A)if(Object.prototype.hasOwnProperty.call(A,M))return!1;return!0}function E(A,M){var t=M.query,I=M.hash,g=M.state;return t||I||g?{pathname:A,query:t,hash:I,state:g}:A}M.__esModule=!0;var N=Object.assign||function(A){for(var M=1;M=0;I--){var 
g=A[I],e=g.path||"";if(t=e.replace(/\/*$/,"/")+t,0===e.indexOf("/"))break}return"/"+t}},propTypes:{path:C,from:C,to:C.isRequired,query:c,state:c,onEnter:o.falsy,children:o.falsy},render:function(){(0,T["default"])(!1)}});M["default"]=D,A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}M.__esModule=!0;var g=t(1),e=I(g),i=t(8),T=I(i),E=t(29),N=t(34),o=e["default"].PropTypes,n=o.string,C=o.func,c=e["default"].createClass({displayName:"Route",statics:{createRouteFromReactElement:E.createRouteFromReactElement},propTypes:{path:n,component:N.component,components:N.components,getComponent:C,getComponents:C},render:function(){(0,T["default"])(!1)}});M["default"]=c,A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A,M){var t={};for(var I in A)M.indexOf(I)>=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return!A||!A.__v2_compatible__}function i(A){return A&&A.getCurrentLocation}M.__esModule=!0;var T=Object.assign||function(A){for(var M=1;M=0&&0===window.sessionStorage.length)return;throw t}}function i(A){var M=void 0;try{M=window.sessionStorage.getItem(g(A))}catch(t){if(t.name===o)return null}if(M)try{return JSON.parse(M)}catch(t){}return null}M.__esModule=!0,M.saveState=e,M.readState=i;var T=t(22),E=(I(T),"@@History/"),N=["QuotaExceededError","QUOTA_EXCEEDED_ERR"],o="SecurityError"},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A){function M(A){return E.canUseDOM?void 0:T["default"](!1),t.listen(A)}var t=n["default"](e({getUserConfirmation:N.getUserConfirmation},A,{go:N.go}));return e({},t,{listen:M})}M.__esModule=!0;var e=Object.assign||function(A){for(var M=1;M1?M-1:0),e=1;e=A.childNodes.length?null:A.childNodes.item(t);A.insertBefore(M,I)}var 
g=t(425),e=t(219),i=t(17),T=t(75),E=t(120),N=t(2),o={dangerouslyReplaceNodeWithMarkup:g.dangerouslyReplaceNodeWithMarkup,updateTextContent:E,processUpdates:function(A,M){for(var t,i=null,o=null,n=0;n-1?void 0:i(!1),!N.plugins[t]){M.extractEvents?void 0:i(!1),N.plugins[t]=M;var I=M.eventTypes;for(var e in I)g(I[e],M,e)?void 0:i(!1)}}}function g(A,M,t){N.eventNameDispatchConfigs.hasOwnProperty(t)?i(!1):void 0,N.eventNameDispatchConfigs[t]=A;var I=A.phasedRegistrationNames;if(I){for(var g in I)if(I.hasOwnProperty(g)){var T=I[g];e(T,M,t)}return!0}return!!A.registrationName&&(e(A.registrationName,M,t),!0)}function e(A,M,t){N.registrationNameModules[A]?i(!1):void 0,N.registrationNameModules[A]=M,N.registrationNameDependencies[A]=M.eventTypes[t].dependencies}var i=t(2),T=null,E={},N={plugins:[],eventNameDispatchConfigs:{},registrationNameModules:{},registrationNameDependencies:{},injectEventPluginOrder:function(A){T?i(!1):void 0,T=Array.prototype.slice.call(A),I()},injectEventPluginsByName:function(A){var M=!1;for(var t in A)if(A.hasOwnProperty(t)){var g=A[t];E.hasOwnProperty(t)&&E[t]===g||(E[t]?i(!1):void 0,E[t]=g,M=!0)}M&&I()},getPluginModuleForEvent:function(A){var M=A.dispatchConfig;if(M.registrationName)return N.registrationNameModules[M.registrationName]||null;for(var t in M.phasedRegistrationNames)if(M.phasedRegistrationNames.hasOwnProperty(t)){var I=N.registrationNameModules[M.phasedRegistrationNames[t]];if(I)return I}return null},_resetEventPlugins:function(){T=null;for(var A in E)E.hasOwnProperty(A)&&delete E[A];N.plugins.length=0;var M=N.eventNameDispatchConfigs;for(var t in M)M.hasOwnProperty(t)&&delete M[t];var I=N.registrationNameModules;for(var g in I)I.hasOwnProperty(g)&&delete I[g]}};A.exports=N},function(A,M,t){"use strict";function I(A){return(""+A).replace(x,"//")}function g(A,M){this.func=A,this.context=M,this.count=0}function e(A,M,t){var I=A.func,g=A.context;I.call(g,M,A.count++)}function i(A,M,t){if(null==A)return A;var 
I=g.getPooled(M,t);Q(A,e,I),g.release(I)}function T(A,M,t,I){this.result=A,this.keyPrefix=M,this.func=t,this.context=I,this.count=0}function E(A,M,t){var g=A.result,e=A.keyPrefix,i=A.func,T=A.context,E=i.call(T,M,A.count++);Array.isArray(E)?N(E,g,t,B.thatReturnsArgument):null!=E&&(a.isValidElement(E)&&(E=a.cloneAndReplaceKey(E,e+(E!==M?I(E.key||"")+"/":"")+t)),g.push(E))}function N(A,M,t,g,e){var i="";null!=t&&(i=I(t)+"/");var N=T.getPooled(M,i,g,e);Q(A,E,N),T.release(N)}function o(A,M,t){if(null==A)return A;var I=[];return N(A,I,null,M,t),I}function n(A,M,t){return null}function C(A,M){return Q(A,n,null)}function c(A){var M=[];return N(A,M,null,B.thatReturnsArgument),M}var D=t(30),a=t(13),B=t(20),Q=t(122),r=D.twoArgumentPooler,s=D.fourArgumentPooler,x=/\/(?!\/)/g;g.prototype.destructor=function(){this.func=null,this.context=null,this.count=0},D.addPoolingTo(g,r),T.prototype.destructor=function(){this.result=null,this.keyPrefix=null,this.func=null,this.context=null,this.count=0},D.addPoolingTo(T,s);var j={forEach:i,map:o,mapIntoWithKeyPrefixInternal:N,count:C,toArray:c};A.exports=j},function(A,M,t){"use strict";function I(A,M){var t=y.hasOwnProperty(M)?y[M]:null;u.hasOwnProperty(M)&&(t!==x.OVERRIDE_BASE?B(!1):void 0),A.hasOwnProperty(M)&&(t!==x.DEFINE_MANY&&t!==x.DEFINE_MANY_MERGED?B(!1):void 0)}function g(A,M){if(M){"function"==typeof M?B(!1):void 0,C.isValidElement(M)?B(!1):void 0;var t=A.prototype;M.hasOwnProperty(s)&&w.mixins(A,M.mixins);for(var g in M)if(M.hasOwnProperty(g)&&g!==s){var e=M[g];if(I(t,g),w.hasOwnProperty(g))w[g](A,e);else{var i=y.hasOwnProperty(g),N=t.hasOwnProperty(g),o="function"==typeof e,n=o&&!i&&!N&&M.autobind!==!1;if(n)t.__reactAutoBindMap||(t.__reactAutoBindMap={}),t.__reactAutoBindMap[g]=e,t[g]=e;else if(N){var c=y[g];!i||c!==x.DEFINE_MANY_MERGED&&c!==x.DEFINE_MANY?B(!1):void 0,c===x.DEFINE_MANY_MERGED?t[g]=T(t[g],e):c===x.DEFINE_MANY&&(t[g]=E(t[g],e))}else t[g]=e}}}}function e(A,M){if(M)for(var t in M){var 
I=M[t];if(M.hasOwnProperty(t)){var g=t in w;g?B(!1):void 0;var e=t in A;e?B(!1):void 0,A[t]=I}}}function i(A,M){A&&M&&"object"==typeof A&&"object"==typeof M?void 0:B(!1);for(var t in M)M.hasOwnProperty(t)&&(void 0!==A[t]?B(!1):void 0,A[t]=M[t]);return A}function T(A,M){return function(){var t=A.apply(this,arguments),I=M.apply(this,arguments);if(null==t)return I;if(null==I)return t;var g={};return i(g,t),i(g,I),g}}function E(A,M){return function(){A.apply(this,arguments),M.apply(this,arguments)}}function N(A,M){var t=M.bind(A);return t}function o(A){for(var M in A.__reactAutoBindMap)if(A.__reactAutoBindMap.hasOwnProperty(M)){var t=A.__reactAutoBindMap[M];A[M]=N(A,t)}}var n=t(206),C=t(13),c=(t(70),t(69),t(221)),D=t(4),a=t(50),B=t(2),Q=t(59),r=t(26),s=(t(3),r({mixins:null})),x=Q({DEFINE_ONCE:null,DEFINE_MANY:null,OVERRIDE_BASE:null,DEFINE_MANY_MERGED:null}),j=[],y={mixins:x.DEFINE_MANY,statics:x.DEFINE_MANY,propTypes:x.DEFINE_MANY,contextTypes:x.DEFINE_MANY,childContextTypes:x.DEFINE_MANY,getDefaultProps:x.DEFINE_MANY_MERGED,getInitialState:x.DEFINE_MANY_MERGED,getChildContext:x.DEFINE_MANY_MERGED,render:x.DEFINE_ONCE,componentWillMount:x.DEFINE_MANY,componentDidMount:x.DEFINE_MANY,componentWillReceiveProps:x.DEFINE_MANY,shouldComponentUpdate:x.DEFINE_ONCE,componentWillUpdate:x.DEFINE_MANY,componentDidUpdate:x.DEFINE_MANY,componentWillUnmount:x.DEFINE_MANY,updateComponent:x.OVERRIDE_BASE},w={displayName:function(A,M){A.displayName=M},mixins:function(A,M){if(M)for(var t=0;t"+T+""},receiveComponent:function(A,M){if(A!==this._currentElement){this._currentElement=A;var t=""+A;if(t!==this._stringText){this._stringText=t;var g=i.getNode(this._rootNodeID);I.updateTextContent(g,t)}}},unmountComponent:function(){e.unmountIDFromEnvironment(this._rootNodeID)}}),A.exports=o},function(A,M,t){"use strict";function I(){this.reinitializeTransaction()}var 
g=t(18),e=t(72),i=t(4),T=t(20),E={initialize:T,close:function(){C.isBatchingUpdates=!1}},N={initialize:T,close:g.flushBatchedUpdates.bind(g)},o=[N,E];i(I.prototype,e.Mixin,{getTransactionWrappers:function(){return o}});var n=new I,C={isBatchingUpdates:!1,batchedUpdates:function(A,M,t,I,g,e){var i=C.isBatchingUpdates;C.isBatchingUpdates=!0,i?A(M,t,I,g,e):n.perform(A,null,M,t,I,g,e)}};A.exports=C},function(A,M,t){"use strict";function I(){if(!L){L=!0,Q.EventEmitter.injectReactEventListener(B),Q.EventPluginHub.injectEventPluginOrder(T),Q.EventPluginHub.injectInstanceHandle(r),Q.EventPluginHub.injectMount(s),Q.EventPluginHub.injectEventPluginsByName({SimpleEventPlugin:w,EnterLeaveEventPlugin:E,ChangeEventPlugin:e,SelectEventPlugin:j,BeforeInputEventPlugin:g}),Q.NativeComponent.injectGenericComponentClass(D),Q.NativeComponent.injectTextComponentClass(a),Q.Class.injectMixin(n),Q.DOMProperty.injectDOMPropertyConfig(o),Q.DOMProperty.injectDOMPropertyConfig(u),Q.EmptyComponent.injectEmptyComponent("noscript"),Q.Updates.injectReconcileTransaction(x),Q.Updates.injectBatchingStrategy(c),Q.RootIndex.injectCreateReactRootIndex(N.canUseDOM?i.createReactRootIndex:y.createReactRootIndex),Q.Component.injectEnvironment(C)}}var g=t(421),e=t(423),i=t(424),T=t(426),E=t(427),N=t(10),o=t(430),n=t(432),C=t(108),c=t(211),D=t(436),a=t(210),B=t(444),Q=t(445),r=t(48),s=t(12),x=t(449),j=t(455),y=t(456),w=t(457),u=t(454),L=!1;A.exports={inject:I}},function(A,M,t){"use strict";function I(){if(n.current){var A=n.current.getName();if(A)return" Check the render method of ` + "`" + `"+A+"` + "`" + `."}return""}function g(A,M){if(A._store&&!A._store.validated&&null==A.key){A._store.validated=!0;e("uniqueKey",A,M)}}function e(A,M,t){var g=I();if(!g){var e="string"==typeof t?t:t.displayName||t.name;e&&(g=" Check the top-level render call using <"+e+">.")}var i=D[A]||(D[A]={});if(i[g])return null;i[g]=!0;var T={parentOrOwner:g,url:" See https://fb.me/react-warning-keys for more 
information.",childOwner:null};return M&&M._owner&&M._owner!==n.current&&(T.childOwner=" It was passed a child from "+M._owner.getName()+"."),T}function i(A,M){if("object"==typeof A)if(Array.isArray(A))for(var t=0;t/,e={CHECKSUM_ATTR_NAME:"data-react-checksum",addChecksumToMarkup:function(A){var M=I(A);return A.replace(g," "+e.CHECKSUM_ATTR_NAME+'="'+M+'"$&')},canReuseMarkup:function(A,M){var t=M.getAttribute(e.CHECKSUM_ATTR_NAME);t=t&&parseInt(t,10);var g=I(A);return g===t}};A.exports=e},function(A,M,t){"use strict";var I=t(59),g=I({INSERT_MARKUP:null,MOVE_EXISTING:null,REMOVE_NODE:null,SET_MARKUP:null,TEXT_CONTENT:null});A.exports=g},function(A,M,t){"use strict";function I(A){if("function"==typeof A.type)return A.type;var M=A.type,t=n[M];return null==t&&(n[M]=t=N(M)),t}function g(A){return o?void 0:E(!1),new o(A.type,A.props)}function e(A){return new C(A)}function i(A){return A instanceof C}var T=t(4),E=t(2),N=null,o=null,n={},C=null,c={injectGenericComponentClass:function(A){o=A},injectTextComponentClass:function(A){C=A},injectComponentClasses:function(A){T(n,A)}},D={getComponentClassForElement:I,createInternalComponent:g,createInstanceForText:e,isTextComponent:i,injection:c};A.exports=D},function(A,M,t){"use strict";function I(A,M){}var g=(t(3),{isMounted:function(A){return!1},enqueueCallback:function(A,M){},enqueueForceUpdate:function(A){I(A,"forceUpdate")},enqueueReplaceState:function(A,M){I(A,"replaceState")},enqueueSetState:function(A,M){I(A,"setState")},enqueueSetProps:function(A,M){I(A,"setProps")},enqueueReplaceProps:function(A,M){I(A,"replaceProps")}});A.exports=g},function(A,M,t){"use strict";function I(A){function M(M,t,I,g,e,i){if(g=g||y,i=i||I,null==t[I]){var T=s[e];return M?new Error("Required "+T+" ` + "`" + `"+i+"` + "`" + ` was not specified in "+("` + "`" + `"+g+"` + "`" + `.")):null}return A(t,I,g,e,i)}var t=M.bind(null,!1);return t.isRequired=M.bind(null,!0),t}function g(A){function M(M,t,I,g,e){var i=M[t],T=a(i);if(T!==A){var 
E=s[g],N=B(i);return new Error("Invalid "+E+" ` + "`" + `"+e+"` + "`" + ` of type "+("` + "`" + `"+N+"` + "`" + ` supplied to ` + "`" + `"+I+"` + "`" + `, expected ")+("` + "`" + `"+A+"` + "`" + `."))}return null}return I(M)}function e(){return I(x.thatReturns(null))}function i(A){function M(M,t,I,g,e){var i=M[t];if(!Array.isArray(i)){var T=s[g],E=a(i);return new Error("Invalid "+T+" ` + "`" + `"+e+"` + "`" + ` of type "+("` + "`" + `"+E+"` + "`" + ` supplied to ` + "`" + `"+I+"` + "`" + `, expected an array."))}for(var N=0;N>"}var r=t(13),s=t(69),x=t(20),j=t(117),y="<>",w={array:g("array"),bool:g("boolean"),func:g("function"),number:g("number"),object:g("object"),string:g("string"),any:e(),arrayOf:i,element:T(),instanceOf:E,node:C(),objectOf:o,oneOf:N,oneOfType:n,shape:c};A.exports=w},function(A,M){"use strict";var t={injectCreateReactRootIndex:function(A){I.createReactRootIndex=A}},I={createReactRootIndex:null,injection:t};A.exports=I},function(A,M){"use strict";var t={currentScrollLeft:0,currentScrollTop:0,refreshScrollValues:function(A){t.currentScrollLeft=A.x,t.currentScrollTop=A.y}};A.exports=t},function(A,M,t){"use strict";function I(A,M){if(null==M?g(!1):void 0,null==A)return M;var t=Array.isArray(A),I=Array.isArray(M);return t&&I?(A.push.apply(A,M),A):t?(A.push(M),A):I?[A].concat(M):[A,M]}var g=t(2);A.exports=I},function(A,M){"use strict";var t=function(A,M,t){Array.isArray(A)?A.forEach(M,t):A&&M.call(t,A)};A.exports=t},function(A,M,t){"use strict";function I(){return!e&&g.canUseDOM&&(e="textContent"in document.documentElement?"textContent":"innerText"),e}var g=t(10),e=null;A.exports=I},function(A,M){"use strict";function t(A){var M=A&&A.nodeName&&A.nodeName.toLowerCase();return M&&("input"===M&&I[A.type]||"textarea"===M)}var I={color:!0,date:!0,datetime:!0,"datetime-local":!0,email:!0,month:!0,number:!0,password:!0,range:!0,search:!0,tel:!0,text:!0,time:!0,url:!0,week:!0};A.exports=t},function(A,M,t){"use strict";function I(A){return 
A&&A.__esModule?A:{"default":A}}function g(){for(var A=arguments.length,M=Array(A),t=0;t=0&&s.splice(M,1)}function T(A){var M=document.createElement("style");return M.type="text/css",e(A,M),M}function E(A){var M=document.createElement("link");return M.rel="stylesheet",e(A,M),M}function N(A,M){var t,I,g;if(M.singleton){var e=r++;t=Q||(Q=T(M)),I=o.bind(null,t,e,!1),g=o.bind(null,t,e,!0)}else A.sourceMap&&"function"==typeof URL&&"function"==typeof URL.createObjectURL&&"function"==typeof URL.revokeObjectURL&&"function"==typeof Blob&&"function"==typeof btoa?(t=E(M),I=C.bind(null,t),g=function(){i(t),t.href&&URL.revokeObjectURL(t.href)}):(t=T(M),I=n.bind(null,t),g=function(){i(t)});return I(A),function(M){if(M){if(M.css===A.css&&M.media===A.media&&M.sourceMap===A.sourceMap)return;I(A=M)}else g()}}function o(A,M,t,I){var g=t?"":I.css;if(A.styleSheet)A.styleSheet.cssText=x(M,g);else{var e=document.createTextNode(g),i=A.childNodes;i[M]&&A.removeChild(i[M]),i.length?A.insertBefore(e,i[M]):A.appendChild(e)}}function n(A,M){var t=M.css,I=M.media;if(I&&A.setAttribute("media",I),A.styleSheet)A.styleSheet.cssText=t;else{for(;A.firstChild;)A.removeChild(A.firstChild);A.appendChild(document.createTextNode(t))}}function C(A,M){var t=M.css,I=M.sourceMap;I&&(t+="\n/*# sourceMappingURL=data:application/json;base64,"+btoa(unescape(encodeURIComponent(JSON.stringify(I))))+" */");var g=new Blob([t],{type:"text/css"}),e=A.href;A.href=URL.createObjectURL(g),e&&URL.revokeObjectURL(e)}var c={},D=function(A){var M;return function(){return"undefined"==typeof M&&(M=A.apply(this,arguments)),M}},a=D(function(){return/msie [6-9]\b/.test(window.navigator.userAgent.toLowerCase())}),B=D(function(){return document.head||document.getElementsByTagName("head")[0]}),Q=null,r=0,s=[];A.exports=function(A,M){M=M||{},"undefined"==typeof M.singleton&&(M.singleton=a()),"undefined"==typeof M.insertAt&&(M.insertAt="bottom");var t=g(A);return I(t,M),function(A){for(var e=[],i=0;i",'"',"` + "`" + `"," 
","\r","\n","\t"],B=["{","}","|","\\","^","` + "`" + `"].concat(a),Q=["'"].concat(B),r=["%","/","?",";","#"].concat(Q),s=["/","?","#"],x=255,j=/^[a-z0-9A-Z_-]{0,63}$/,y=/^([a-z0-9A-Z_-]{0,63})(.*)$/,w={javascript:!0,"javascript:":!0},u={javascript:!0,"javascript:":!0},L={http:!0,https:!0,ftp:!0,gopher:!0,file:!0,"http:":!0,"https:":!0,"ftp:":!0,"gopher:":!0,"file:":!0},Y=t(357);I.prototype.parse=function(A,M,t){if(!E(A))throw new TypeError("Parameter 'url' must be a string, not "+typeof A);var I=A;I=I.trim();var g=c.exec(I);if(g){g=g[0];var e=g.toLowerCase();this.protocol=e,I=I.substr(g.length)}if(t||g||I.match(/^\/\/[^@\/]+@[^@\/]+/)){var i="//"===I.substr(0,2);!i||g&&u[g]||(I=I.substr(2),this.slashes=!0)}if(!u[g]&&(i||g&&!L[g])){for(var T=-1,N=0;N127?"x":d[S];if(!h.match(j)){var U=B.slice(0,N),p=B.slice(N+1),O=d.match(y);O&&(U.push(O[1]),p.unshift(O[2])),p.length&&(I="/"+p.join(".")+I),this.hostname=U.join(".");break}}}if(this.hostname.length>x?this.hostname="":this.hostname=this.hostname.toLowerCase(),!a){for(var m=this.hostname.split("."),F=[],N=0;N0)&&t.host.split("@");B&&(t.auth=B.shift(),t.host=t.hostname=B.shift())}return t.search=A.search,t.query=A.query,o(t.pathname)&&o(t.search)||(t.path=(t.pathname?t.pathname:"")+(t.search?t.search:"")),t.href=t.format(),t}if(!D.length)return t.pathname=null,t.search?t.path="/"+t.search:t.path=null,t.href=t.format(),t;for(var Q=D.slice(-1)[0],r=(t.host||A.host)&&("."===Q||".."===Q)||""===Q,s=0,x=D.length;x>=0;x--)Q=D[x],"."==Q?D.splice(x,1):".."===Q?(D.splice(x,1),s++):s&&(D.splice(x,1),s--);if(!C&&!c)for(;s--;s)D.unshift("..");!C||""===D[0]||D[0]&&"/"===D[0].charAt(0)||D.unshift(""),r&&"/"!==D.join("/").substr(-1)&&D.push("");var j=""===D[0]||D[0]&&"/"===D[0].charAt(0);if(a){t.hostname=t.host=j?"":D.length?D.shift():"";var B=!!(t.host&&t.host.indexOf("@")>0)&&t.host.split("@");B&&(t.auth=B.shift(),t.host=t.hostname=B.shift())}return 
C=C||t.host&&D.length,C&&!j&&D.unshift(""),D.length?t.pathname=D.join("/"):(t.pathname=null,t.path=null),o(t.pathname)&&o(t.search)||(t.path=(t.pathname?t.pathname:"")+(t.search?t.search:"")),t.auth=A.auth||t.auth,t.slashes=t.slashes||A.slashes,t.href=t.format(),t},I.prototype.parseHost=function(){var A=this.host,M=D.exec(A);M&&(M=M[0],":"!==M&&(this.port=M.substr(1)),A=A.substr(0,A.length-M.length)),A&&(this.hostname=A)}},function(A,M){var t={animationIterationCount:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridRow:!0,gridColumn:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,stopOpacity:!0,strokeDashoffset:!0,strokeOpacity:!0,strokeWidth:!0};A.exports=function(A,M){return"number"!=typeof M||t[A]?M:M+"px"}},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M["default"]=A,M}function g(A){return A&&A.__esModule?A:{"default":A}}function e(A,M){if(!P.LoggedIn())return v.dispatch(p.setLoginRedirectPath(location.pathname)),void M(z.minioBrowserPrefix+"/login")}function i(A,M){P.LoggedIn()&&M(""+z.minioBrowserPrefix)}function T(){q<2&&setTimeout(function(){document.querySelector(".page-load").classList.add("pl-"+q),q++,T()},K[q])}t(485);var E=t(1),N=g(E),o=t(16),n=g(o),C=t(476),c=g(C),D=t(124),a=g(D),B=t(229),Q=g(B),r=t(189),s=g(r),x=t(190),j=g(x),y=t(100),w=g(y),u=t(186),L=g(u),Y=t(390),l=g(Y),d=t(23),h=g(d),S=t(42),z=(g(S),t(31)),U=t(19),p=I(U),O=t(252),m=g(O),F=t(242),f=g(F),k=t(239),R=g(k),J=t(484),G=(g(J),t(130)),H=g(G),b=t(60),X=g(b);window.Web=H["default"];var v=(0,Q["default"])(c["default"])(a["default"])(m["default"]),W=(0,h["default"])(function(A){return A})(R["default"]),V=(0,h["default"])(function(A){return A})(f["default"]),P=new 
H["default"](window.location.protocol+"//"+window.location.host+z.minioBrowserPrefix+"/webrpc",v.dispatch);window.web=P,v.dispatch(p.setWeb(P));var Z=function(A){return N["default"].createElement("div",null,A.children)};n["default"].render(N["default"].createElement(l["default"],{store:v,web:P},N["default"].createElement(j["default"],{history:w["default"]},N["default"].createElement(s["default"],{path:"/",component:Z},N["default"].createElement(s["default"],{path:"minio",component:Z},N["default"].createElement(L["default"],{component:W,onEnter:e}),N["default"].createElement(s["default"],{path:"login",component:V,onEnter:i}),N["default"].createElement(s["default"],{path:":bucket",component:W,onEnter:e}),N["default"].createElement(s["default"],{path:":bucket/*",component:W,onEnter:e}))))),document.getElementById("root"));var K=[0,400],q=0;T(),X["default"].getItem("newlyUpdated")&&(v.dispatch(p.showAlert({type:"success",message:"Updated to the latest UI Version."})),X["default"].removeItem("newlyUpdated"))},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M["default"]=A,M}function g(A){return A&&A.__esModule?A:{"default":A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=function(){function A(A,M){for(var t=0;t-1})))}},{key:"selectPrefix",value:function(A,M){var 
t=this.props,I=(t.dispatch,t.currentPath),g=(t.web,t.currentBucket);if(A.preventDefault(),M.endsWith("/")||""===M){if(M===I)return;D["default"].push(eA.pathJoin(g,M))}else window.location=window.location.origin+"/minio/download/"+g+"/"+M+"?token="+CA["default"].getItem("token")}},{key:"makeBucket",value:function(A){A.preventDefault();var M=this.refs.makeBucketRef.value;this.refs.makeBucketRef.value="";var t=this.props,I=t.web,g=t.dispatch;this.hideMakeBucketModal(),I.MakeBucket({bucketName:M}).then(function(){g(IA.addBucket(M)),g(IA.selectBucket(M))})["catch"](function(A){return g(IA.showAlert({type:"danger",message:A.message}))})}},{key:"hideMakeBucketModal",value:function(){var A=this.props.dispatch;A(IA.hideMakeBucketModal())}},{key:"showMakeBucketModal",value:function(A){A.preventDefault();var M=this.props.dispatch;M(IA.showMakeBucketModal())}},{key:"showAbout",value:function(A){A.preventDefault();var M=this.props.dispatch;M(IA.showAbout())}},{key:"hideAbout",value:function(A){A.preventDefault();var M=this.props.dispatch;M(IA.hideAbout())}},{key:"showBucketPolicy",value:function(A){A.preventDefault();var M=this.props.dispatch;M(IA.showBucketPolicy())}},{key:"hideBucketPolicy",value:function(A){A.preventDefault();var M=this.props.dispatch;M(IA.hideBucketPolicy())}},{key:"uploadFile",value:function(A){A.preventDefault();var M=this.props,t=M.dispatch,I=M.buckets;if(0===I.length)return void t(IA.showAlert({type:"danger",message:"Bucket needs to be created before trying to upload files."}));var g=A.target.files[0];A.target.value=null,this.xhr=new XMLHttpRequest,t(IA.uploadFile(g,this.xhr))}},{key:"removeObject",value:function(){var A=this,M=this.props,t=M.web,I=M.dispatch,g=M.currentPath,e=M.currentBucket,i=M.deleteConfirmation;t.RemoveObject({bucketName:e,objectName:i.object}).then(function(){A.hideDeleteConfirmation(),I(IA.selectPrefix(g))})["catch"](function(A){return 
I(IA.showAlert({type:"danger",message:A.message}))})}},{key:"hideAlert",value:function(A){A.preventDefault();var M=this.props.dispatch;M(IA.hideAlert())}},{key:"showDeleteConfirmation",value:function(A,M){A.preventDefault();var t=this.props.dispatch;t(IA.showDeleteConfirmation(M))}},{key:"hideDeleteConfirmation",value:function(){var A=this.props.dispatch;A(IA.hideDeleteConfirmation())}},{key:"shareObject",value:function(A,M,t){A.preventDefault();var I=this.props.dispatch;I(IA.shareObject(M,t))}},{key:"hideShareObjectModal",value:function(){var A=this.props.dispatch;A(IA.hideShareObject())}},{key:"dataType",value:function(A,M){return TA.getDataType(A,M)}},{key:"sortObjectsByName",value:function(A){var M=this.props,t=M.dispatch,I=M.objects,g=M.sortNameOrder;t(IA.setObjects(eA.sortObjectsByName(I,!g))),t(IA.setSortNameOrder(!g))}},{key:"sortObjectsBySize",value:function(){var A=this.props,M=A.dispatch,t=A.objects,I=A.sortSizeOrder;M(IA.setObjects(eA.sortObjectsBySize(t,!I))),M(IA.setSortSizeOrder(!I))}},{key:"sortObjectsByDate",value:function(){var A=this.props,M=A.dispatch,t=A.objects,I=A.sortDateOrder;M(IA.setObjects(eA.sortObjectsByDate(t,!I))),M(IA.setSortDateOrder(!I))}},{key:"logout",value:function(A){var M=this.props.web;A.preventDefault(),M.Logout(),D["default"].push(EA.minioBrowserPrefix+"/login")}},{key:"landingPage",value:function(A){A.preventDefault(),this.props.dispatch(IA.selectBucket(this.props.buckets[0]))}},{key:"fullScreen",value:function(A){A.preventDefault();var M=document.documentElement;M.requestFullscreen&&M.requestFullscreen(),M.mozRequestFullScreen&&M.mozRequestFullScreen(),M.webkitRequestFullscreen&&M.webkitRequestFullscreen(),M.msRequestFullscreen&&M.msRequestFullscreen()}},{key:"multiSelect",value:function(A){alert("yes")}},{key:"toggleSidebar",value:function(A){this.props.dispatch(IA.setSidebarStatus(A))}},{key:"hideSidebar",value:function(A){var M=A||window.event,t=M.srcElement||M.target;3===t.nodeType&&(t=t.parentNode);var 
I=t.id;"feh-trigger"!==I&&this.props.dispatch(IA.setSidebarStatus(!1))}},{key:"showSettings",value:function(A){A.preventDefault();var M=this.props.dispatch;M(IA.showSettings())}},{key:"showMessage",value:function(){var A=this.props.dispatch;A(IA.showAlert({type:"success",message:"Link copied to clipboard!"})),this.hideShareObjectModal()}},{key:"selectTexts",value:function(){this.refs.copyTextInput.select()}},{key:"render",value:function(){var A=this.props.storageInfo,M=A.total,t=A.free,I=this.props,g=I.showMakeBucketModal,e=I.alert,i=I.sortNameOrder,T=I.sortSizeOrder,E=I.sortDateOrder,N=I.showAbout,n=I.showBucketPolicy,c=this.props.serverInfo,D=c.version,a=c.memory,Q=c.platform,r=c.runtime,x=this.props.sidebarStatus,y=this.props.showSettings,u=this.props,Y=u.policies,d=u.currentBucket,S=(u.currentPath,this.props.deleteConfirmation),U=this.props.shareObject,p=y?o["default"].createElement(V["default"],null):o["default"].createElement("noscript",null),O=o["default"].createElement(L["default"],{className:(0,C["default"])({alert:!0,animated:!0,fadeInDown:e.show,fadeOutUp:!e.show}),bsStyle:e.type,onDismiss:this.hideAlert.bind(this)},o["default"].createElement("div",{className:"text-center"},e.message));e.message||(O="");var F=(o["default"].createElement(h["default"],{id:"tt-sign-out"},"Sign out"),o["default"].createElement(h["default"],{id:"tt-upload-file"},"Upload file")),k=o["default"].createElement(h["default"],{id:"tt-create-bucket"},"Create bucket"),J=M-t,H=J/M*100+"%";return 
o["default"].createElement("div",{className:(0,C["default"])({"file-explorer":!0,toggled:x})},o["default"].createElement(R["default"],{landingPage:this.landingPage.bind(this),searchBuckets:this.searchBuckets.bind(this),selectBucket:this.selectBucket.bind(this),clickOutside:this.hideSidebar.bind(this),showPolicy:this.showBucketPolicy.bind(this)}),o["default"].createElement("div",{className:"fe-body"},o["default"].createElement(m["default"],null,O,o["default"].createElement("header",{className:"fe-header-mobile hidden-lg hidden-md"},o["default"].createElement("div",{id:"feh-trigger",className:"feh-trigger "+(0,C["default"])({"feht-toggled":x}),onClick:this.toggleSidebar.bind(this,!x)},o["default"].createElement("div",{className:"feht-lines"},o["default"].createElement("div",{className:"top"}),o["default"].createElement("div",{className:"center"}),o["default"].createElement("div",{className:"bottom"}))),o["default"].createElement("img",{className:"mh-logo",src:MA["default"],alt:""})),o["default"].createElement("header",{className:"fe-header"},o["default"].createElement(G["default"],{selectPrefix:this.selectPrefix.bind(this)}),o["default"].createElement("div",{className:"feh-usage"},o["default"].createElement("div",{className:"fehu-chart"},o["default"].createElement("div",{style:{width:H}})),o["default"].createElement("ul",null,o["default"].createElement("li",null,"Used: ",B["default"].filesize(M-t)),o["default"].createElement("li",{className:"pull-right"},"Free: ",B["default"].filesize(M-J)))),o["default"].createElement("ul",{className:"feh-actions"},o["default"].createElement(b["default"],null),o["default"].createElement("li",null,o["default"].createElement(z["default"],{pullRight:!0,id:"top-right-menu"},o["default"].createElement(z["default"].Toggle,{noCaret:!0},o["default"].createElement("i",{className:"fa 
fa-reorder"})),o["default"].createElement(z["default"].Menu,{className:"dropdown-menu-right"},o["default"].createElement("li",null,o["default"].createElement("a",{target:"_blank",href:"https://github.com/minio/miniobrowser"},"Github ",o["default"].createElement("i",{className:"fa fa-github"}))),o["default"].createElement("li",null,o["default"].createElement("a",{href:"",onClick:this.fullScreen.bind(this)},"Fullscreen ",o["default"].createElement("i",{className:"fa fa-expand"}))),o["default"].createElement("li",null,o["default"].createElement("a",{target:"_blank",href:"https://docs.minio.io/"},"Documentation ",o["default"].createElement("i",{className:"fa fa-book"}))),o["default"].createElement("li",null,o["default"].createElement("a",{target:"_blank",href:"https://gitter.im/minio/minio"},"Ask for help ",o["default"].createElement("i",{className:"fa fa-question-circle"}))),o["default"].createElement("li",null,o["default"].createElement("a",{href:"",onClick:this.showAbout.bind(this)},"About ",o["default"].createElement("i",{className:"fa fa-info-circle"}))),o["default"].createElement("li",null,o["default"].createElement("a",{href:"",onClick:this.showSettings.bind(this)},"Settings ",o["default"].createElement("i",{className:"fa fa-cog"}))),o["default"].createElement("li",null,o["default"].createElement("a",{href:"",onClick:this.logout.bind(this)},"Sign Out ",o["default"].createElement("i",{className:"fa fa-sign-out"})))))))),o["default"].createElement("div",{className:"feb-container"},o["default"].createElement("header",{className:"fesl-row","data-type":"folder"},o["default"].createElement("div",{className:"fesl-item fi-name",onClick:this.sortObjectsByName.bind(this),"data-sort":"name"},"Name",o["default"].createElement("i",{className:(0,C["default"])({"fesli-sort":!0,fa:!0,"fa-sort-alpha-desc":i,"fa-sort-alpha-asc":!i})})),o["default"].createElement("div",{className:"fesl-item 
fi-size",onClick:this.sortObjectsBySize.bind(this),"data-sort":"size"},"Size",o["default"].createElement("i",{className:(0,C["default"])({"fesli-sort":!0,fa:!0,"fa-sort-amount-desc":T,"fa-sort-amount-asc":!T})})),o["default"].createElement("div",{className:"fesl-item fi-modified",onClick:this.sortObjectsByDate.bind(this),"data-sort":"last-modified"},"Last Modified",o["default"].createElement("i",{className:(0,C["default"])({"fesli-sort":!0,fa:!0,"fa-sort-numeric-desc":E,"fa-sort-numeric-asc":!E})})),o["default"].createElement("div",{className:"fesl-item fi-actions"}))),o["default"].createElement("div",{className:"feb-container"},o["default"].createElement(f["default"],{dataType:this.dataType.bind(this),selectPrefix:this.selectPrefix.bind(this),showDeleteConfirmation:this.showDeleteConfirmation.bind(this),shareObject:this.shareObject.bind(this)})),o["default"].createElement(v["default"],null),o["default"].createElement(z["default"],{dropup:!0,className:"feb-actions",id:"fe-action-toggle"},o["default"].createElement(z["default"].Toggle,{noCaret:!0,className:"feba-toggle"},o["default"].createElement("span",null,o["default"].createElement("i",{className:"fa fa-plus"}))),o["default"].createElement(z["default"].Menu,null,o["default"].createElement(l["default"],{placement:"left",overlay:F},o["default"].createElement("a",{href:"#",className:"feba-btn feba-upload"},o["default"].createElement("input",{type:"file",onChange:this.uploadFile.bind(this),style:{display:"none"},id:"file-input"}),o["default"].createElement("label",{htmlFor:"file-input"},o["default"].createElement("i",{className:"fa fa-cloud-upload"})))),o["default"].createElement(l["default"],{placement:"left",overlay:k},o["default"].createElement("a",{href:"#",className:"feba-btn feba-bucket",onClick:this.showMakeBucketModal.bind(this)},o["default"].createElement("i",{className:"fa 
fa-hdd-o"}))))),o["default"].createElement(s["default"],{className:"modal-create-bucket",bsSize:"small",animation:!1,show:g,onHide:this.hideMakeBucketModal.bind(this)},o["default"].createElement("button",{className:"close close-alt",onClick:this.hideMakeBucketModal.bind(this)},o["default"].createElement("span",null,"×")),o["default"].createElement(j["default"],null,o["default"].createElement("form",{onSubmit:this.makeBucket.bind(this)},o["default"].createElement("div",{className:"input-group"},o["default"].createElement("input",{className:"ig-text",type:"text",ref:"makeBucketRef",placeholder:"Bucket Name",autoFocus:!0}),o["default"].createElement("i",{className:"ig-helpers"}))))),o["default"].createElement(s["default"],{className:"modal-about modal-dark",animation:!1,show:N,onHide:this.hideAbout.bind(this)},o["default"].createElement("button",{className:"close",onClick:this.hideAbout.bind(this)},o["default"].createElement("span",null,"×")),o["default"].createElement("div",{className:"ma-inner"},o["default"].createElement("div",{className:"mai-item 
hidden-xs"},o["default"].createElement("a",{href:"https://minio.io",target:"_blank"},o["default"].createElement("img",{className:"maii-logo",src:MA["default"],alt:""}))),o["default"].createElement("div",{className:"mai-item"},o["default"].createElement("ul",{className:"maii-list"},o["default"].createElement("li",null,o["default"].createElement("div",null,"Version"),o["default"].createElement("small",null,D)),o["default"].createElement("li",null,o["default"].createElement("div",null,"Memory"),o["default"].createElement("small",null,a)),o["default"].createElement("li",null,o["default"].createElement("div",null,"Platform"),o["default"].createElement("small",null,Q)),o["default"].createElement("li",null,o["default"].createElement("div",null,"Runtime"),o["default"].createElement("small",null,r)))))),o["default"].createElement(s["default"],{className:"modal-policy",animation:!1,show:n,onHide:this.hideBucketPolicy.bind(this)},o["default"].createElement(w["default"],null,"Bucket Policy (",d,")",o["default"].createElement("button",{className:"close close-alt",onClick:this.hideBucketPolicy.bind(this)},o["default"].createElement("span",null,"×"))),o["default"].createElement("div",{className:"pm-body"},o["default"].createElement(Z["default"],{bucket:d}),Y.map(function(A,M){return o["default"].createElement(q["default"],{key:M,prefix:A.prefix,policy:A.policy})}))),o["default"].createElement($["default"],{ -show:S.show,icon:"fa fa-exclamation-triangle mci-red",text:"Are you sure you want to delete?",sub:"This cannot be undone!",okText:"Delete",cancelText:"Cancel",okHandler:this.removeObject.bind(this),cancelHandler:this.hideDeleteConfirmation.bind(this)}),o["default"].createElement(s["default"],{show:U.show,animation:!1,onHide:this.hideShareObjectModal.bind(this),bsSize:"small"},o["default"].createElement(j["default"],null,o["default"].createElement("div",{className:"copy-text"},o["default"].createElement("label",null,"Shareable 
Link"),o["default"].createElement("input",{type:"text",ref:"copyTextInput",readOnly:"readOnly",value:window.location.protocol+"//"+U.url,onClick:this.selectTexts.bind(this)}))),o["default"].createElement("div",{className:"modal-footer"},o["default"].createElement(oA["default"],{text:U.url,onCopy:this.showMessage.bind(this)},o["default"].createElement("button",{className:"btn btn-success"},"Copy Link")),o["default"].createElement("button",{className:"btn btn-link",onClick:this.hideShareObjectModal.bind(this)},"Cancel"))),p)))}}]),M}(o["default"].Component);M["default"]=cA},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(1),e=I(g),i=t(23),T=I(i),E=t(93),N=I(E),o=t(92),n=I(o),C=function(A){var M=A.latestUiVersion;return M===currentUiVersion?e["default"].createElement("noscript",null):e["default"].createElement("li",{className:"hidden-xs hidden-sm"},e["default"].createElement("a",{href:""},e["default"].createElement(n["default"],{placement:"left",overlay:e["default"].createElement(N["default"],{id:"tt-version-update"},"New update available. 
Click to refresh.")},e["default"].createElement("i",{className:"fa fa-refresh"}))))};M["default"]=(0,T["default"])(function(A){return{latestUiVersion:A.latestUiVersion}})(C)},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M["default"]=A,M}function g(A){return A&&A.__esModule?A:{"default":A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=function(){function A(A,M){for(var t=0;t0&&void 0!==arguments[0]?arguments[0]:{buckets:[],visibleBuckets:[],objects:[],storageInfo:{},serverInfo:{},currentBucket:"",currentPath:"",showMakeBucketModal:!1,uploads:{},alert:{show:!1,type:"danger",message:""},loginError:!1,sortNameOrder:!1,sortSizeOrder:!1,sortDateOrder:!1,latestUiVersion:currentUiVersion,sideBarActive:!1,loginRedirectPath:E.minioBrowserPrefix,settings:{accessKey:"",secretKey:"",secretKeyVisible:!1},showSettings:!1,policies:[],deleteConfirmation:{object:"",show:!1},shareObject:{show:!1,url:""}},M=arguments[1],t=Object.assign({},A);switch(M.type){case T.SET_WEB:t.web=M.web;break;case T.SET_BUCKETS:t.buckets=M.buckets;break;case T.ADD_BUCKET:t.buckets=[M.bucket].concat(e(t.buckets)),t.visibleBuckets=[M.bucket].concat(e(t.visibleBuckets));break;case T.SET_VISIBLE_BUCKETS:t.visibleBuckets=M.visibleBuckets;break;case T.SET_CURRENT_BUCKET:t.currentBucket=M.currentBucket;break;case 
T.SET_OBJECTS:t.objects=M.objects;break;case T.SET_CURRENT_PATH:t.currentPath=M.currentPath;break;case T.SET_STORAGE_INFO:t.storageInfo=M.storageInfo;break;case T.SET_SERVER_INFO:t.serverInfo=M.serverInfo;break;case T.SHOW_MAKEBUCKET_MODAL:t.showMakeBucketModal=M.showMakeBucketModal;break;case T.UPLOAD_PROGRESS:t.uploads=Object.assign({},t.uploads),t.uploads[M.slug].loaded=M.loaded;break;case T.ADD_UPLOAD:t.uploads=Object.assign({},t.uploads,g({},M.slug,{loaded:0,size:M.size,xhr:M.xhr,name:M.name}));break;case T.STOP_UPLOAD:t.uploads=Object.assign({},t.uploads),delete t.uploads[M.slug];break;case T.SET_ALERT:t.alert.alertTimeout&&clearTimeout(t.alert.alertTimeout),M.alert.show?t.alert=M.alert:t.alert=Object.assign({},t.alert,{show:!1});break;case T.SET_LOGIN_ERROR:t.loginError=!0;break;case T.SET_SHOW_ABORT_MODAL:t.showAbortModal=M.showAbortModal;break;case T.SHOW_ABOUT:t.showAbout=M.showAbout;break;case T.SET_SORT_NAME_ORDER:t.sortNameOrder=M.sortNameOrder;break;case T.SET_SORT_SIZE_ORDER:t.sortSizeOrder=M.sortSizeOrder;break;case T.SET_SORT_DATE_ORDER:t.sortDateOrder=M.sortDateOrder;break;case T.SET_LATEST_UI_VERSION:t.latestUiVersion=M.latestUiVersion;break;case T.SET_SIDEBAR_STATUS:t.sidebarStatus=M.sidebarStatus;break;case T.SET_LOGIN_REDIRECT_PATH:t.loginRedirectPath=M.path;case T.SET_LOAD_BUCKET:t.loadBucket=M.loadBucket;break;case T.SET_LOAD_PATH:t.loadPath=M.loadPath;break;case T.SHOW_SETTINGS:t.showSettings=M.showSettings; -break;case T.SET_SETTINGS:t.settings=Object.assign({},t.settings,M.settings);break;case T.SHOW_BUCKET_POLICY:t.showBucketPolicy=M.showBucketPolicy;break;case T.SET_POLICIES:t.policies=M.policies;break;case T.DELETE_CONFIRMATION:t.deleteConfirmation=Object.assign({},M.payload);break;case T.SET_SHARE_OBJECT:t.shareObject=Object.assign({},M.shareObject)}return t}},function(A,M,t){A.exports={"default":t(257),__esModule:!0}},function(A,M,t){t(267),A.exports=t(49).Object.assign},function(A,M,t){var I=t(79);A.exports=function(A,M){return 
I.create(A,M)}},function(A,M,t){t(268),A.exports=t(49).Object.keys},function(A,M,t){t(269),A.exports=t(49).Object.setPrototypeOf},function(A,M){A.exports=function(A){if("function"!=typeof A)throw TypeError(A+" is not a function!");return A}},function(A,M,t){var I=t(135);A.exports=function(A){if(!I(A))throw TypeError(A+" is not an object!");return A}},function(A,M){var t={}.toString;A.exports=function(A){return t.call(A).slice(8,-1)}},function(A,M){A.exports=function(A){if(void 0==A)throw TypeError("Can't call method on "+A);return A}},function(A,M){var t=A.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=t)},function(A,M,t){var I=t(260);A.exports=Object("z").propertyIsEnumerable(0)?Object:function(A){return"String"==I(A)?A.split(""):Object(A)}},function(A,M,t){var I=t(79),g=t(136),e=t(263);A.exports=t(134)(function(){var A=Object.assign,M={},t={},I=Symbol(),g="abcdefghijklmnopqrst";return M[I]=7,g.split("").forEach(function(A){t[A]=A}),7!=A({},M)[I]||Object.keys(A({},t)).join("")!=g})?function(A,M){for(var t=g(A),i=arguments,T=i.length,E=1,N=I.getKeys,o=I.getSymbols,n=I.isEnum;T>E;)for(var C,c=e(i[E++]),D=o?N(c).concat(o(c)):N(c),a=D.length,B=0;a>B;)n.call(c,C=D[B++])&&(t[C]=c[C]);return t}:Object.assign},function(A,M,t){var I=t(78),g=t(49),e=t(134);A.exports=function(A,M){var t=(g.Object||{})[A]||Object[A],i={};i[A]=M(t),I(I.S+I.F*e(function(){t(1)}),"Object",i)}},function(A,M,t){var I=t(79).getDesc,g=t(135),e=t(259),i=function(A,M){if(e(A),!g(M)&&null!==M)throw TypeError(M+": can't set as prototype!")};A.exports={set:Object.setPrototypeOf||("__proto__"in{}?function(A,M,g){try{g=t(133)(Function.call,I(Object.prototype,"__proto__").set,2),g(A,[]),M=!(A instanceof Array)}catch(e){M=!0}return function(A,t){return i(A,t),M?A.__proto__=t:g(A,t),A}}({},!1):void 0),check:i}},function(A,M,t){var I=t(78);I(I.S+I.F,"Object",{assign:t(264)})},function(A,M,t){var 
I=t(136);t(265)("keys",function(A){return function(M){return A(I(M))}})},function(A,M,t){var I=t(78);I(I.S,"Object",{setPrototypeOf:t(266).set})},function(A,M,t){function I(A){if(A)return g(A)}function g(A){for(var M in I.prototype)A[M]=I.prototype[M];return A}A.exports=I,I.prototype.on=I.prototype.addEventListener=function(A,M){return this._callbacks=this._callbacks||{},(this._callbacks["$"+A]=this._callbacks["$"+A]||[]).push(M),this},I.prototype.once=function(A,M){function t(){this.off(A,t),M.apply(this,arguments)}return t.fn=M,this.on(A,t),this},I.prototype.off=I.prototype.removeListener=I.prototype.removeAllListeners=I.prototype.removeEventListener=function(A,M){if(this._callbacks=this._callbacks||{},0==arguments.length)return this._callbacks={},this;var t=this._callbacks["$"+A];if(!t)return this;if(1==arguments.length)return delete this._callbacks["$"+A],this;for(var I,g=0;g-1&&A%1==0&&AA.clientHeight}Object.defineProperty(M,"__esModule",{value:!0}),M.default=i;var T=t(59),E=I(T),N=t(42),o=I(N);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M,t,I,g){var i=A[M],E="undefined"==typeof i?"undefined":e(i);return T.default.isValidElement(i)?new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of type ReactElement "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected an element type (a string ")+"or a ReactClass)."):"function"!==E&&"string"!==E?new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of value ` + "`" + `"+i+"` + "`" + ` "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected an element type (a string ")+"or a ReactClass)."):null}M.__esModule=!0;var e="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(A){return typeof A}:function(A){return A&&"function"==typeof Symbol&&A.constructor===Symbol?"symbol":typeof A},i=t(1),T=I(i),E=t(175),N=I(E);M.default=(0,N.default)(g)},function(A,M){"use strict";function t(A){function M(M,t,I,g,e,i){var T=g||"<>",E=i||I;if(null==t[I])return 
M?new Error("Required "+e+" ` + "`" + `"+E+"` + "`" + ` was not specified "+("in ` + "`" + `"+T+"` + "`" + `.")):null;for(var N=arguments.length,o=Array(N>6?N-6:0),n=6;n=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return 0===A.button}function i(A){return!!(A.metaKey||A.altKey||A.ctrlKey||A.shiftKey)}function T(A){for(var M in A)if(Object.prototype.hasOwnProperty.call(A,M))return!1;return!0}function E(A,M){var t=M.query,I=M.hash,g=M.state;return t||I||g?{pathname:A,query:t,hash:I,state:g}:A}M.__esModule=!0;var N=Object.assign||function(A){for(var M=1;M=0;I--){var g=A[I],e=g.path||"";if(t=e.replace(/\/*$/,"/")+t,0===e.indexOf("/"))break}return"/"+t}},propTypes:{path:C,from:C,to:C.isRequired,query:c,state:c,onEnter:o.falsy,children:o.falsy},render:function(){(0,T.default)(!1)}});M.default=D,A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}M.__esModule=!0;var g=t(1),e=I(g),i=t(9),T=I(i),E=t(28),N=t(34),o=e.default.PropTypes,n=o.string,C=o.func,c=e.default.createClass({displayName:"Route",statics:{createRouteFromReactElement:E.createRouteFromReactElement},propTypes:{path:n,component:N.component,components:N.components,getComponent:C,getComponents:C},render:function(){(0,T.default)(!1)}});M.default=c,A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){var t={};for(var I in A)M.indexOf(I)>=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return!A||!A.__v2_compatible__}function i(A){return A&&A.getCurrentLocation}M.__esModule=!0;var T=Object.assign||function(A){for(var M=1;M=0&&0===window.sessionStorage.length)return;throw A}}function i(A){var M=void 0;try{M=window.sessionStorage.getItem(g(A))}catch(A){if(A.name===o)return null}if(M)try{return JSON.parse(M)}catch(A){}return null}M.__esModule=!0,M.saveState=e,M.readState=i;var 
T=t(22),E=(I(T),"@@History/"),N=["QuotaExceededError","QUOTA_EXCEEDED_ERR"],o="SecurityError"},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){function M(A){return E.canUseDOM?void 0:T.default(!1),t.listen(A)}var t=n.default(e({getUserConfirmation:N.getUserConfirmation},A,{go:N.go}));return e({},t,{listen:M})}M.__esModule=!0;var e=Object.assign||function(A){for(var M=1;M1?M-1:0),e=1;e=A.childNodes.length?null:A.childNodes.item(t);A.insertBefore(M,I)}var g=t(431),e=t(213),i=t(17),T=t(74),E=t(123),N=t(2),o={dangerouslyReplaceNodeWithMarkup:g.dangerouslyReplaceNodeWithMarkup,updateTextContent:E,processUpdates:function(A,M){for(var t,i=null,o=null,n=0;n-1?void 0:i(!1),!N.plugins[t]){M.extractEvents?void 0:i(!1),N.plugins[t]=M;var I=M.eventTypes;for(var e in I)g(I[e],M,e)?void 0:i(!1)}}}function g(A,M,t){N.eventNameDispatchConfigs.hasOwnProperty(t)?i(!1):void 0,N.eventNameDispatchConfigs[t]=A;var I=A.phasedRegistrationNames;if(I){for(var g in I)if(I.hasOwnProperty(g)){var T=I[g];e(T,M,t)}return!0}return!!A.registrationName&&(e(A.registrationName,M,t),!0)}function e(A,M,t){N.registrationNameModules[A]?i(!1):void 0,N.registrationNameModules[A]=M,N.registrationNameDependencies[A]=M.eventTypes[t].dependencies}var i=t(2),T=null,E={},N={plugins:[],eventNameDispatchConfigs:{},registrationNameModules:{},registrationNameDependencies:{},injectEventPluginOrder:function(A){T?i(!1):void 0,T=Array.prototype.slice.call(A),I()},injectEventPluginsByName:function(A){var M=!1;for(var t in A)if(A.hasOwnProperty(t)){var g=A[t];E.hasOwnProperty(t)&&E[t]===g||(E[t]?i(!1):void 0,E[t]=g,M=!0)}M&&I()},getPluginModuleForEvent:function(A){var M=A.dispatchConfig;if(M.registrationName)return N.registrationNameModules[M.registrationName]||null;for(var t in M.phasedRegistrationNames)if(M.phasedRegistrationNames.hasOwnProperty(t)){var I=N.registrationNameModules[M.phasedRegistrationNames[t]];if(I)return I}return 
null},_resetEventPlugins:function(){T=null;for(var A in E)E.hasOwnProperty(A)&&delete E[A];N.plugins.length=0; +var M=N.eventNameDispatchConfigs;for(var t in M)M.hasOwnProperty(t)&&delete M[t];var I=N.registrationNameModules;for(var g in I)I.hasOwnProperty(g)&&delete I[g]}};A.exports=N},function(A,M,t){"use strict";function I(A){return(""+A).replace(x,"//")}function g(A,M){this.func=A,this.context=M,this.count=0}function e(A,M,t){var I=A.func,g=A.context;I.call(g,M,A.count++)}function i(A,M,t){if(null==A)return A;var I=g.getPooled(M,t);Q(A,e,I),g.release(I)}function T(A,M,t,I){this.result=A,this.keyPrefix=M,this.func=t,this.context=I,this.count=0}function E(A,M,t){var g=A.result,e=A.keyPrefix,i=A.func,T=A.context,E=i.call(T,M,A.count++);Array.isArray(E)?N(E,g,t,B.thatReturnsArgument):null!=E&&(a.isValidElement(E)&&(E=a.cloneAndReplaceKey(E,e+(E!==M?I(E.key||"")+"/":"")+t)),g.push(E))}function N(A,M,t,g,e){var i="";null!=t&&(i=I(t)+"/");var N=T.getPooled(M,i,g,e);Q(A,E,N),T.release(N)}function o(A,M,t){if(null==A)return A;var I=[];return N(A,I,null,M,t),I}function n(A,M,t){return null}function C(A,M){return Q(A,n,null)}function c(A){var M=[];return N(A,M,null,B.thatReturnsArgument),M}var D=t(29),a=t(13),B=t(23),Q=t(125),r=D.twoArgumentPooler,s=D.fourArgumentPooler,x=/\/(?!\/)/g;g.prototype.destructor=function(){this.func=null,this.context=null,this.count=0},D.addPoolingTo(g,r),T.prototype.destructor=function(){this.result=null,this.keyPrefix=null,this.func=null,this.context=null,this.count=0},D.addPoolingTo(T,s);var j={forEach:i,map:o,mapIntoWithKeyPrefixInternal:N,count:C,toArray:c};A.exports=j},function(A,M,t){"use strict";function I(A,M){var t=y.hasOwnProperty(M)?y[M]:null;w.hasOwnProperty(M)&&(t!==x.OVERRIDE_BASE?B(!1):void 0),A.hasOwnProperty(M)&&(t!==x.DEFINE_MANY&&t!==x.DEFINE_MANY_MERGED?B(!1):void 0)}function g(A,M){if(M){"function"==typeof M?B(!1):void 0,C.isValidElement(M)?B(!1):void 0;var t=A.prototype;M.hasOwnProperty(s)&&u.mixins(A,M.mixins);for(var g 
in M)if(M.hasOwnProperty(g)&&g!==s){var e=M[g];if(I(t,g),u.hasOwnProperty(g))u[g](A,e);else{var i=y.hasOwnProperty(g),N=t.hasOwnProperty(g),o="function"==typeof e,n=o&&!i&&!N&&M.autobind!==!1;if(n)t.__reactAutoBindMap||(t.__reactAutoBindMap={}),t.__reactAutoBindMap[g]=e,t[g]=e;else if(N){var c=y[g];!i||c!==x.DEFINE_MANY_MERGED&&c!==x.DEFINE_MANY?B(!1):void 0,c===x.DEFINE_MANY_MERGED?t[g]=T(t[g],e):c===x.DEFINE_MANY&&(t[g]=E(t[g],e))}else t[g]=e}}}}function e(A,M){if(M)for(var t in M){var I=M[t];if(M.hasOwnProperty(t)){var g=t in u;g?B(!1):void 0;var e=t in A;e?B(!1):void 0,A[t]=I}}}function i(A,M){A&&M&&"object"==typeof A&&"object"==typeof M?void 0:B(!1);for(var t in M)M.hasOwnProperty(t)&&(void 0!==A[t]?B(!1):void 0,A[t]=M[t]);return A}function T(A,M){return function(){var t=A.apply(this,arguments),I=M.apply(this,arguments);if(null==t)return I;if(null==I)return t;var g={};return i(g,t),i(g,I),g}}function E(A,M){return function(){A.apply(this,arguments),M.apply(this,arguments)}}function N(A,M){var t=M.bind(A);return t}function o(A){for(var M in A.__reactAutoBindMap)if(A.__reactAutoBindMap.hasOwnProperty(M)){var t=A.__reactAutoBindMap[M];A[M]=N(A,t)}}var n=t(200),C=t(13),c=(t(69),t(68),t(215)),D=t(3),a=t(56),B=t(2),Q=t(75),r=t(30),s=(t(4),r({mixins:null})),x=Q({DEFINE_ONCE:null,DEFINE_MANY:null,OVERRIDE_BASE:null,DEFINE_MANY_MERGED:null}),j=[],y={mixins:x.DEFINE_MANY,statics:x.DEFINE_MANY,propTypes:x.DEFINE_MANY,contextTypes:x.DEFINE_MANY,childContextTypes:x.DEFINE_MANY,getDefaultProps:x.DEFINE_MANY_MERGED,getInitialState:x.DEFINE_MANY_MERGED,getChildContext:x.DEFINE_MANY_MERGED,render:x.DEFINE_ONCE,componentWillMount:x.DEFINE_MANY,componentDidMount:x.DEFINE_MANY,componentWillReceiveProps:x.DEFINE_MANY,shouldComponentUpdate:x.DEFINE_ONCE,componentWillUpdate:x.DEFINE_MANY,componentDidUpdate:x.DEFINE_MANY,componentWillUnmount:x.DEFINE_MANY,updateComponent:x.OVERRIDE_BASE},u={displayName:function(A,M){A.displayName=M},mixins:function(A,M){if(M)for(var 
t=0;t"+T+""},receiveComponent:function(A,M){if(A!==this._currentElement){this._currentElement=A;var t=""+A;if(t!==this._stringText){this._stringText=t;var g=i.getNode(this._rootNodeID);I.updateTextContent(g,t)}}},unmountComponent:function(){e.unmountIDFromEnvironment(this._rootNodeID)}}),A.exports=o},function(A,M,t){"use strict";function I(){this.reinitializeTransaction()}var g=t(18),e=t(71),i=t(3),T=t(23),E={initialize:T,close:function(){C.isBatchingUpdates=!1}},N={initialize:T,close:g.flushBatchedUpdates.bind(g)},o=[N,E];i(I.prototype,e.Mixin,{getTransactionWrappers:function(){return o}});var n=new I,C={isBatchingUpdates:!1,batchedUpdates:function(A,M,t,I,g,e){var i=C.isBatchingUpdates;C.isBatchingUpdates=!0,i?A(M,t,I,g,e):n.perform(A,null,M,t,I,g,e)}};A.exports=C},function(A,M,t){"use strict";function I(){if(!L){L=!0,Q.EventEmitter.injectReactEventListener(B),Q.EventPluginHub.injectEventPluginOrder(T),Q.EventPluginHub.injectInstanceHandle(r),Q.EventPluginHub.injectMount(s),Q.EventPluginHub.injectEventPluginsByName({SimpleEventPlugin:u,EnterLeaveEventPlugin:E,ChangeEventPlugin:e,SelectEventPlugin:j,BeforeInputEventPlugin:g}),Q.NativeComponent.injectGenericComponentClass(D),Q.NativeComponent.injectTextComponentClass(a),Q.Class.injectMixin(n),Q.DOMProperty.injectDOMPropertyConfig(o),Q.DOMProperty.injectDOMPropertyConfig(w),Q.EmptyComponent.injectEmptyComponent("noscript"),Q.Updates.injectReconcileTransaction(x),Q.Updates.injectBatchingStrategy(c),Q.RootIndex.injectCreateReactRootIndex(N.canUseDOM?i.createReactRootIndex:y.createReactRootIndex),Q.Component.injectEnvironment(C)}}var g=t(427),e=t(429),i=t(430),T=t(432),E=t(433),N=t(10),o=t(436),n=t(438),C=t(111),c=t(205),D=t(442),a=t(204),B=t(450),Q=t(451),r=t(47),s=t(12),x=t(455),j=t(461),y=t(462),u=t(463),w=t(460),L=!1;A.exports={inject:I}},function(A,M,t){"use strict";function I(){if(n.current){var A=n.current.getName();if(A)return" Check the render method of ` + "`" + `"+A+"` + "`" + `."}return""}function 
g(A,M){if(A._store&&!A._store.validated&&null==A.key){A._store.validated=!0;e("uniqueKey",A,M)}}function e(A,M,t){var g=I();if(!g){var e="string"==typeof t?t:t.displayName||t.name;e&&(g=" Check the top-level render call using <"+e+">.")}var i=D[A]||(D[A]={});if(i[g])return null;i[g]=!0;var T={parentOrOwner:g,url:" See https://fb.me/react-warning-keys for more information.",childOwner:null};return M&&M._owner&&M._owner!==n.current&&(T.childOwner=" It was passed a child from "+M._owner.getName()+"."),T}function i(A,M){if("object"==typeof A)if(Array.isArray(A))for(var t=0;t/,e={CHECKSUM_ATTR_NAME:"data-react-checksum",addChecksumToMarkup:function(A){var M=I(A);return A.replace(g," "+e.CHECKSUM_ATTR_NAME+'="'+M+'"$&')},canReuseMarkup:function(A,M){var t=M.getAttribute(e.CHECKSUM_ATTR_NAME);t=t&&parseInt(t,10);var g=I(A);return g===t}};A.exports=e},function(A,M,t){"use strict";var I=t(75),g=I({INSERT_MARKUP:null,MOVE_EXISTING:null,REMOVE_NODE:null,SET_MARKUP:null,TEXT_CONTENT:null});A.exports=g},function(A,M,t){"use strict";function I(A){if("function"==typeof A.type)return A.type;var M=A.type,t=n[M];return null==t&&(n[M]=t=N(M)),t}function g(A){return o?void 0:E(!1),new o(A.type,A.props)}function e(A){return new C(A)}function i(A){return A instanceof C}var T=t(3),E=t(2),N=null,o=null,n={},C=null,c={injectGenericComponentClass:function(A){o=A},injectTextComponentClass:function(A){C=A},injectComponentClasses:function(A){T(n,A)}},D={getComponentClassForElement:I,createInternalComponent:g,createInstanceForText:e,isTextComponent:i,injection:c};A.exports=D},function(A,M,t){"use strict";function I(A,M){}var g=(t(4),{isMounted:function(A){return!1},enqueueCallback:function(A,M){},enqueueForceUpdate:function(A){I(A,"forceUpdate")},enqueueReplaceState:function(A,M){I(A,"replaceState")},enqueueSetState:function(A,M){I(A,"setState")},enqueueSetProps:function(A,M){I(A,"setProps")},enqueueReplaceProps:function(A,M){I(A,"replaceProps")}});A.exports=g},function(A,M,t){"use 
strict";function I(A){function M(M,t,I,g,e,i){if(g=g||y,i=i||I,null==t[I]){var T=s[e];return M?new Error("Required "+T+" ` + "`" + `"+i+"` + "`" + ` was not specified in "+("` + "`" + `"+g+"` + "`" + `.")):null}return A(t,I,g,e,i)}var t=M.bind(null,!1);return t.isRequired=M.bind(null,!0),t}function g(A){function M(M,t,I,g,e){var i=M[t],T=a(i);if(T!==A){var E=s[g],N=B(i);return new Error("Invalid "+E+" ` + "`" + `"+e+"` + "`" + ` of type "+("` + "`" + `"+N+"` + "`" + ` supplied to ` + "`" + `"+I+"` + "`" + `, expected ")+("` + "`" + `"+A+"` + "`" + `."))}return null}return I(M)}function e(){return I(x.thatReturns(null))}function i(A){function M(M,t,I,g,e){var i=M[t];if(!Array.isArray(i)){var T=s[g],E=a(i);return new Error("Invalid "+T+" ` + "`" + `"+e+"` + "`" + ` of type "+("` + "`" + `"+E+"` + "`" + ` supplied to ` + "`" + `"+I+"` + "`" + `, expected an array."))}for(var N=0;N>"}var r=t(13),s=t(68),x=t(23),j=t(120),y="<>",u={array:g("array"),bool:g("boolean"),func:g("function"),number:g("number"),object:g("object"),string:g("string"),any:e(),arrayOf:i,element:T(),instanceOf:E,node:C(),objectOf:o,oneOf:N,oneOfType:n,shape:c};A.exports=u},function(A,M){"use strict";var t={injectCreateReactRootIndex:function(A){I.createReactRootIndex=A}},I={createReactRootIndex:null,injection:t};A.exports=I},function(A,M){"use strict";var t={currentScrollLeft:0,currentScrollTop:0,refreshScrollValues:function(A){t.currentScrollLeft=A.x,t.currentScrollTop=A.y}};A.exports=t},function(A,M,t){"use strict";function I(A,M){if(null==M?g(!1):void 0,null==A)return M;var t=Array.isArray(A),I=Array.isArray(M);return t&&I?(A.push.apply(A,M),A):t?(A.push(M),A):I?[A].concat(M):[A,M]}var g=t(2);A.exports=I},function(A,M){"use strict";var t=function(A,M,t){Array.isArray(A)?A.forEach(M,t):A&&M.call(t,A)};A.exports=t},function(A,M,t){"use strict";function I(){return!e&&g.canUseDOM&&(e="textContent"in document.documentElement?"textContent":"innerText"),e}var 
g=t(10),e=null;A.exports=I},function(A,M){"use strict";function t(A){var M=A&&A.nodeName&&A.nodeName.toLowerCase();return M&&("input"===M&&I[A.type]||"textarea"===M)}var I={color:!0,date:!0,datetime:!0,"datetime-local":!0,email:!0,month:!0,number:!0,password:!0,range:!0,search:!0,tel:!0,text:!0,time:!0,url:!0,week:!0};A.exports=t},function(A,M,t){"use strict";var I=t(23),g={listen:function(A,M,t){return A.addEventListener?(A.addEventListener(M,t,!1),{remove:function(){A.removeEventListener(M,t,!1)}}):A.attachEvent?(A.attachEvent("on"+M,t),{remove:function(){A.detachEvent("on"+M,t)}}):void 0},capture:function(A,M,t){return A.addEventListener?(A.addEventListener(M,t,!0),{remove:function(){A.removeEventListener(M,t,!0)}}):{remove:I}},registerDefault:function(){}};A.exports=g},function(A,M,t){"use strict";function I(A,M){var t=!0;A:for(;t;){var I=A,e=M;if(t=!1,I&&e){if(I===e)return!0;if(g(I))return!1;if(g(e)){A=I,M=e.parentNode,t=!0;continue A}return I.contains?I.contains(e):!!I.compareDocumentPosition&&!!(16&I.compareDocumentPosition(e))}return!1}}var g=t(489);A.exports=I},function(A,M){"use strict";function t(A){try{A.focus()}catch(A){}}A.exports=t},function(A,M){"use strict";function t(){if("undefined"==typeof document)return null;try{return document.activeElement||document.body}catch(A){return document.body}}A.exports=t},function(A,M,t){"use strict";function I(A){return i?void 0:e(!1),C.hasOwnProperty(A)||(A="*"),T.hasOwnProperty(A)||("*"===A?i.innerHTML="":i.innerHTML="<"+A+">",T[A]=!i.firstChild),T[A]?C[A]:null}var g=t(10),e=t(2),i=g.canUseDOM?document.createElement("div"):null,T={},E=[1,'"],N=[1,"","
"],o=[3,"","
"],n=[1,'',""],C={"*":[1,"?
","
"],area:[1,"",""],col:[2,"","
"],legend:[1,"
","
"],param:[1,"",""],tr:[2,"","
"],optgroup:E,option:E,caption:N,colgroup:N,tbody:N,tfoot:N,thead:N,td:o,th:o},c=["circle","clipPath","defs","ellipse","g","image","line","linearGradient","mask","path","pattern","polygon","polyline","radialGradient","rect","stop","text","tspan"];c.forEach(function(A){C[A]=n,T[A]=!0}),A.exports=I},function(A,M){"use strict";function t(A,M){if(A===M)return!0;if("object"!=typeof A||null===A||"object"!=typeof M||null===M)return!1;var t=Object.keys(A),g=Object.keys(M);if(t.length!==g.length)return!1;for(var e=I.bind(M),i=0;i=0&&s.splice(M,1)}function T(A){var M=document.createElement("style");return M.type="text/css",e(A,M),M}function E(A){var M=document.createElement("link");return M.rel="stylesheet",e(A,M),M}function N(A,M){var t,I,g;if(M.singleton){var e=r++;t=Q||(Q=T(M)),I=o.bind(null,t,e,!1),g=o.bind(null,t,e,!0)}else A.sourceMap&&"function"==typeof URL&&"function"==typeof URL.createObjectURL&&"function"==typeof URL.revokeObjectURL&&"function"==typeof Blob&&"function"==typeof btoa?(t=E(M),I=C.bind(null,t),g=function(){i(t),t.href&&URL.revokeObjectURL(t.href)}):(t=T(M),I=n.bind(null,t),g=function(){i(t)});return I(A),function(M){if(M){if(M.css===A.css&&M.media===A.media&&M.sourceMap===A.sourceMap)return;I(A=M)}else g()}}function o(A,M,t,I){var g=t?"":I.css;if(A.styleSheet)A.styleSheet.cssText=x(M,g);else{var e=document.createTextNode(g),i=A.childNodes;i[M]&&A.removeChild(i[M]),i.length?A.insertBefore(e,i[M]):A.appendChild(e)}}function n(A,M){var t=M.css,I=M.media;if(I&&A.setAttribute("media",I),A.styleSheet)A.styleSheet.cssText=t;else{for(;A.firstChild;)A.removeChild(A.firstChild);A.appendChild(document.createTextNode(t))}}function C(A,M){var t=M.css,I=M.sourceMap;I&&(t+="\n/*# sourceMappingURL=data:application/json;base64,"+btoa(unescape(encodeURIComponent(JSON.stringify(I))))+" */");var g=new Blob([t],{type:"text/css"}),e=A.href;A.href=URL.createObjectURL(g),e&&URL.revokeObjectURL(e)}var c={},D=function(A){var M;return function(){return"undefined"==typeof 
M&&(M=A.apply(this,arguments)),M}},a=D(function(){return/msie [6-9]\b/.test(window.navigator.userAgent.toLowerCase())}),B=D(function(){return document.head||document.getElementsByTagName("head")[0]}),Q=null,r=0,s=[];A.exports=function(A,M){M=M||{},"undefined"==typeof M.singleton&&(M.singleton=a()),"undefined"==typeof M.insertAt&&(M.insertAt="bottom");var t=g(A);return I(t,M),function(A){for(var e=[],i=0;i",'"',"` + "`" + `"," ","\r","\n","\t"],B=["{","}","|","\\","^","` + "`" + `"].concat(a),Q=["'"].concat(B),r=["%","/","?",";","#"].concat(Q),s=["/","?","#"],x=255,j=/^[a-z0-9A-Z_-]{0,63}$/,y=/^([a-z0-9A-Z_-]{0,63})(.*)$/,u={javascript:!0,"javascript:":!0},w={javascript:!0,"javascript:":!0},L={http:!0,https:!0,ftp:!0,gopher:!0,file:!0,"http:":!0,"https:":!0,"ftp:":!0,"gopher:":!0,"file:":!0},l=t(278);I.prototype.parse=function(A,M,t){if(!E(A))throw new TypeError("Parameter 'url' must be a string, not "+typeof A);var I=A;I=I.trim();var g=c.exec(I);if(g){g=g[0];var e=g.toLowerCase();this.protocol=e,I=I.substr(g.length)}if(t||g||I.match(/^\/\/[^@\/]+@[^@\/]+/)){var i="//"===I.substr(0,2);!i||g&&w[g]||(I=I.substr(2),this.slashes=!0)}if(!w[g]&&(i||g&&!L[g])){for(var T=-1,N=0;N127?"x":d[S];if(!h.match(j)){var U=B.slice(0,N),p=B.slice(N+1),O=d.match(y);O&&(U.push(O[1]),p.unshift(O[2])),p.length&&(I="/"+p.join(".")+I),this.hostname=U.join(".");break}}}if(this.hostname.length>x?this.hostname="":this.hostname=this.hostname.toLowerCase(),!a){for(var m=this.hostname.split("."),F=[],N=0;N0)&&t.host.split("@");B&&(t.auth=B.shift(),t.host=t.hostname=B.shift())}return t.search=A.search,t.query=A.query,o(t.pathname)&&o(t.search)||(t.path=(t.pathname?t.pathname:"")+(t.search?t.search:"")),t.href=t.format(),t}if(!D.length)return t.pathname=null,t.search?t.path="/"+t.search:t.path=null,t.href=t.format(),t;for(var 
Q=D.slice(-1)[0],r=(t.host||A.host)&&("."===Q||".."===Q)||""===Q,s=0,x=D.length;x>=0;x--)Q=D[x],"."==Q?D.splice(x,1):".."===Q?(D.splice(x,1),s++):s&&(D.splice(x,1),s--);if(!C&&!c)for(;s--;s)D.unshift("..");!C||""===D[0]||D[0]&&"/"===D[0].charAt(0)||D.unshift(""),r&&"/"!==D.join("/").substr(-1)&&D.push("");var j=""===D[0]||D[0]&&"/"===D[0].charAt(0);if(a){t.hostname=t.host=j?"":D.length?D.shift():"";var B=!!(t.host&&t.host.indexOf("@")>0)&&t.host.split("@");B&&(t.auth=B.shift(),t.host=t.hostname=B.shift())}return C=C||t.host&&D.length,C&&!j&&D.unshift(""),D.length?t.pathname=D.join("/"):(t.pathname=null,t.path=null),o(t.pathname)&&o(t.search)||(t.path=(t.pathname?t.pathname:"")+(t.search?t.search:"")),t.auth=A.auth||t.auth,t.slashes=t.slashes||A.slashes,t.href=t.format(),t},I.prototype.parseHost=function(){var A=this.host,M=D.exec(A);M&&(M=M[0],":"!==M&&(this.port=M.substr(1)),A=A.substr(0,A.length-M.length)),A&&(this.hostname=A)}},function(A,M){A.exports=function(A){return A.webpackPolyfill||(A.deprecate=function(){},A.paths=[],A.children=[],A.webpackPolyfill=1),A}},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}function e(A,M,t){return P.LoggedIn()?t():(location.pathname!==z.minioBrowserPrefix&&location.pathname!==z.minioBrowserPrefix+"/"||M(z.minioBrowserPrefix+"/login"),t())}function i(A,M){P.LoggedIn()&&M(""+z.minioBrowserPrefix)}function T(){q<2&&setTimeout(function(){document.querySelector(".page-load").classList.add("pl-"+q),q++,T()},K[q])}t(504);var E=t(1),N=g(E),o=t(16),n=g(o),C=t(493),c=g(C),D=t(127),a=g(D),B=t(229),Q=g(B),r=t(183),s=g(r),x=t(184),j=g(x),y=t(103),u=g(y),w=t(180),L=g(w),l=t(394),Y=g(l),d=t(21),h=g(d),S=t(38),z=(g(S),t(31)),U=t(19),p=I(U),O=t(253),m=g(O),F=t(243),f=g(F),k=t(239),R=g(k),J=t(503),G=(g(J),t(131)),H=g(G),b=t(57),X=g(b);window.Web=H.default;var 
v=(0,Q.default)(c.default)(a.default)(m.default),W=(0,h.default)(function(A){return A})(R.default),V=(0,h.default)(function(A){return A})(f.default),P=new H.default(window.location.protocol+"//"+window.location.host+z.minioBrowserPrefix+"/webrpc",v.dispatch);window.web=P,v.dispatch(p.setWeb(P));var Z=function(A){return N.default.createElement("div",null,A.children)};n.default.render(N.default.createElement(Y.default,{store:v,web:P},N.default.createElement(j.default,{history:u.default},N.default.createElement(s.default,{path:"/",component:Z},N.default.createElement(s.default,{path:"minio",component:Z},N.default.createElement(L.default,{component:W,onEnter:e}),N.default.createElement(s.default,{path:"login",component:V,onEnter:i}),N.default.createElement(s.default,{path:":bucket",component:W,onEnter:e}),N.default.createElement(s.default,{path:":bucket/*",component:W,onEnter:e}))))),document.getElementById("root"));var K=[0,400],q=0;T(),X.default.getItem("newlyUpdated")&&(v.dispatch(p.showAlert({type:"success",message:"Updated to the latest UI Version."})),X.default.removeItem("newlyUpdated"))},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=function(){function 
A(A,M){for(var t=0;t-1})))}},{key:"selectPrefix",value:function(A,M){A.preventDefault();var t=this.props,I=(t.dispatch,t.currentPath),g=(t.web,t.currentBucket),e=encodeURI(M);if(M.endsWith("/")||""===M){if(M===I)return;D.default.push(TA.pathJoin(g,e))}else window.location=window.location.origin+"/minio/download/"+g+"/"+e+"?token="+DA.default.getItem("token")}},{key:"makeBucket",value:function(A){A.preventDefault();var M=this.refs.makeBucketRef.value;this.refs.makeBucketRef.value="";var t=this.props,I=t.web,g=t.dispatch;this.hideMakeBucketModal(),I.MakeBucket({bucketName:M}).then(function(){g(eA.addBucket(M)),g(eA.selectBucket(M))}).catch(function(A){return g(eA.showAlert({type:"danger",message:A.message}))})}},{key:"hideMakeBucketModal",value:function(){var A=this.props.dispatch;A(eA.hideMakeBucketModal())}},{key:"showMakeBucketModal",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.showMakeBucketModal())}},{key:"showAbout",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.showAbout())}},{key:"hideAbout",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.hideAbout())}},{key:"showBucketPolicy",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.showBucketPolicy())}},{key:"hideBucketPolicy",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.hideBucketPolicy())}},{key:"uploadFile",value:function(A){A.preventDefault();var M=this.props,t=M.dispatch,I=M.buckets;if(0===I.length)return void t(eA.showAlert({type:"danger",message:"Bucket needs to be created before trying to upload files."}));var g=A.target.files[0];A.target.value=null,this.xhr=new XMLHttpRequest,t(eA.uploadFile(g,this.xhr))}},{key:"removeObject",value:function(){var A=this,M=this.props,t=M.web,I=M.dispatch,g=M.currentPath,e=M.currentBucket,i=M.deleteConfirmation;t.RemoveObject({bucketName:e,objectName:i.object}).then(function(){A.hideDeleteConfirmation(),I(eA.selectPrefix(g))}).catch(function(A){return 
I(eA.showAlert({type:"danger",message:A.message}))})}},{key:"hideAlert",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.hideAlert())}},{key:"showDeleteConfirmation",value:function(A,M){A.preventDefault();var t=this.props.dispatch;t(eA.showDeleteConfirmation(M))}},{key:"hideDeleteConfirmation",value:function(){var A=this.props.dispatch;A(eA.hideDeleteConfirmation())}},{key:"shareObject",value:function(A,M){A.preventDefault();var t=this.props.dispatch;t(eA.shareObject(M))}},{key:"hideShareObjectModal",value:function(){var A=this.props.dispatch;A(eA.hideShareObject())}},{key:"dataType",value:function(A,M){return NA.getDataType(A,M)}},{key:"sortObjectsByName",value:function(A){var M=this.props,t=M.dispatch,I=M.objects,g=M.sortNameOrder;t(eA.setObjects(TA.sortObjectsByName(I,!g))),t(eA.setSortNameOrder(!g))}},{key:"sortObjectsBySize",value:function(){var A=this.props,M=A.dispatch,t=A.objects,I=A.sortSizeOrder;M(eA.setObjects(TA.sortObjectsBySize(t,!I))),M(eA.setSortSizeOrder(!I))}},{key:"sortObjectsByDate",value:function(){var A=this.props,M=A.dispatch,t=A.objects,I=A.sortDateOrder;M(eA.setObjects(TA.sortObjectsByDate(t,!I))),M(eA.setSortDateOrder(!I))}},{key:"logout",value:function(A){var M=this.props.web;A.preventDefault(),M.Logout(),D.default.push(oA.minioBrowserPrefix+"/login")}},{key:"landingPage",value:function(A){A.preventDefault(),this.props.dispatch(eA.selectBucket(this.props.buckets[0]))}},{key:"fullScreen",value:function(A){A.preventDefault();var M=document.documentElement;M.requestFullscreen&&M.requestFullscreen(),M.mozRequestFullScreen&&M.mozRequestFullScreen(),M.webkitRequestFullscreen&&M.webkitRequestFullscreen(),M.msRequestFullscreen&&M.msRequestFullscreen()}},{key:"toggleSidebar",value:function(A){this.props.dispatch(eA.setSidebarStatus(A))}},{key:"hideSidebar",value:function(A){var M=A||window.event,t=M.srcElement||M.target;3===t.nodeType&&(t=t.parentNode);var 
I=t.id;"feh-trigger"!==I&&this.props.dispatch(eA.setSidebarStatus(!1))}},{key:"showSettings",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.showSettings())}},{key:"showMessage",value:function(){var A=this.props.dispatch;A(eA.showAlert({type:"success",message:"Link copied to clipboard!"})),this.hideShareObjectModal()}},{key:"selectTexts",value:function(){this.refs.copyTextInput.select()}},{key:"handleExpireValue",value:function(A,M){M===-1?this.refs[A].stepDown(1):this.refs[A].stepUp(1),7==this.refs.expireDays.value&&(this.refs.expireHours.value=0,this.refs.expireMins.value=0)}},{key:"render",value:function(){var A=this.props.storageInfo,M=A.total,t=A.free,I=this.props,g=I.showMakeBucketModal,e=I.alert,i=I.sortNameOrder,T=I.sortSizeOrder,E=I.sortDateOrder,N=I.showAbout,n=I.showBucketPolicy,c=this.props.serverInfo,D=c.version,a=c.memory,Q=c.platform,r=c.runtime,x=this.props.sidebarStatus,y=this.props.showSettings,w=this.props,l=w.policies,d=w.currentBucket,S=(w.currentPath,this.props.deleteConfirmation),U=this.props.shareObject,p=this.props,O=p.web,F=p.prefixWritable,k=y?o.default.createElement(V.default,null):o.default.createElement("noscript",null),J=o.default.createElement(L.default,{className:(0,C.default)({alert:!0,animated:!0,fadeInDown:e.show,fadeOutUp:!e.show}),bsStyle:e.type,onDismiss:this.hideAlert.bind(this)},o.default.createElement("div",{className:"text-center"},e.message));e.message||(J="");var H=(o.default.createElement(h.default,{id:"tt-sign-out"},"Sign out"),o.default.createElement(h.default,{id:"tt-upload-file"},"Upload file")),X=o.default.createElement(h.default,{id:"tt-create-bucket"},"Create bucket"),W="",P="",K="",_=M-t,AA=_/M*100+"%";O.LoggedIn()?P=o.default.createElement($.default,{fullScreen:this.fullScreen.bind(this),showAbout:this.showAbout.bind(this),showSettings:this.showSettings.bind(this),logout:this.logout.bind(this)}):W=o.default.createElement("a",{className:"btn 
btn-danger",href:"/minio/login"},"Login"),O.LoggedIn()&&(K=o.default.createElement("div",{className:"feh-usage"},o.default.createElement("div",{className:"fehu-chart"},o.default.createElement("div",{style:{width:AA}})),o.default.createElement("ul",null,o.default.createElement("li",null,"Used:",B.default.filesize(M-t)),o.default.createElement("li",{className:"pull-right"},"Free:",B.default.filesize(M-_)))));var tA="";return O.LoggedIn()?tA=o.default.createElement(z.default,{dropup:!0,className:"feb-actions",id:"fe-action-toggle"},o.default.createElement(z.default.Toggle,{noCaret:!0,className:"feba-toggle"},o.default.createElement("span",null,o.default.createElement("i",{className:"fa fa-plus"}))),o.default.createElement(z.default.Menu,null,o.default.createElement(Y.default,{placement:"left",overlay:H},o.default.createElement("a",{href:"#",className:"feba-btn feba-upload"},o.default.createElement("input",{type:"file",onChange:this.uploadFile.bind(this),style:{display:"none"},id:"file-input"}),o.default.createElement("label",{htmlFor:"file-input"}," ",o.default.createElement("i",{className:"fa fa-cloud-upload"})," "))),o.default.createElement(Y.default,{placement:"left",overlay:X},o.default.createElement("a",{href:"#",className:"feba-btn feba-bucket",onClick:this.showMakeBucketModal.bind(this)},o.default.createElement("i",{className:"fa fa-hdd-o"}))))):F&&(tA=o.default.createElement(z.default,{dropup:!0,className:"feb-actions",id:"fe-action-toggle"},o.default.createElement(z.default.Toggle,{noCaret:!0,className:"feba-toggle"},o.default.createElement("span",null,o.default.createElement("i",{className:"fa fa-plus"}))),o.default.createElement(z.default.Menu,null,o.default.createElement(Y.default,{placement:"left",overlay:H},o.default.createElement("a",{href:"#",className:"feba-btn feba-upload"},o.default.createElement("input",{type:"file",onChange:this.uploadFile.bind(this),style:{display:"none"},id:"file-input"}),o.default.createElement("label",{htmlFor:"file-input"}," 
",o.default.createElement("i",{className:"fa fa-cloud-upload"})," ")))))),o.default.createElement("div",{className:(0,C.default)({"file-explorer":!0,toggled:x})},o.default.createElement(R.default,{landingPage:this.landingPage.bind(this),searchBuckets:this.searchBuckets.bind(this),selectBucket:this.selectBucket.bind(this),clickOutside:this.hideSidebar.bind(this),showPolicy:this.showBucketPolicy.bind(this)}),o.default.createElement("div",{className:"fe-body"},o.default.createElement(m.default,null,J,o.default.createElement("header",{className:"fe-header-mobile hidden-lg hidden-md"},o.default.createElement("div",{id:"feh-trigger",className:"feh-trigger "+(0,C.default)({"feht-toggled":x}),onClick:this.toggleSidebar.bind(this,!x)},o.default.createElement("div",{className:"feht-lines"},o.default.createElement("div",{className:"top"}),o.default.createElement("div",{className:"center"}),o.default.createElement("div",{className:"bottom"}))),o.default.createElement("img",{className:"mh-logo",src:IA.default,alt:""})),o.default.createElement("header",{className:"fe-header"},o.default.createElement(G.default,{selectPrefix:this.selectPrefix.bind(this)}),K,o.default.createElement("ul",{className:"feh-actions"},o.default.createElement(b.default,null),W,P)),o.default.createElement("div",{className:"feb-container"},o.default.createElement("header",{className:"fesl-row","data-type":"folder"},o.default.createElement("div",{className:"fesl-item fi-name",onClick:this.sortObjectsByName.bind(this),"data-sort":"name"},"Name",o.default.createElement("i",{className:(0,C.default)({"fesli-sort":!0,fa:!0,"fa-sort-alpha-desc":i,"fa-sort-alpha-asc":!i})})),o.default.createElement("div",{className:"fesl-item fi-size",onClick:this.sortObjectsBySize.bind(this),"data-sort":"size"},"Size",o.default.createElement("i",{className:(0,C.default)({"fesli-sort":!0,fa:!0,"fa-sort-amount-desc":T,"fa-sort-amount-asc":!T})})),o.default.createElement("div",{className:"fesl-item 
fi-modified",onClick:this.sortObjectsByDate.bind(this),"data-sort":"last-modified"},"Last Modified",o.default.createElement("i",{className:(0,C.default)({"fesli-sort":!0,fa:!0,"fa-sort-numeric-desc":E,"fa-sort-numeric-asc":!E})})),o.default.createElement("div",{className:"fesl-item fi-actions"}))),o.default.createElement("div",{className:"feb-container"},o.default.createElement(f.default,{dataType:this.dataType.bind(this),selectPrefix:this.selectPrefix.bind(this),showDeleteConfirmation:this.showDeleteConfirmation.bind(this),shareObject:this.shareObject.bind(this)})),o.default.createElement(v.default,null),tA,o.default.createElement(s.default,{className:"modal-create-bucket",bsSize:"small",animation:!1,show:g,onHide:this.hideMakeBucketModal.bind(this)},o.default.createElement("button",{className:"close close-alt",onClick:this.hideMakeBucketModal.bind(this)},o.default.createElement("span",null,"×")),o.default.createElement(j.default,null,o.default.createElement("form",{onSubmit:this.makeBucket.bind(this)},o.default.createElement("div",{className:"input-group"},o.default.createElement("input",{className:"ig-text",type:"text",ref:"makeBucketRef",placeholder:"Bucket Name",autoFocus:!0}),o.default.createElement("i",{className:"ig-helpers"}))))),o.default.createElement(s.default,{className:"modal-about modal-dark",animation:!1,show:N,onHide:this.hideAbout.bind(this)},o.default.createElement("button",{className:"close",onClick:this.hideAbout.bind(this)},o.default.createElement("span",null,"×")),o.default.createElement("div",{className:"ma-inner"},o.default.createElement("div",{className:"mai-item 
hidden-xs"},o.default.createElement("a",{href:"https://minio.io",target:"_blank"},o.default.createElement("img",{className:"maii-logo",src:IA.default,alt:""}))),o.default.createElement("div",{className:"mai-item"},o.default.createElement("ul",{className:"maii-list"},o.default.createElement("li",null,o.default.createElement("div",null,"Version"),o.default.createElement("small",null,D)),o.default.createElement("li",null,o.default.createElement("div",null,"Memory"),o.default.createElement("small",null,a)),o.default.createElement("li",null,o.default.createElement("div",null,"Platform"),o.default.createElement("small",null,Q)),o.default.createElement("li",null,o.default.createElement("div",null,"Runtime"),o.default.createElement("small",null,r)))))),o.default.createElement(s.default,{className:"modal-policy",animation:!1,show:n,onHide:this.hideBucketPolicy.bind(this)},o.default.createElement(u.default,null,"Bucket Policy (",d,")",o.default.createElement("button",{className:"close close-alt",onClick:this.hideBucketPolicy.bind(this)},o.default.createElement("span",null,"×"))),o.default.createElement("div",{className:"pm-body"},o.default.createElement(Z.default,{bucket:d}),l.map(function(A,M){return o.default.createElement(q.default,{key:M,prefix:A.prefix,policy:A.policy})}))),o.default.createElement(MA.default,{show:S.show,icon:"fa fa-exclamation-triangle mci-red",text:"Are you sure you want to delete?",sub:"This cannot be undone!",okText:"Delete",cancelText:"Cancel",okHandler:this.removeObject.bind(this),cancelHandler:this.hideDeleteConfirmation.bind(this)}),o.default.createElement(s.default,{show:U.show,animation:!1,onHide:this.hideShareObjectModal.bind(this),bsSize:"small"},o.default.createElement(u.default,null,"Share Object"),o.default.createElement(j.default,null,o.default.createElement("div",{className:"input-group copy-text"},o.default.createElement("label",null,"Shareable 
Link"),o.default.createElement("input",{type:"text",ref:"copyTextInput",readOnly:"readOnly",value:window.location.protocol+"//"+U.url,onClick:this.selectTexts.bind(this)})),o.default.createElement("div",{className:"input-group",style:{display:O.LoggedIn()?"block":"none"}},o.default.createElement("label",null,"Expires in"),o.default.createElement("div",{className:"set-expire"},o.default.createElement("div",{className:"set-expire-item"},o.default.createElement("i",{className:"set-expire-increase",onClick:this.handleExpireValue.bind(this,"expireDays",1)}),o.default.createElement("div",{className:"set-expire-title"},"Days"),o.default.createElement("div",{className:"set-expire-value"},o.default.createElement("input",{ref:"expireDays",type:"number",min:0,max:7,defaultValue:0})),o.default.createElement("i",{className:"set-expire-decrease",onClick:this.handleExpireValue.bind(this,"expireDays",-1)})),o.default.createElement("div",{className:"set-expire-item"},o.default.createElement("i",{className:"set-expire-increase",onClick:this.handleExpireValue.bind(this,"expireHours",1)}),o.default.createElement("div",{className:"set-expire-title"},"Hours"),o.default.createElement("div",{className:"set-expire-value"},o.default.createElement("input",{ref:"expireHours",type:"number",min:0,max:24,defaultValue:0})),o.default.createElement("i",{className:"set-expire-decrease",onClick:this.handleExpireValue.bind(this,"expireHours",-1)})),o.default.createElement("div",{className:"set-expire-item"},o.default.createElement("i",{className:"set-expire-increase",onClick:this.handleExpireValue.bind(this,"expireMins",1)}),o.default.createElement("div",{className:"set-expire-title"},"Minutes"),o.default.createElement("div",{className:"set-expire-value"},o.default.createElement("input",{ref:"expireMins",type:"number",min:1,max:60,defaultValue:45})),o.default.createElement("i",{className:"set-expire-decrease",onClick:this.handleExpireValue.bind(this,"expireMins",-1)}))))),o.default.createElement("div",
{className:"modal-footer"},o.default.createElement(CA.default,{text:U.url,onCopy:this.showMessage.bind(this)},o.default.createElement("button",{className:"btn btn-success"},"Copy Link")),o.default.createElement("button",{className:"btn btn-link",onClick:this.hideShareObjectModal.bind(this)},"Cancel"))),k)))}}]),M}(o.default.Component);M.default=aA},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(1),e=I(g),i=t(21),T=I(i),E=t(78),N=I(E),o=function(A){var M=A.fullScreen,t=A.showAbout,I=A.showSettings,g=A.logout;return e.default.createElement("li",null,e.default.createElement(N.default,{pullRight:!0,id:"top-right-menu"},e.default.createElement(N.default.Toggle,{noCaret:!0},e.default.createElement("i",{className:"fa fa-reorder"})),e.default.createElement(N.default.Menu,{className:"dropdown-menu-right"},e.default.createElement("li",null,e.default.createElement("a",{target:"_blank",href:"https://github.com/minio/miniobrowser"},"Github ",e.default.createElement("i",{className:"fa fa-github"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:M},"Fullscreen ",e.default.createElement("i",{className:"fa fa-expand"}))),e.default.createElement("li",null,e.default.createElement("a",{target:"_blank",href:"https://docs.minio.io/"},"Documentation ",e.default.createElement("i",{className:"fa fa-book"}))),e.default.createElement("li",null,e.default.createElement("a",{target:"_blank",href:"https://slack.minio.io"},"Ask for help ",e.default.createElement("i",{className:"fa fa-question-circle"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:t},"About ",e.default.createElement("i",{className:"fa fa-info-circle"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:I},"Settings ",e.default.createElement("i",{className:"fa 
fa-cog"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:g},"Sign Out ",e.default.createElement("i",{className:"fa fa-sign-out"}))))))};M.default=(0,T.default)(function(A){return A})(o)},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(1),e=I(g),i=t(21),T=I(i),E=t(82),N=I(E),o=t(81),n=I(o),C=function(A){var M=A.latestUiVersion;return M===currentUiVersion?e.default.createElement("noscript",null):e.default.createElement("li",{className:"hidden-xs hidden-sm"},e.default.createElement("a",{href:""},e.default.createElement(n.default,{placement:"left",overlay:e.default.createElement(N.default,{id:"tt-version-update"},"New update available. Click to refresh.")}," ",e.default.createElement("i",{className:"fa fa-refresh"})," ")))};M.default=(0,T.default)(function(A){return{latestUiVersion:A.latestUiVersion}})(C)},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=function(){function A(A,M){for(var t=0;t0&&void 
0!==arguments[0]?arguments[0]:{buckets:[],visibleBuckets:[],objects:[],storageInfo:{},serverInfo:{},currentBucket:"",currentPath:"",showMakeBucketModal:!1,uploads:{},alert:{show:!1,type:"danger",message:""},loginError:!1,sortNameOrder:!1,sortSizeOrder:!1,sortDateOrder:!1,latestUiVersion:currentUiVersion,sideBarActive:!1,loginRedirectPath:E.minioBrowserPrefix,settings:{accessKey:"",secretKey:"",secretKeyVisible:!1},showSettings:!1,policies:[],deleteConfirmation:{object:"",show:!1},shareObject:{show:!1,url:"",expiry:604800},prefixWritable:!1},M=arguments[1],t=Object.assign({},A);switch(M.type){case T.SET_WEB:t.web=M.web;break;case T.SET_BUCKETS:t.buckets=M.buckets;break;case T.ADD_BUCKET:t.buckets=[M.bucket].concat(e(t.buckets)),t.visibleBuckets=[M.bucket].concat(e(t.visibleBuckets));break;case T.SET_VISIBLE_BUCKETS:t.visibleBuckets=M.visibleBuckets;break;case T.SET_CURRENT_BUCKET:t.currentBucket=M.currentBucket;break;case T.SET_OBJECTS:t.objects=M.objects;break;case T.SET_CURRENT_PATH:t.currentPath=M.currentPath;break;case T.SET_STORAGE_INFO:t.storageInfo=M.storageInfo;break;case T.SET_SERVER_INFO:t.serverInfo=M.serverInfo;break;case T.SHOW_MAKEBUCKET_MODAL:t.showMakeBucketModal=M.showMakeBucketModal;break;case T.UPLOAD_PROGRESS:t.uploads=Object.assign({},t.uploads),t.uploads[M.slug].loaded=M.loaded;break;case T.ADD_UPLOAD:t.uploads=Object.assign({},t.uploads,g({},M.slug,{loaded:0,size:M.size,xhr:M.xhr,name:M.name}));break;case T.STOP_UPLOAD:t.uploads=Object.assign({},t.uploads),delete t.uploads[M.slug];break;case T.SET_ALERT:t.alert.alertTimeout&&clearTimeout(t.alert.alertTimeout),M.alert.show?t.alert=M.alert:t.alert=Object.assign({},t.alert,{show:!1});break;case T.SET_LOGIN_ERROR:t.loginError=!0;break;case T.SET_SHOW_ABORT_MODAL:t.showAbortModal=M.showAbortModal;break;case T.SHOW_ABOUT:t.showAbout=M.showAbout;break;case T.SET_SORT_NAME_ORDER:t.sortNameOrder=M.sortNameOrder;break;case T.SET_SORT_SIZE_ORDER:t.sortSizeOrder=M.sortSizeOrder;break;case 
T.SET_SORT_DATE_ORDER:t.sortDateOrder=M.sortDateOrder;break;case T.SET_LATEST_UI_VERSION:t.latestUiVersion=M.latestUiVersion;break;case T.SET_SIDEBAR_STATUS:t.sidebarStatus=M.sidebarStatus;break;case T.SET_LOGIN_REDIRECT_PATH:t.loginRedirectPath=M.path;case T.SET_LOAD_BUCKET:t.loadBucket=M.loadBucket;break;case T.SET_LOAD_PATH:t.loadPath=M.loadPath;break;case T.SHOW_SETTINGS:t.showSettings=M.showSettings;break;case T.SET_SETTINGS:t.settings=Object.assign({},t.settings,M.settings);break;case T.SHOW_BUCKET_POLICY:t.showBucketPolicy=M.showBucketPolicy;break;case T.SET_POLICIES:t.policies=M.policies;break;case T.DELETE_CONFIRMATION:t.deleteConfirmation=Object.assign({},M.payload);break;case T.SET_SHARE_OBJECT:t.shareObject=Object.assign({},M.shareObject);break;case T.SET_PREFIX_WRITABLE:t.prefixWritable=M.prefixWritable}return t}},function(A,M){/*! * cookie * Copyright(c) 2012-2014 Roman Shtylman * Copyright(c) 2015 Douglas Christopher Wilson * MIT Licensed */ -"use strict";function t(A,M){if("string"!=typeof A)throw new TypeError("argument str must be a string");for(var t={},I=M||{},i=A.split(T),E=I.decode||e,N=0;Nli{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes 
fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\\F000"}.fa-music:before{content:"\\F001"}.fa-search:before{content:"\\F002"}.fa-envelope-o:before{content:"\\F003"}.fa-heart:before{content:"\\F004"}.fa-star:before{content:"\\F005"}.fa-star-o:before{content:"\\F006"}.fa-user:before{content:"\\F007"}.fa-film:before{content:"\\F008"}.fa-th-large:before{content:"\\F009"}.fa-th:before{content:"\\F00A"}.fa-th-list:before{content:"\\F00B"}.fa-check:before{content:"\\F00C"}.fa-close:before,.fa-remove:before,.fa-times:before{content:"\\F00D"}.fa-search-plus:before{content:"\\F00E"}.fa-search-minus:before{content:"\\F010"}.fa-power-off:before{content:"\\F011"}.fa-signal:before{content:"\\F012"}.fa-cog:before,.fa-gear:before{content:"\\F013"}.fa-trash-o:before{content:"\\F014"
}.fa-home:before{content:"\\F015"}.fa-file-o:before{content:"\\F016"}.fa-clock-o:before{content:"\\F017"}.fa-road:before{content:"\\F018"}.fa-download:before{content:"\\F019"}.fa-arrow-circle-o-down:before{content:"\\F01A"}.fa-arrow-circle-o-up:before{content:"\\F01B"}.fa-inbox:before{content:"\\F01C"}.fa-play-circle-o:before{content:"\\F01D"}.fa-repeat:before,.fa-rotate-right:before{content:"\\F01E"}.fa-refresh:before{content:"\\F021"}.fa-list-alt:before{content:"\\F022"}.fa-lock:before{content:"\\F023"}.fa-flag:before{content:"\\F024"}.fa-headphones:before{content:"\\F025"}.fa-volume-off:before{content:"\\F026"}.fa-volume-down:before{content:"\\F027"}.fa-volume-up:before{content:"\\F028"}.fa-qrcode:before{content:"\\F029"}.fa-barcode:before{content:"\\F02A"}.fa-tag:before{content:"\\F02B"}.fa-tags:before{content:"\\F02C"}.fa-book:before{content:"\\F02D"}.fa-bookmark:before{content:"\\F02E"}.fa-print:before{content:"\\F02F"}.fa-camera:before{content:"\\F030"}.fa-font:before{content:"\\F031"}.fa-bold:before{content:"\\F032"}.fa-italic:before{content:"\\F033"}.fa-text-height:before{content:"\\F034"}.fa-text-width:before{content:"\\F035"}.fa-align-left:before{content:"\\F036"}.fa-align-center:before{content:"\\F037"}.fa-align-right:before{content:"\\F038"}.fa-align-justify:before{content:"\\F039"}.fa-list:before{content:"\\F03A"}.fa-dedent:before,.fa-outdent:before{content:"\\F03B"}.fa-indent:before{content:"\\F03C"}.fa-video-camera:before{content:"\\F03D"}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:"\\F03E"}.fa-pencil:before{content:"\\F040"}.fa-map-marker:before{content:"\\F041"}.fa-adjust:before{content:"\\F042"}.fa-tint:before{content:"\\F043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\\F044"}.fa-share-square-o:before{content:"\\F045"}.fa-check-square-o:before{content:"\\F046"}.fa-arrows:before{content:"\\F047"}.fa-step-backward:before{content:"\\F048"}.fa-fast-backward:before{content:"\\F049"}.fa-backward:before{content:"\\F04A"}.fa-p
lay:before{content:"\\F04B"}.fa-pause:before{content:"\\F04C"}.fa-stop:before{content:"\\F04D"}.fa-forward:before{content:"\\F04E"}.fa-fast-forward:before{content:"\\F050"}.fa-step-forward:before{content:"\\F051"}.fa-eject:before{content:"\\F052"}.fa-chevron-left:before{content:"\\F053"}.fa-chevron-right:before{content:"\\F054"}.fa-plus-circle:before{content:"\\F055"}.fa-minus-circle:before{content:"\\F056"}.fa-times-circle:before{content:"\\F057"}.fa-check-circle:before{content:"\\F058"}.fa-question-circle:before{content:"\\F059"}.fa-info-circle:before{content:"\\F05A"}.fa-crosshairs:before{content:"\\F05B"}.fa-times-circle-o:before{content:"\\F05C"}.fa-check-circle-o:before{content:"\\F05D"}.fa-ban:before{content:"\\F05E"}.fa-arrow-left:before{content:"\\F060"}.fa-arrow-right:before{content:"\\F061"}.fa-arrow-up:before{content:"\\F062"}.fa-arrow-down:before{content:"\\F063"}.fa-mail-forward:before,.fa-share:before{content:"\\F064"}.fa-expand:before{content:"\\F065"}.fa-compress:before{content:"\\F066"}.fa-plus:before{content:"\\F067"}.fa-minus:before{content:"\\F068"}.fa-asterisk:before{content:"\\F069"}.fa-exclamation-circle:before{content:"\\F06A"}.fa-gift:before{content:"\\F06B"}.fa-leaf:before{content:"\\F06C"}.fa-fire:before{content:"\\F06D"}.fa-eye:before{content:"\\F06E"}.fa-eye-slash:before{content:"\\F070"}.fa-exclamation-triangle:before,.fa-warning:before{content:"\\F071"}.fa-plane:before{content:"\\F072"}.fa-calendar:before{content:"\\F073"}.fa-random:before{content:"\\F074"}.fa-comment:before{content:"\\F075"}.fa-magnet:before{content:"\\F076"}.fa-chevron-up:before{content:"\\F077"}.fa-chevron-down:before{content:"\\F078"}.fa-retweet:before{content:"\\F079"}.fa-shopping-cart:before{content:"\\F07A"}.fa-folder:before{content:"\\F07B"}.fa-folder-open:before{content:"\\F07C"}.fa-arrows-v:before{content:"\\F07D"}.fa-arrows-h:before{content:"\\F07E"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\\F080"}.fa-twitter-square:before{content:"\\F081"}.fa-f
acebook-square:before{content:"\\F082"}.fa-camera-retro:before{content:"\\F083"}.fa-key:before{content:"\\F084"}.fa-cogs:before,.fa-gears:before{content:"\\F085"}.fa-comments:before{content:"\\F086"}.fa-thumbs-o-up:before{content:"\\F087"}.fa-thumbs-o-down:before{content:"\\F088"}.fa-star-half:before{content:"\\F089"}.fa-heart-o:before{content:"\\F08A"}.fa-sign-out:before{content:"\\F08B"}.fa-linkedin-square:before{content:"\\F08C"}.fa-thumb-tack:before{content:"\\F08D"}.fa-external-link:before{content:"\\F08E"}.fa-sign-in:before{content:"\\F090"}.fa-trophy:before{content:"\\F091"}.fa-github-square:before{content:"\\F092"}.fa-upload:before{content:"\\F093"}.fa-lemon-o:before{content:"\\F094"}.fa-phone:before{content:"\\F095"}.fa-square-o:before{content:"\\F096"}.fa-bookmark-o:before{content:"\\F097"}.fa-phone-square:before{content:"\\F098"}.fa-twitter:before{content:"\\F099"}.fa-facebook-f:before,.fa-facebook:before{content:"\\F09A"}.fa-github:before{content:"\\F09B"}.fa-unlock:before{content:"\\F09C"}.fa-credit-card:before{content:"\\F09D"}.fa-feed:before,.fa-rss:before{content:"\\F09E"}.fa-hdd-o:before{content:"\\F0A0"}.fa-bullhorn:before{content:"\\F0A1"}.fa-bell:before{content:"\\F0F3"}.fa-certificate:before{content:"\\F0A3"}.fa-hand-o-right:before{content:"\\F0A4"}.fa-hand-o-left:before{content:"\\F0A5"}.fa-hand-o-up:before{content:"\\F0A6"}.fa-hand-o-down:before{content:"\\F0A7"}.fa-arrow-circle-left:before{content:"\\F0A8"}.fa-arrow-circle-right:before{content:"\\F0A9"}.fa-arrow-circle-up:before{content:"\\F0AA"}.fa-arrow-circle-down:before{content:"\\F0AB"}.fa-globe:before{content:"\\F0AC"}.fa-wrench:before{content:"\\F0AD"}.fa-tasks:before{content:"\\F0AE"}.fa-filter:before{content:"\\F0B0"}.fa-briefcase:before{content:"\\F0B1"}.fa-arrows-alt:before{content:"\\F0B2"}.fa-group:before,.fa-users:before{content:"\\F0C0"}.fa-chain:before,.fa-link:before{content:"\\F0C1"}.fa-cloud:before{content:"\\F0C2"}.fa-flask:before{content:"\\F0C3"}.fa-cut:before,.fa-scisso
rs:before{content:"\\F0C4"}.fa-copy:before,.fa-files-o:before{content:"\\F0C5"}.fa-paperclip:before{content:"\\F0C6"}.fa-floppy-o:before,.fa-save:before{content:"\\F0C7"}.fa-square:before{content:"\\F0C8"}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:"\\F0C9"}.fa-list-ul:before{content:"\\F0CA"}.fa-list-ol:before{content:"\\F0CB"}.fa-strikethrough:before{content:"\\F0CC"}.fa-underline:before{content:"\\F0CD"}.fa-table:before{content:"\\F0CE"}.fa-magic:before{content:"\\F0D0"}.fa-truck:before{content:"\\F0D1"}.fa-pinterest:before{content:"\\F0D2"}.fa-pinterest-square:before{content:"\\F0D3"}.fa-google-plus-square:before{content:"\\F0D4"}.fa-google-plus:before{content:"\\F0D5"}.fa-money:before{content:"\\F0D6"}.fa-caret-down:before{content:"\\F0D7"}.fa-caret-up:before{content:"\\F0D8"}.fa-caret-left:before{content:"\\F0D9"}.fa-caret-right:before{content:"\\F0DA"}.fa-columns:before{content:"\\F0DB"}.fa-sort:before,.fa-unsorted:before{content:"\\F0DC"}.fa-sort-desc:before,.fa-sort-down:before{content:"\\F0DD"}.fa-sort-asc:before,.fa-sort-up:before{content:"\\F0DE"}.fa-envelope:before{content:"\\F0E0"}.fa-linkedin:before{content:"\\F0E1"}.fa-rotate-left:before,.fa-undo:before{content:"\\F0E2"}.fa-gavel:before,.fa-legal:before{content:"\\F0E3"}.fa-dashboard:before,.fa-tachometer:before{content:"\\F0E4"}.fa-comment-o:before{content:"\\F0E5"}.fa-comments-o:before{content:"\\F0E6"}.fa-bolt:before,.fa-flash:before{content:"\\F0E7"}.fa-sitemap:before{content:"\\F0E8"}.fa-umbrella:before{content:"\\F0E9"}.fa-clipboard:before,.fa-paste:before{content:"\\F0EA"}.fa-lightbulb-o:before{content:"\\F0EB"}.fa-exchange:before{content:"\\F0EC"}.fa-cloud-download:before{content:"\\F0ED"}.fa-cloud-upload:before{content:"\\F0EE"}.fa-user-md:before{content:"\\F0F0"}.fa-stethoscope:before{content:"\\F0F1"}.fa-suitcase:before{content:"\\F0F2"}.fa-bell-o:before{content:"\\F0A2"}.fa-coffee:before{content:"\\F0F4"}.fa-cutlery:before{content:"\\F0F5"}.fa-file-text-o:before{content:
"\\F0F6"}.fa-building-o:before{content:"\\F0F7"}.fa-hospital-o:before{content:"\\F0F8"}.fa-ambulance:before{content:"\\F0F9"}.fa-medkit:before{content:"\\F0FA"}.fa-fighter-jet:before{content:"\\F0FB"}.fa-beer:before{content:"\\F0FC"}.fa-h-square:before{content:"\\F0FD"}.fa-plus-square:before{content:"\\F0FE"}.fa-angle-double-left:before{content:"\\F100"}.fa-angle-double-right:before{content:"\\F101"}.fa-angle-double-up:before{content:"\\F102"}.fa-angle-double-down:before{content:"\\F103"}.fa-angle-left:before{content:"\\F104"}.fa-angle-right:before{content:"\\F105"}.fa-angle-up:before{content:"\\F106"}.fa-angle-down:before{content:"\\F107"}.fa-desktop:before{content:"\\F108"}.fa-laptop:before{content:"\\F109"}.fa-tablet:before{content:"\\F10A"}.fa-mobile-phone:before,.fa-mobile:before{content:"\\F10B"}.fa-circle-o:before{content:"\\F10C"}.fa-quote-left:before{content:"\\F10D"}.fa-quote-right:before{content:"\\F10E"}.fa-spinner:before{content:"\\F110"}.fa-circle:before{content:"\\F111"}.fa-mail-reply:before,.fa-reply:before{content:"\\F112"}.fa-github-alt:before{content:"\\F113"}.fa-folder-o:before{content:"\\F114"}.fa-folder-open-o:before{content:"\\F115"}.fa-smile-o:before{content:"\\F118"}.fa-frown-o:before{content:"\\F119"}.fa-meh-o:before{content:"\\F11A"}.fa-gamepad:before{content:"\\F11B"}.fa-keyboard-o:before{content:"\\F11C"}.fa-flag-o:before{content:"\\F11D"}.fa-flag-checkered:before{content:"\\F11E"}.fa-terminal:before{content:"\\F120"}.fa-code:before{content:"\\F121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\\F122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\\F123"}.fa-location-arrow:before{content:"\\F124"}.fa-crop:before{content:"\\F125"}.fa-code-fork:before{content:"\\F126"}.fa-chain-broken:before,.fa-unlink:before{content:"\\F127"}.fa-question:before{content:"\\F128"}.fa-info:before{content:"\\F129"}.fa-exclamation:before{content:"\\F12A"}.fa-superscript:before{content:"\\F12B"}.fa-subscript:bef
ore{content:"\\F12C"}.fa-eraser:before{content:"\\F12D"}.fa-puzzle-piece:before{content:"\\F12E"}.fa-microphone:before{content:"\\F130"}.fa-microphone-slash:before{content:"\\F131"}.fa-shield:before{content:"\\F132"}.fa-calendar-o:before{content:"\\F133"}.fa-fire-extinguisher:before{content:"\\F134"}.fa-rocket:before{content:"\\F135"}.fa-maxcdn:before{content:"\\F136"}.fa-chevron-circle-left:before{content:"\\F137"}.fa-chevron-circle-right:before{content:"\\F138"}.fa-chevron-circle-up:before{content:"\\F139"}.fa-chevron-circle-down:before{content:"\\F13A"}.fa-html5:before{content:"\\F13B"}.fa-css3:before{content:"\\F13C"}.fa-anchor:before{content:"\\F13D"}.fa-unlock-alt:before{content:"\\F13E"}.fa-bullseye:before{content:"\\F140"}.fa-ellipsis-h:before{content:"\\F141"}.fa-ellipsis-v:before{content:"\\F142"}.fa-rss-square:before{content:"\\F143"}.fa-play-circle:before{content:"\\F144"}.fa-ticket:before{content:"\\F145"}.fa-minus-square:before{content:"\\F146"}.fa-minus-square-o:before{content:"\\F147"}.fa-level-up:before{content:"\\F148"}.fa-level-down:before{content:"\\F149"}.fa-check-square:before{content:"\\F14A"}.fa-pencil-square:before{content:"\\F14B"}.fa-external-link-square:before{content:"\\F14C"}.fa-share-square:before{content:"\\F14D"}.fa-compass:before{content:"\\F14E"}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:"\\F150"}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:"\\F151"}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:"\\F152"}.fa-eur:before,.fa-euro:before{content:"\\F153"}.fa-gbp:before{content:"\\F154"}.fa-dollar:before,.fa-usd:before{content:"\\F155"}.fa-inr:before,.fa-rupee:before{content:"\\F156"}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:"\\F157"}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:"\\F158"}.fa-krw:before,.fa-won:before{content:"\\F159"}.fa-bitcoin:before,.fa-btc:before{content:"\\F15A"}.fa-file:before{content:"\\F15B"}.fa-file-text:before{content:"\\F1
5C"}.fa-sort-alpha-asc:before{content:"\\F15D"}.fa-sort-alpha-desc:before{content:"\\F15E"}.fa-sort-amount-asc:before{content:"\\F160"}.fa-sort-amount-desc:before{content:"\\F161"}.fa-sort-numeric-asc:before{content:"\\F162"}.fa-sort-numeric-desc:before{content:"\\F163"}.fa-thumbs-up:before{content:"\\F164"}.fa-thumbs-down:before{content:"\\F165"}.fa-youtube-square:before{content:"\\F166"}.fa-youtube:before{content:"\\F167"}.fa-xing:before{content:"\\F168"}.fa-xing-square:before{content:"\\F169"}.fa-youtube-play:before{content:"\\F16A"}.fa-dropbox:before{content:"\\F16B"}.fa-stack-overflow:before{content:"\\F16C"}.fa-instagram:before{content:"\\F16D"}.fa-flickr:before{content:"\\F16E"}.fa-adn:before{content:"\\F170"}.fa-bitbucket:before{content:"\\F171"}.fa-bitbucket-square:before{content:"\\F172"}.fa-tumblr:before{content:"\\F173"}.fa-tumblr-square:before{content:"\\F174"}.fa-long-arrow-down:before{content:"\\F175"}.fa-long-arrow-up:before{content:"\\F176"}.fa-long-arrow-left:before{content:"\\F177"}.fa-long-arrow-right:before{content:"\\F178"}.fa-apple:before{content:"\\F179"}.fa-windows:before{content:"\\F17A"}.fa-android:before{content:"\\F17B"}.fa-linux:before{content:"\\F17C"}.fa-dribbble:before{content:"\\F17D"}.fa-skype:before{content:"\\F17E"}.fa-foursquare:before{content:"\\F180"}.fa-trello:before{content:"\\F181"}.fa-female:before{content:"\\F182"}.fa-male:before{content:"\\F183"}.fa-gittip:before,.fa-gratipay:before{content:"\\F184"}.fa-sun-o:before{content:"\\F185"}.fa-moon-o:before{content:"\\F186"}.fa-archive:before{content:"\\F187"}.fa-bug:before{content:"\\F188"}.fa-vk:before{content:"\\F189"}.fa-weibo:before{content:"\\F18A"}.fa-renren:before{content:"\\F18B"}.fa-pagelines:before{content:"\\F18C"}.fa-stack-exchange:before{content:"\\F18D"}.fa-arrow-circle-o-right:before{content:"\\F18E"}.fa-arrow-circle-o-left:before{content:"\\F190"}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:"\\F191"}.fa-dot-circle-o:before{content:"\\F192"}.fa-
wheelchair:before{content:"\\F193"}.fa-vimeo-square:before{content:"\\F194"}.fa-try:before,.fa-turkish-lira:before{content:"\\F195"}.fa-plus-square-o:before{content:"\\F196"}.fa-space-shuttle:before{content:"\\F197"}.fa-slack:before{content:"\\F198"}.fa-envelope-square:before{content:"\\F199"}.fa-wordpress:before{content:"\\F19A"}.fa-openid:before{content:"\\F19B"}.fa-bank:before,.fa-institution:before,.fa-university:before{content:"\\F19C"}.fa-graduation-cap:before,.fa-mortar-board:before{content:"\\F19D"}.fa-yahoo:before{content:"\\F19E"}.fa-google:before{content:"\\F1A0"}.fa-reddit:before{content:"\\F1A1"}.fa-reddit-square:before{content:"\\F1A2"}.fa-stumbleupon-circle:before{content:"\\F1A3"}.fa-stumbleupon:before{content:"\\F1A4"}.fa-delicious:before{content:"\\F1A5"}.fa-digg:before{content:"\\F1A6"}.fa-pied-piper-pp:before{content:"\\F1A7"}.fa-pied-piper-alt:before{content:"\\F1A8"}.fa-drupal:before{content:"\\F1A9"}.fa-joomla:before{content:"\\F1AA"}.fa-language:before{content:"\\F1AB"}.fa-fax:before{content:"\\F1AC"}.fa-building:before{content:"\\F1AD"}.fa-child:before{content:"\\F1AE"}.fa-paw:before{content:"\\F1B0"}.fa-spoon:before{content:"\\F1B1"}.fa-cube:before{content:"\\F1B2"}.fa-cubes:before{content:"\\F1B3"}.fa-behance:before{content:"\\F1B4"}.fa-behance-square:before{content:"\\F1B5"}.fa-steam:before{content:"\\F1B6"}.fa-steam-square:before{content:"\\F1B7"}.fa-recycle:before{content:"\\F1B8"}.fa-automobile:before,.fa-car:before{content:"\\F1B9"}.fa-cab:before,.fa-taxi:before{content:"\\F1BA"}.fa-tree:before{content:"\\F1BB"}.fa-spotify:before{content:"\\F1BC"}.fa-deviantart:before{content:"\\F1BD"}.fa-soundcloud:before{content:"\\F1BE"}.fa-database:before{content:"\\F1C0"}.fa-file-pdf-o:before{content:"\\F1C1"}.fa-file-word-o:before{content:"\\F1C2"}.fa-file-excel-o:before{content:"\\F1C3"}.fa-file-powerpoint-o:before{content:"\\F1C4"}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:"\\F1C5"}.fa-file-archive-o:befo
re,.fa-file-zip-o:before{content:"\\F1C6"}.fa-file-audio-o:before,.fa-file-sound-o:before{content:"\\F1C7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\\F1C8"}.fa-file-code-o:before{content:"\\F1C9"}.fa-vine:before{content:"\\F1CA"}.fa-codepen:before{content:"\\F1CB"}.fa-jsfiddle:before{content:"\\F1CC"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:"\\F1CD"}.fa-circle-o-notch:before{content:"\\F1CE"}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:"\\F1D0"}.fa-empire:before,.fa-ge:before{content:"\\F1D1"}.fa-git-square:before{content:"\\F1D2"}.fa-git:before{content:"\\F1D3"}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:"\\F1D4"}.fa-tencent-weibo:before{content:"\\F1D5"}.fa-qq:before{content:"\\F1D6"}.fa-wechat:before,.fa-weixin:before{content:"\\F1D7"}.fa-paper-plane:before,.fa-send:before{content:"\\F1D8"}.fa-paper-plane-o:before,.fa-send-o:before{content:"\\F1D9"}.fa-history:before{content:"\\F1DA"}.fa-circle-thin:before{content:"\\F1DB"}.fa-header:before{content:"\\F1DC"}.fa-paragraph:before{content:"\\F1DD"}.fa-sliders:before{content:"\\F1DE"}.fa-share-alt:before{content:"\\F1E0"}.fa-share-alt-square:before{content:"\\F1E1"}.fa-bomb:before{content:"\\F1E2"}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:"\\F1E3"}.fa-tty:before{content:"\\F1E4"}.fa-binoculars:before{content:"\\F1E5"}.fa-plug:before{content:"\\F1E6"}.fa-slideshare:before{content:"\\F1E7"}.fa-twitch:before{content:"\\F1E8"}.fa-yelp:before{content:"\\F1E9"}.fa-newspaper-o:before{content:"\\F1EA"}.fa-wifi:before{content:"\\F1EB"}.fa-calculator:before{content:"\\F1EC"}.fa-paypal:before{content:"\\F1ED"}.fa-google-wallet:before{content:"\\F1EE"}.fa-cc-visa:before{content:"\\F1F0"}.fa-cc-mastercard:before{content:"\\F1F1"}.fa-cc-discover:before{content:"\\F1F2"}.fa-cc-amex:before{content:"\\F1F3"}.fa-cc-paypal:before{content:"\\F1F4"}.fa-cc-stripe:before{content:"\\F1F5"}.
fa-bell-slash:before{content:"\\F1F6"}.fa-bell-slash-o:before{content:"\\F1F7"}.fa-trash:before{content:"\\F1F8"}.fa-copyright:before{content:"\\F1F9"}.fa-at:before{content:"\\F1FA"}.fa-eyedropper:before{content:"\\F1FB"}.fa-paint-brush:before{content:"\\F1FC"}.fa-birthday-cake:before{content:"\\F1FD"}.fa-area-chart:before{content:"\\F1FE"}.fa-pie-chart:before{content:"\\F200"}.fa-line-chart:before{content:"\\F201"}.fa-lastfm:before{content:"\\F202"}.fa-lastfm-square:before{content:"\\F203"}.fa-toggle-off:before{content:"\\F204"}.fa-toggle-on:before{content:"\\F205"}.fa-bicycle:before{content:"\\F206"}.fa-bus:before{content:"\\F207"}.fa-ioxhost:before{content:"\\F208"}.fa-angellist:before{content:"\\F209"}.fa-cc:before{content:"\\F20A"}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:"\\F20B"}.fa-meanpath:before{content:"\\F20C"}.fa-buysellads:before{content:"\\F20D"}.fa-connectdevelop:before{content:"\\F20E"}.fa-dashcube:before{content:"\\F210"}.fa-forumbee:before{content:"\\F211"}.fa-leanpub:before{content:"\\F212"}.fa-sellsy:before{content:"\\F213"}.fa-shirtsinbulk:before{content:"\\F214"}.fa-simplybuilt:before{content:"\\F215"}.fa-skyatlas:before{content:"\\F216"}.fa-cart-plus:before{content:"\\F217"}.fa-cart-arrow-down:before{content:"\\F218"}.fa-diamond:before{content:"\\F219"}.fa-ship:before{content:"\\F21A"}.fa-user-secret:before{content:"\\F21B"}.fa-motorcycle:before{content:"\\F21C"}.fa-street-view:before{content:"\\F21D"}.fa-heartbeat:before{content:"\\F21E"}.fa-venus:before{content:"\\F221"}.fa-mars:before{content:"\\F222"}.fa-mercury:before{content:"\\F223"}.fa-intersex:before,.fa-transgender:before{content:"\\F224"}.fa-transgender-alt:before{content:"\\F225"}.fa-venus-double:before{content:"\\F226"}.fa-mars-double:before{content:"\\F227"}.fa-venus-mars:before{content:"\\F228"}.fa-mars-stroke:before{content:"\\F229"}.fa-mars-stroke-v:before{content:"\\F22A"}.fa-mars-stroke-h:before{content:"\\F22B"}.fa-neuter:before{content:"\\F22C"}.fa-gender
less:before{content:"\\F22D"}.fa-facebook-official:before{content:"\\F230"}.fa-pinterest-p:before{content:"\\F231"}.fa-whatsapp:before{content:"\\F232"}.fa-server:before{content:"\\F233"}.fa-user-plus:before{content:"\\F234"}.fa-user-times:before{content:"\\F235"}.fa-bed:before,.fa-hotel:before{content:"\\F236"}.fa-viacoin:before{content:"\\F237"}.fa-train:before{content:"\\F238"}.fa-subway:before{content:"\\F239"}.fa-medium:before{content:"\\F23A"}.fa-y-combinator:before,.fa-yc:before{content:"\\F23B"}.fa-optin-monster:before{content:"\\F23C"}.fa-opencart:before{content:"\\F23D"}.fa-expeditedssl:before{content:"\\F23E"}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:"\\F240"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:"\\F241"}.fa-battery-2:before,.fa-battery-half:before{content:"\\F242"}.fa-battery-1:before,.fa-battery-quarter:before{content:"\\F243"}.fa-battery-0:before,.fa-battery-empty:before{content:"\\F244"}.fa-mouse-pointer:before{content:"\\F245"}.fa-i-cursor:before{content:"\\F246"}.fa-object-group:before{content:"\\F247"}.fa-object-ungroup:before{content:"\\F248"}.fa-sticky-note:before{content:"\\F249"}.fa-sticky-note-o:before{content:"\\F24A"}.fa-cc-jcb:before{content:"\\F24B"}.fa-cc-diners-club:before{content:"\\F24C"}.fa-clone:before{content:"\\F24D"}.fa-balance-scale:before{content:"\\F24E"}.fa-hourglass-o:before{content:"\\F250"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\\F251"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\\F252"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\\F253"}.fa-hourglass:before{content:"\\F254"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:"\\F255"}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:"\\F256"}.fa-hand-scissors-o:before{content:"\\F257"}.fa-hand-lizard-o:before{content:"\\F258"}.fa-hand-spock-o:before{content:"\\F259"}.fa-hand-pointer-o:before{content:"\\F25A"}.fa-hand-peace-o:before{content:"\\F25B"}.fa-trademark:be
fore{content:"\\F25C"}.fa-registered:before{content:"\\F25D"}.fa-creative-commons:before{content:"\\F25E"}.fa-gg:before{content:"\\F260"}.fa-gg-circle:before{content:"\\F261"}.fa-tripadvisor:before{content:"\\F262"}.fa-odnoklassniki:before{content:"\\F263"}.fa-odnoklassniki-square:before{content:"\\F264"}.fa-get-pocket:before{content:"\\F265"}.fa-wikipedia-w:before{content:"\\F266"}.fa-safari:before{content:"\\F267"}.fa-chrome:before{content:"\\F268"}.fa-firefox:before{content:"\\F269"}.fa-opera:before{content:"\\F26A"}.fa-internet-explorer:before{content:"\\F26B"}.fa-television:before,.fa-tv:before{content:"\\F26C"}.fa-contao:before{content:"\\F26D"}.fa-500px:before{content:"\\F26E"}.fa-amazon:before{content:"\\F270"}.fa-calendar-plus-o:before{content:"\\F271"}.fa-calendar-minus-o:before{content:"\\F272"}.fa-calendar-times-o:before{content:"\\F273"}.fa-calendar-check-o:before{content:"\\F274"}.fa-industry:before{content:"\\F275"}.fa-map-pin:before{content:"\\F276"}.fa-map-signs:before{content:"\\F277"}.fa-map-o:before{content:"\\F278"}.fa-map:before{content:"\\F279"}.fa-commenting:before{content:"\\F27A"}.fa-commenting-o:before{content:"\\F27B"}.fa-houzz:before{content:"\\F27C"}.fa-vimeo:before{content:"\\F27D"}.fa-black-tie:before{content:"\\F27E"}.fa-fonticons:before{content:"\\F280"}.fa-reddit-alien:before{content:"\\F281"}.fa-edge:before{content:"\\F282"}.fa-credit-card-alt:before{content:"\\F283"}.fa-codiepie:before{content:"\\F284"}.fa-modx:before{content:"\\F285"}.fa-fort-awesome:before{content:"\\F286"}.fa-usb:before{content:"\\F287"}.fa-product-hunt:before{content:"\\F288"}.fa-mixcloud:before{content:"\\F289"}.fa-scribd:before{content:"\\F28A"}.fa-pause-circle:before{content:"\\F28B"}.fa-pause-circle-o:before{content:"\\F28C"}.fa-stop-circle:before{content:"\\F28D"}.fa-stop-circle-o:before{content:"\\F28E"}.fa-shopping-bag:before{content:"\\F290"}.fa-shopping-basket:before{content:"\\F291"}.fa-hashtag:before{content:"\\F292"}.fa-bluetooth:before{content:"\
\F293"}.fa-bluetooth-b:before{content:"\\F294"}.fa-percent:before{content:"\\F295"}.fa-gitlab:before{content:"\\F296"}.fa-wpbeginner:before{content:"\\F297"}.fa-wpforms:before{content:"\\F298"}.fa-envira:before{content:"\\F299"}.fa-universal-access:before{content:"\\F29A"}.fa-wheelchair-alt:before{content:"\\F29B"}.fa-question-circle-o:before{content:"\\F29C"}.fa-blind:before{content:"\\F29D"}.fa-audio-description:before{content:"\\F29E"}.fa-volume-control-phone:before{content:"\\F2A0"}.fa-braille:before{content:"\\F2A1"}.fa-assistive-listening-systems:before{content:"\\F2A2"}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:"\\F2A3"}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:"\\F2A4"}.fa-glide:before{content:"\\F2A5"}.fa-glide-g:before{content:"\\F2A6"}.fa-sign-language:before,.fa-signing:before{content:"\\F2A7"}.fa-low-vision:before{content:"\\F2A8"}.fa-viadeo:before{content:"\\F2A9"}.fa-viadeo-square:before{content:"\\F2AA"}.fa-snapchat:before{content:"\\F2AB"}.fa-snapchat-ghost:before{content:"\\F2AC"}.fa-snapchat-square:before{content:"\\F2AD"}.fa-pied-piper:before{content:"\\F2AE"}.fa-first-order:before{content:"\\F2B0"}.fa-yoast:before{content:"\\F2B1"}.fa-themeisle:before{content:"\\F2B2"}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:"\\F2B3"}.fa-fa:before,.fa-font-awesome:before{content:"\\F2B4"}.fa-handshake-o:before{content:"\\F2B5"}.fa-envelope-open:before{content:"\\F2B6"}.fa-envelope-open-o:before{content:"\\F2B7"}.fa-linode:before{content:"\\F2B8"}.fa-address-book:before{content:"\\F2B9"}.fa-address-book-o:before{content:"\\F2BA"}.fa-address-card:before,.fa-vcard:before{content:"\\F2BB"}.fa-address-card-o:before,.fa-vcard-o:before{content:"\\F2BC"}.fa-user-circle:before{content:"\\F2BD"}.fa-user-circle-o:before{content:"\\F2BE"}.fa-user-o:before{content:"\\F2C0"}.fa-id-badge:before{content:"\\F2C1"}.fa-drivers-license:before,.fa-id-card:before{content:"\\F2C2"}.fa-dri
vers-license-o:before,.fa-id-card-o:before{content:"\\F2C3"}.fa-quora:before{content:"\\F2C4"}.fa-free-code-camp:before{content:"\\F2C5"}.fa-telegram:before{content:"\\F2C6"}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:"\\F2C7"}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\\F2C8"}.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\\F2C9"}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\\F2CA"}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\\F2CB"}.fa-shower:before{content:"\\F2CC"}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:"\\F2CD"}.fa-podcast:before{content:"\\F2CE"}.fa-window-maximize:before{content:"\\F2D0"}.fa-window-minimize:before{content:"\\F2D1"}.fa-window-restore:before{content:"\\F2D2"}.fa-times-rectangle:before,.fa-window-close:before{content:"\\F2D3"}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:"\\F2D4"}.fa-bandcamp:before{content:"\\F2D5"}.fa-grav:before{content:"\\F2D6"}.fa-etsy:before{content:"\\F2D7"}.fa-imdb:before{content:"\\F2D8"}.fa-ravelry:before{content:"\\F2D9"}.fa-eercast:before{content:"\\F2DA"}.fa-microchip:before{content:"\\F2DB"}.fa-snowflake-o:before{content:"\\F2DC"}.fa-superpowers:before{content:"\\F2DD"}.fa-wpexplorer:before{content:"\\F2DE"}.fa-meetup:before{content:"\\F2E0"}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}',""]); 
-},function(A,M,t){M=A.exports=t(137)(),M.push([A.id,'*,:after,:before{box-sizing:border-box}body{font-family:Lato,sans-serif;font-size:15px;line-height:1.42857143;color:gray;background-color:#edecec}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#2cc9f4}a,a:focus,a:hover{text-decoration:none}a:focus,a:hover{color:#0aa0c9}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.img-responsive{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{padding:4px;line-height:1.42857143;background-color:#edecec;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out;display:inline-block;max-width:100%;height:auto}.img-circle{border-radius:50%}hr{margin-top:21px;margin-bottom:21px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.container{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}.row{margin-left:-15px;margin-right:-15px}.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{position:relative;min-height:1px;padding-left:15px;padding-right:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-
left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media (min-width:768px){.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margi
n-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin
-left:0}}@media (min-width:1200px){.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em 
.75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;font-size:15px;text-align:left;background-color:#fff;border:1px solid transparent;border-radius:4px;box-shadow:0 6px 12px rgba(0,0,0,.175);background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9.5px 0;overflow:hidden;background-color:rgba(0,0,0,.08)}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:gray;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{text-decoration:none;color:#333;background-color:rgba(0,0,0,.05)}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#333;text-decoration:none;outline:0;background-color:rgba(0,0,0,.075)}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#e4e4e4}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);cursor:not-allowed}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{left:auto;right:0}.dropdown-menu-left{left:0;right:auto}.dropdown-header{display:block;padding:3px 20px;font-size:13px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;left:0;right:0;bottom:0;top:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup 
.caret,.navbar-fixed-bottom .dropdown .caret{border-top:0;border-bottom:4px dashed;border-bottom:4px solid\\9;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{left:auto;right:0}.navbar-right .dropdown-menu>li>a{text-align:right}.navbar-right .dropdown-menu-left{left:0;right:auto}}.modal,.modal-open{overflow:hidden}.modal{display:none;position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transform:translateY(-25%);transform:translateY(-25%);-webkit-transition:-webkit-transform .3s ease-out;transition:transform .3s ease-out}.modal.in .modal-dialog{-webkit-transform:translate(0);transform:translate(0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;border:1px solid transparent;border-radius:6px;box-shadow:0 3px 9px rgba(0,0,0,.5);background-clip:padding-box;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:rgba(0,0,0,.1)}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:.5;filter:alpha(opacity=50)}.modal-header{padding:30px 35px 0;border-bottom:1px solid transparent}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:transparent}.modal-body{position:relative;padding:30px 35px}.modal-footer{padding:30px 35px;text-align:right;border-top:1px solid transparent}.modal-footer .btn+.btn{margin-left:5px;margin-bottom:0}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:400px}}@media 
(min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:Lato,sans-serif;font-style:normal;font-weight:400;letter-spacing:normal;line-break:auto;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;white-space:normal;word-break:normal;word-spacing:normal;word-wrap:normal;font-size:13px;opacity:0;filter:alpha(opacity=0)}.tooltip.in{opacity:.9;filter:alpha(opacity=90)}.tooltip.top{margin-top:-3px;padding:5px 0}.tooltip.right{margin-left:3px;padding:0 5px}.tooltip.bottom{margin-top:3px;padding:5px 0}.tooltip.left{margin-left:-3px;padding:0 5px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px}.tooltip.top-left .tooltip-arrow,.tooltip.top-right .tooltip-arrow{bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{left:5px}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 
5px;border-bottom-color:#000}@-ms-viewport{width:device-width}.visible-lg,.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media 
(min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}}*{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}:active,:focus{outline:0}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body,html{min-height:100%}a{-webkit-transition:color;transition:color;-webkit-transition-duration:.3s;transition-duration:.3s}button{border:0}.animated.infinite{-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite}@-webkit-keyframes fadeIn{0%{opacity:0}to{opacity:1}}@keyframes fadeIn{0%{opacity:0}to{opacity:1}}.fadeIn{-webkit-animation-name:fadeIn;animation-name:fadeIn}@-webkit-keyframes 
fadeInDown{0%{opacity:0;-webkit-transform:translateY(-20px)}to{opacity:1;-webkit-transform:translateY(0)}}@keyframes fadeInDown{0%{opacity:0;transform:translateY(-20px)}to{opacity:1;transform:translateY(0)}}.fadeInDown{-webkit-animation-name:fadeInDown;animation-name:fadeInDown}@-webkit-keyframes fadeInUp{0%{opacity:0;-webkit-transform:translateY(20px)}to{opacity:1;-webkit-transform:translateY(0)}}@keyframes fadeInUp{0%{opacity:0;transform:translateY(20px)}to{opacity:1;transform:translateY(0)}}.fadeInUp{-webkit-animation-name:fadeInUp;animation-name:fadeInUp}@-webkit-keyframes fadeOut{0%{opacity:1}to{opacity:0}}@keyframes fadeOut{0%{opacity:1}to{opacity:0}}.fadeOut{-webkit-animation-name:fadeOut;animation-name:fadeOut}@-webkit-keyframes fadeOutDown{0%{opacity:1;-webkit-transform:translateY(0)}to{opacity:0;-webkit-transform:translateY(20px)}}@keyframes fadeOutDown{0%{opacity:1;transform:translateY(0)}to{opacity:0;transform:translateY(20px)}}.fadeOutDown{-webkit-animation-name:fadeOutDown;animation-name:fadeOutDown}@-webkit-keyframes fadeOutUp{0%{opacity:1;-webkit-transform:translateY(0)}to{opacity:0;-webkit-transform:translateY(-20px)}}@keyframes fadeOutUp{0%{opacity:1;transform:translateY(0)}to{opacity:0;transform:translateY(-20px)}}.fadeOutUp{-webkit-animation-name:fadeOutUp;animation-name:fadeOutUp}@-webkit-keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}@keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}.text-center{text-align:center!important}.text-left{text-align:left!important}.text-right{text-align:right!important}.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.row:after,.row:before{content:" 
";display:table}.clearfix:after,.container-fluid:after,.container:after,.modal-footer:after,.modal-header:after,.row:after{clear:both}.pull-right{float:right!important}.pull-left{float:left!important}.p-relative{position:relative}.m-0{margin:0!important}.m-t-0{margin-top:0!important}.m-b-0{margin-bottom:0!important}.m-l-0{margin-left:0!important}.m-r-0{margin-right:0!important}.m-5{margin:5px!important}.m-t-5{margin-top:5px!important}.m-b-5{margin-bottom:5px!important}.m-l-5{margin-left:5px!important}.m-r-5{margin-right:5px!important}.m-10{margin:10px!important}.m-t-10{margin-top:10px!important}.m-b-10{margin-bottom:10px!important}.m-l-10{margin-left:10px!important}.m-r-10{margin-right:10px!important}.m-15{margin:15px!important}.m-t-15{margin-top:15px!important}.m-b-15{margin-bottom:15px!important}.m-l-15{margin-left:15px!important}.m-r-15{margin-right:15px!important}.m-20{margin:20px!important}.m-t-20{margin-top:20px!important}.m-b-20{margin-bottom:20px!important}.m-l-20{margin-left:20px!important}.m-r-20{margin-right:20px!important}.m-25{margin:25px!important}.m-t-25{margin-top:25px!important}.m-b-25{margin-bottom:25px!important}.m-l-25{margin-left:25px!important}.m-r-25{margin-right:25px!important}.m-30{margin:30px!important}.m-t-30{margin-top:30px!important}.m-b-30{margin-bottom:30px!important}.m-l-30{margin-left:30px!important}.m-r-30{margin-right:30px!important}.p-0{padding:0!important}.p-t-0{padding-top:0!important}.p-b-0{padding-bottom:0!important}.p-l-0{padding-left:0!important}.p-r-0{padding-right:0!important}.p-5{padding:5px!important}.p-t-5{padding-top:5px!important}.p-b-5{padding-bottom:5px!important}.p-l-5{padding-left:5px!important}.p-r-5{padding-right:5px!important}.p-10{padding:10px!important}.p-t-10{padding-top:10px!important}.p-b-10{padding-bottom:10px!important}.p-l-10{padding-left:10px!important}.p-r-10{padding-right:10px!important}.p-15{padding:15px!important}.p-t-15{padding-top:15px!important}.p-b-15{padding-bottom:15px!important}.p-l-15{paddi
ng-left:15px!important}.p-r-15{padding-right:15px!important}.p-20{padding:20px!important}.p-t-20{padding-top:20px!important}.p-b-20{padding-bottom:20px!important}.p-l-20{padding-left:20px!important}.p-r-20{padding-right:20px!important}.p-25{padding:25px!important}.p-t-25{padding-top:25px!important}.p-b-25{padding-bottom:25px!important}.p-l-25{padding-left:25px!important}.p-r-25{padding-right:25px!important}.p-30{padding:30px!important}.p-t-30{padding-top:30px!important}.p-b-30{padding-bottom:30px!important}.p-l-30{padding-left:30px!important}.p-r-30{padding-right:30px!important}@font-face{font-family:Lato;src:url('+t(501)+") format('woff2'),url("+t(500)+") format('woff');font-weight:400;font-style:normal}.form-control{border:0;border-bottom:1px solid #eee;color:#32393f;padding:5px;width:100%;font-size:13px;background-color:transparent}select.form-control{-webkit-appearance:none;-moz-appearance:none;border-radius:0;background:url("+t(503)+") no-repeat bottom 7px right}.input-group{position:relative}.input-group:not(:last-child){margin-bottom:40px}.ig-label{position:absolute;text-align:center;bottom:7px;left:0;width:100%;-webkit-transition:all;transition:all;-webkit-transition-duration:.25s;transition-duration:.25s;padding:2px 0 3px;border-radius:2px;font-weight:400}.ig-helpers{z-index:1;width:100%;left:0}.ig-helpers,.ig-helpers:after,.ig-helpers:before{position:absolute;height:2px;bottom:0}.ig-helpers:after,.ig-helpers:before{content:'';width:0;-webkit-transition:all;transition:all;-webkit-transition-duration:.25s;transition-duration:.25s;background-color:#03a9f4}.ig-helpers:before{left:50%}.ig-helpers:after{right:50%}.ig-text{width:100%;height:40px;border:0;background:transparent;text-align:center;position:relative;z-index:1;border-bottom:1px solid 
#eee;color:#32393f;font-size:13px}.ig-text:focus+.ig-helpers:after,.ig-text:focus+.ig-helpers:before{width:50%}.ig-text:disabled~.ig-label,.ig-text:focus~.ig-label,.ig-text:valid~.ig-label{bottom:40px;font-size:13px;z-index:1}.ig-text:disabled{opacity:.5;filter:alpha(opacity=50)}.ig-dark .ig-text{color:#fff;border-color:hsla(0,0%,100%,.1)}.ig-dark .ig-helpers:after,.ig-dark .ig-helpers:before{background-color:#dfdfdf;height:1px}.ig-left .ig-label,.ig-left .ig-text{text-align:left}.ig-error .ig-label{color:#e23f3f}.ig-error .ig-helpers i:first-child,.ig-error .ig-helpers i:first-child:after,.ig-error .ig-helpers i:first-child:before{background:rgba(226,63,63,.43)}.ig-error .ig-helpers i:last-child,.ig-error .ig-helpers i:last-child:after,.ig-error .ig-helpers i:last-child:before{background:#e23f3f!important}.ig-error:after{content:\"\\F05A\";font-family:FontAwesome;position:absolute;top:17px;right:9px;font-size:20px;color:#d33d3e}.ig-search:before{font-family:fontAwesome;content:'\\F002';font-size:15px;position:absolute;left:2px;top:8px}.ig-search .ig-text{padding-left:25px}.btn{border:0;padding:5px 
10px;font-size:12px;line-height:1.5;border-radius:2px;text-align:center;-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}.btn:focus,.btn:hover{opacity:.9;filter:alpha(opacity=90)}.btn-block{display:block;width:100%}.btn-link{color:#545454;background-color:#eee}.btn-link:focus,.btn-link:hover{color:#545454;background-color:#dfdfdf}.btn-danger{color:#fff;background-color:#ff726f}.btn-danger:focus,.btn-danger:hover{color:#fff;background-color:#ff5450}.btn-primary{color:#fff;background-color:#50b2ff}.btn-primary:focus,.btn-primary:hover{color:#fff;background-color:#31a5ff}.btn-success{color:#fff;background-color:#33d46f}.btn-success:focus,.btn-success:hover{color:#fff;background-color:#28c061}.close{right:15px;font-weight:400;opacity:1;font-size:18px;position:absolute;text-align:center;top:16px;z-index:1;padding:0;border:0;background-color:transparent}.close span{width:25px;height:25px;display:block;border-radius:50%;line-height:24px;text-shadow:none}.close:not(.close-alt) span{background-color:hsla(0,0%,100%,.1);color:hsla(0,0%,100%,.8)}.close:not(.close-alt):focus span,.close:not(.close-alt):hover span{background-color:hsla(0,0%,100%,.2);color:#fff}.close-alt span{background-color:#efefef;color:#989898}.close-alt:focus span,.close-alt:hover span{background-color:#e8e8e8;color:#7b7b7b}.hidden{display:none!important}.copy-text label{font-size:12px;color:#333;margin-bottom:10px;display:block}.copy-text input{width:100%;border-radius:1px;border:1px solid #eee;padding:7px 12px;font-size:13px;line-height:100%;cursor:text;-webkit-transition:border-color;transition:border-color;-webkit-transition-duration:.3s;transition-duration:.3s}.copy-text input:hover{border-color:#e1e1e1}.login{height:100vh;min-height:500px;background:#32393f;text-align:center}.login:before{height:calc(100% - 
110px);width:1px;content:\"\"}.l-wrap,.login:before{display:inline-block;vertical-align:middle}.l-wrap{width:80%;max-width:500px;margin-top:-50px}.l-wrap.toggled{display:inline-block}.l-wrap .input-group:not(:last-child){margin-bottom:40px}.l-footer{height:110px;padding:0 50px}.lf-logo{float:right}.lf-logo img{width:40px}.lf-server{float:left;color:hsla(0,0%,100%,.4);font-size:20px;font-weight:400;padding-top:40px}@media (max-width:768px){.lf-logo,.lf-server{float:none;display:block;text-align:center;width:100%}.lf-logo{margin-bottom:5px}.lf-server{font-size:15px}}.lw-btn{width:50px;height:50px;border:1px solid #fff;display:inline-block;border-radius:50%;font-size:22px;color:#fff;-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s;opacity:.3;background-color:transparent;line-height:45px;padding:0}.lw-btn:hover{color:#fff;opacity:.8;border-color:#fff}.lw-btn i{display:block;width:100%;padding-left:3px}input:-webkit-autofill{-webkit-box-shadow:0 0 0 50px #32393f inset!important;-webkit-text-fill-color:#fff!important}.fe-header{padding:45px 55px 20px}@media (min-width:992px){.fe-header{position:relative}}@media (max-width:667px){.fe-header{padding:25px 25px 20px}}.fe-header h2{font-size:16px;font-weight:400;margin:0}.fe-header h2>span{margin-bottom:7px;display:inline-block}.fe-header h2>span:not(:first-child):before{content:'/';margin:0 4px;color:#e6e6e6}.fe-header p{color:#bdbdbd;margin-top:7px}.feh-usage{margin-top:12px;max-width:285px}@media 
(max-width:667px){.feh-usage{max-width:100%;font-size:12px}}.feh-usage>ul{color:#bdbdbd;margin-top:7px;list-style:none;padding:0}.feh-usage>ul>li{padding-right:0;display:inline-block}.feh-usage>ul>li:first-child{color:#2ed2ff}.fehu-chart{height:5px;background:#eee;position:relative;border-radius:2px;overflow:hidden}.fehu-chart>div{position:absolute;left:0;height:100%;background:#2ed2ff}.feh-actions{list-style:none;padding:0;margin:0;position:absolute;right:35px;top:30px;z-index:11}@media (max-width:991px){.feh-actions{top:7px;right:10px;position:fixed}}.feh-actions>li{display:inline-block;text-align:right;vertical-align:top;line-height:100%}.feh-actions>li>.btn-group>button,.feh-actions>li>a{display:block;height:45px;min-width:45px;text-align:center;border-radius:50%;padding:0;border:0;background:none}@media (min-width:992px){.feh-actions>li>.btn-group>button,.feh-actions>li>a{color:#7b7b7b;font-size:21px;line-height:45px;-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}.feh-actions>li>.btn-group>button:hover,.feh-actions>li>a:hover{background:rgba(0,0,0,.09)}}@media (max-width:991px){.feh-actions>li>.btn-group>button,.feh-actions>li>a{background:url("+t(235)+") no-repeat 50%}.feh-actions>li>.btn-group>button .fa-reorder,.feh-actions>li>a .fa-reorder{display:none}}@media (max-width:991px){.fe-header-mobile{background-color:#32393f;padding:10px 50px 9px 12px;text-align:center;position:fixed;z-index:10;box-shadow:0 0 10px rgba(0,0,0,.3);left:0;top:0;width:100%}.fe-header-mobile 
.mh-logo{height:35px;position:relative;top:4px}.feh-trigger{width:41px;height:41px;cursor:pointer;float:left;position:relative;text-align:center}.feh-trigger:after,.feh-trigger:before{content:\"\";position:absolute;top:0;left:0;width:100%;height:100%;border-radius:50%}.feh-trigger:after{z-index:1}.feh-trigger:before{background:hsla(0,0%,100%,.1);-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s;-webkit-transform:scale(0);transform:scale(0)}.feht-toggled:before{-webkit-transform:scale(1);transform:scale(1)}.feht-toggled .feht-lines{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.feht-toggled .feht-lines>div.top{width:12px;transform:translateX(8px) translateY(1px) rotate(45deg);-webkit-transform:translateX(8px) translateY(1px) rotate(45deg)}.feht-toggled .feht-lines>div.bottom{width:12px;transform:translateX(8px) translateY(-1px) rotate(-45deg);-webkit-transform:translateX(8px) translateY(-1px) rotate(-45deg)}.feht-lines,.feht-lines>div{-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}.feht-lines{width:18px;height:12px;display:inline-block;margin-top:14px}.feht-lines>div{background-color:#eaeaea;width:18px;height:2px}.feht-lines>div.center{margin:3px 0}}.fe-sidebar{width:300px;background-color:#32393f;position:fixed;height:100%;overflow:hidden;padding:35px}@media (min-width:992px){.fe-sidebar{-webkit-transform:translateZ(0);transform:translateZ(0)}}@media (max-width:991px){.fe-sidebar{padding-top:85px;z-index:9;box-shadow:0 0 10px rgba(0,0,0,.65);-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s;-webkit-transform:translate3d(-315px,0,0);transform:translate3d(-315px,0,0)}.fe-sidebar.toggled{-webkit-transform:translateZ(0);transform:translateZ(0)}}.fe-sidebar a{color:hsla(0,0%,100%,.58)}.fe-sidebar a:hover{color:#fff}.fes-header{margin-bottom:40px}.fes-header h2,.fes-header img{float:left}.fes-header h2{margin:13px 0 0 
10px;font-weight:400}.fes-header img{width:32px}.fesl-inner{height:calc(100vh - 260px);overflow:auto;padding:0;margin:0 -35px}.fesl-inner li{position:relative}.fesl-inner li>a{display:block;padding:10px 40px 12px 35px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.fesl-inner li>a:before{font-family:FontAwesome;content:'\\F0A0';font-size:17px;margin-right:15px;position:relative;top:1px;opacity:.8;filter:alpha(opacity=80)}.fesl-inner li.active{background-color:rgba(0,0,0,.2)}.fesl-inner li.active>a{color:#fff}.fesl-inner li:not(.active):hover{background-color:rgba(0,0,0,.1)}.fesl-inner li:not(.active):hover>a{color:#fff}.fesl-inner li:hover .fesli-trigger{opacity:.6;filter:alpha(opacity=60)}.fesl-inner li:hover .fesli-trigger:hover{opacity:1;filter:alpha(opacity=100)}.fesl-inner ul{list-style:none;padding:0;margin:0}.fesl-inner:hover .scrollbar-vertical{opacity:1}.fesli-trigger{filter:alpha(opacity=0);-webkit-transition:all;transition:all;-webkit-transition-duration:.2s;transition-duration:.2s;top:0;right:0;width:40px;cursor:pointer;background:url("+t(235)+') no-repeat 0}.fesli-trigger,.scrollbar-vertical{opacity:0;position:absolute;height:100%}.scrollbar-vertical{right:5px;width:4px;-webkit-transition:opacity;transition:opacity;-webkit-transition-duration:.3s;transition-duration:.3s}.scrollbar-vertical div{border-radius:1px!important;background-color:#6a6a6a!important}.fes-host{position:fixed;left:0;bottom:0;z-index:1;background:#32393f;color:hsla(0,0%,100%,.4);font-size:15px;font-weight:400;width:300px;padding:20px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.fes-host>i{margin-right:10px}.fesl-row{padding-right:40px;padding-top:5px;padding-bottom:5px;position:relative}@media (min-width:668px){.fesl-row{display:flex;flex-flow:row;justify-content:space-between}}.fesl-row:after,.fesl-row:before{content:" ";display:table}.fesl-row:after{clear:both}@media (min-width:668px){header.fesl-row{margin-bottom:20px;border-bottom:1px solid 
#f0f0f0;padding-left:40px}header.fesl-row .fesl-item,header.fesl-row .fesli-sort{-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}header.fesl-row .fesl-item{cursor:pointer;color:#bdbdbd;font-weight:500;margin-bottom:-5px}header.fesl-row .fesl-item>.fesli-sort{float:right;margin:4px 0 0;opacity:0;filter:alpha(opacity=0);color:#32393f;font-size:14px}header.fesl-row .fesl-item:hover:not(.fi-actions){background:#f5f5f5;color:#32393f}header.fesl-row .fesl-item:hover:not(.fi-actions)>.fesli-sort{opacity:.5;filter:alpha(opacity=50)}}@media (max-width:667px){header.fesl-row{display:none}}div.fesl-row{padding-left:85px;border-bottom:1px solid transparent;cursor:default}@media (max-width:667px){div.fesl-row{padding-left:70px;padding-right:45px}}div.fesl-row:nth-child(even){background-color:#fafafa}div.fesl-row:hover{background-color:#fbf7dc}div.fesl-row[data-type]:before{font-family:fontAwesome;width:35px;height:35px;text-align:center;line-height:35px;position:absolute;border-radius:50%;font-size:16px;left:50px;top:9px;color:#fff}@media (max-width:667px){div.fesl-row[data-type]:before{left:20px}}@media (max-width:667px){div.fesl-row[data-type=folder] .fesl-item.fi-name{padding-top:10px;padding-bottom:7px}div.fesl-row[data-type=folder] .fesl-item.fi-modified,div.fesl-row[data-type=folder] 
.fesl-item.fi-size{display:none}}div.fesl-row[data-type=folder]:before{content:\'\\F114\';background-color:#2dd3fb}div.fesl-row[data-type=pdf]:before{content:"\\F1C1";background-color:#fb766d}div.fesl-row[data-type=zip]:before{content:"\\F1C6";background-color:#374952}div.fesl-row[data-type=audio]:before{content:"\\F1C7";background-color:#009688}div.fesl-row[data-type=code]:before{content:"\\F1C9";background-color:#997867}div.fesl-row[data-type=excel]:before{content:"\\F1C3";background-color:#64c866}div.fesl-row[data-type=image]:before{content:"\\F1C5";background-color:#d24ce9}div.fesl-row[data-type=video]:before{content:"\\F1C8";background-color:#fdc206}div.fesl-row[data-type=other]:before{content:"\\F016";background-color:#8a8a8a}div.fesl-row[data-type=text]:before{content:"\\F0F6";background-color:#8a8a8a}div.fesl-row[data-type=doc]:before{content:"\\F1C2";background-color:#2196f5}div.fesl-row[data-type=presentation]:before{content:"\\F1C4";background-color:#fba220}div.fesl-row.fesl-loading:before{content:\'\'}.fesl-item{color:gray;display:block}.fesl-item a{color:gray}@media (min-width:668px){.fesl-item:not(.fi-actions){text-overflow:ellipsis;padding:10px 15px;white-space:nowrap;overflow:hidden}.fesl-item.fi-name{flex:3}.fesl-item.fi-size{width:140px}.fesl-item.fi-modified{width:190px}.fesl-item.fi-actions{width:40px}}@media (max-width:667px){.fesl-item{padding:0}.fesl-item.fi-name{width:100%;margin-bottom:3px}.fesl-item.fi-modified,.fesl-item.fi-size{font-size:12px;color:#b5b5b5;float:left}.fesl-item.fi-modified{max-width:72px;white-space:nowrap;overflow:hidden}.fesl-item.fi-size{margin-right:10px}.fesl-item.fi-actions{position:absolute;top:5px;right:10px}}.fia-toggle{height:36px;width:36px;background:transparent url('+t(502)+") no-repeat 50%;position:relative;top:3px;opacity:.4;filter:alpha(opacity=40)}.fia-toggle:hover{opacity:.7;filter:alpha(opacity=70)}.fi-actions 
.dropdown-menu{background-color:transparent;box-shadow:none;padding:0;right:38px;left:auto;margin:0;height:100%;text-align:right}.fi-actions .dropdown.open .dropdown-menu .fiad-action{right:0}.fiad-action{height:35px;width:35px;background:#ffc107;display:inline-block;border-radius:50%;text-align:center;line-height:35px;font-weight:400;position:relative;top:4px;margin-left:5px;-webkit-animation-name:fiad-action-anim;animation-name:fiad-action-anim;-webkit-transform-origin:center center;transform-origin:center center;-webkit-backface-visibility:none;backface-visibility:none;box-shadow:0 2px 4px rgba(0,0,0,.1)}.fiad-action:nth-child(2){-webkit-animation-duration:.1s;animation-duration:.1s}.fiad-action:nth-child(1){-webkit-animation-duration:.25s;animation-duration:.25s}.fiad-action>i{font-size:14px;color:#fff}.fiad-action:hover{background-color:#f7b900}@-webkit-keyframes fiad-action-anim{0%{-webkit-transform:scale(0);transform:scale(0);opacity:0;filter:alpha(opacity=0);right:-20px}to{-webkit-transform:scale(1);transform:scale(1);opacity:1;filter:alpha(opacity=100);right:0}}@keyframes fiad-action-anim{0%{-webkit-transform:scale(0);transform:scale(0);opacity:0;filter:alpha(opacity=0);right:-20px}to{-webkit-transform:scale(1);transform:scale(1);opacity:1;filter:alpha(opacity=100);right:0}}.file-explorer{background-color:#fff;position:relative;height:100%}.file-explorer.toggled{height:100vh;overflow:hidden}.fe-body{min-height:100vh;overflow:auto}@media (min-width:992px){.fe-body{padding:0 0 40px 300px}}@media (max-width:991px){.fe-body{padding:75px 0 40px}}.feb-actions{position:fixed;bottom:30px;right:30px}.feb-actions .dropdown-menu{min-width:55px;width:55px;text-align:center;background:transparent;box-shadow:none;margin:0}.feb-actions.open .feba-btn{-webkit-transform:scale(1);transform:scale(1)}.feb-actions.open .feba-btn:first-child{-webkit-animation-name:feba-btn-anim;animation-name:feba-btn-anim;-webkit-animation-duration:.3s;animation-duration:.3s}.feb-actions.open 
.feba-btn:last-child{-webkit-animation-name:feba-btn-anim;animation-name:feba-btn-anim;-webkit-animation-duration:.1s;animation-duration:.1s}.feb-actions.open .feba-toggle{background:#ff403c}.feb-actions.open .feba-toggle>span{-webkit-transform:rotate(135deg);transform:rotate(135deg)}.feba-toggle{width:55px;height:55px;line-height:55px;border-radius:50%;background:#ff726f;box-shadow:0 2px 3px rgba(0,0,0,.15);display:inline-block;text-align:center;border:0;padding:0}.feba-toggle span{display:inline-block;height:100%;width:100%}.feba-toggle i{color:#fff;font-size:17px;line-height:58px}.feba-toggle,.feba-toggle>span{-webkit-transition:all;transition:all;-webkit-transition-duration:.25s;transition-duration:.25s;-webkit-backface-visibility:hidden;backface-visibility:hidden}.feba-btn{width:40px;margin-top:10px;height:40px;border-radius:50%;text-align:center;display:inline-block;line-height:40px;box-shadow:0 2px 3px rgba(0,0,0,.15);-webkit-transform:scale(0);transform:scale(0);position:relative}.feba-btn,.feba-btn:focus,.feba-btn:hover{color:#fff}.feba-btn label{width:100%;height:100%;position:absolute;left:0;top:0;cursor:pointer}.feba-bucket{background:#ffc155}.feba-upload{background:#ffc107}@-webkit-keyframes feba-btn-anim{0%{-webkit-transform:scale(0);transform:scale(0);opacity:0;filter:alpha(opacity=0)}to{-webkit-transform:scale(1);transform:scale(1);opacity:1;filter:alpha(opacity=100)}}@keyframes 
feba-btn-anim{0%{-webkit-transform:scale(0);transform:scale(0);opacity:0;filter:alpha(opacity=0)}to{-webkit-transform:scale(1);transform:scale(1);opacity:1;filter:alpha(opacity=100)}}.l-bucket,.l-listing{width:23px;height:23px;-webkit-animation-name:zoomIn;animation-name:zoomIn;-webkit-animation-duration:.5s;animation-duration:.5s;-webkit-animation-fill-mode:both;animation-fill-mode:both}.l-bucket>i,.l-listing>i{border-width:2px}.l-bucket{left:31px;top:10px}.l-bucket>i{background-color:#32393f;border-top-color:hsla(0,0%,100%,.1);border-right-color:hsla(0,0%,100%,.1);border-bottom-color:hsla(0,0%,100%,.1)}.active .l-bucket>i{background-color:#282e32}.l-listing{left:56px;top:15px}.l-listing>i{border-top-color:hsla(0,0%,100%,.4);border-right-color:hsla(0,0%,100%,.4);border-bottom-color:hsla(0,0%,100%,.4)}@media (max-width:667px){.l-listing{left:31px}}.ie-warning{background-color:#ff5252;width:100%;height:100%;position:fixed;left:0;top:0;text-align:center}.ie-warning:before{width:1px;content:'';height:100%}.ie-warning .iw-inner,.ie-warning:before{display:inline-block;vertical-align:middle}.iw-inner{width:470px;height:300px;background-color:#fff;border-radius:5px;padding:40px;position:relative}.iw-inner ul{list-style:none;padding:0;margin:0;width:230px;margin-left:80px;margin-top:16px}.iw-inner ul>li{float:left}.iw-inner ul>li>a{display:block;padding:10px 15px 7px;font-size:14px;margin:0 1px;border-radius:3px}.iw-inner ul>li>a:hover{background:#eee}.iw-inner ul>li>a img{height:40px;margin-bottom:5px}.iwi-icon{color:#ff5252;font-size:40px;display:block;line-height:100%;margin-bottom:15px}.iwi-skip{position:absolute;left:0;bottom:-35px;width:100%;color:hsla(0,0%,100%,.6);cursor:pointer}.iwi-skip:hover{color:#fff}.dropdown-menu{padding:15px 0;top:0;margin-top:-1px}.dropdown-menu>li>a{padding:8px 
20px;font-size:15px}.dropdown-menu>li>a>i{width:20px;position:relative;top:1px}.dropdown-menu-right>li>a{text-align:right}.alert{border:0;position:fixed;max-width:500px;margin:0;box-shadow:0 4px 5px rgba(0,0,0,.1);color:#fff;width:100%;right:20px;border-radius:3px;padding:17px 50px 17px 17px;z-index:10010;-webkit-animation-duration:.8s;animation-duration:.8s;-webkit-animation-fill-mode:both;animation-fill-mode:both}.alert:not(.progress){top:20px}@media (min-width:768px){.alert:not(.progress){left:50%;margin-left:-250px}}.alert.progress{bottom:20px;right:20px}.alert.alert-danger{background:#ff726f}.alert.alert-success{background:#33d46f}.alert.alert-info{background:#50b2ff}@media (max-width:767px){.alert{left:20px;width:calc(100% - 40px);max-width:100%}}.alert .progress{margin:10px 10px 8px 0;height:5px;box-shadow:none;border-radius:1px;background-color:#50b2ff;border-radius:2px;overflow:hidden}.alert .progress-bar{box-shadow:none;background-color:#fff;height:100%}.alert .close{position:absolute;top:15px}@media (min-width:768px){.modal{text-align:center}.modal:before{content:'';height:100%;width:1px}.modal .modal-dialog,.modal:before{display:inline-block;vertical-align:middle}.modal .modal-dialog{text-align:left;margin:10px auto}}.modal-dark .modal-header{color:hsla(0,0%,100%,.4)}.modal-dark .modal-header small{color:hsla(0,0%,100%,.2)}.modal-dark .modal-content{background-color:#32393f}.modal-backdrop{-webkit-animation-name:fadeIn;animation-name:fadeIn;-webkit-animation-fill-mode:both;animation-fill-mode:both}.modal-backdrop,.modal-dialog{-webkit-animation-duration:.2s;animation-duration:.2s}.modal-dialog{-webkit-animation-name:zoomIn;animation-name:zoomIn;-webkit-animation-fill-mode:both;animation-fill-mode:both}.modal-header{color:#333;position:relative}.modal-header small{display:block;text-transform:none;font-size:12px;margin-top:5px;color:#a8a8a8}.modal-content{border-radius:3px;box-shadow:none}.modal-footer{padding:0 30px 30px}.modal-confirm 
.modal-dialog,.modal-footer{text-align:center}.mc-icon{margin:0 0 10px}.mc-icon>i{font-size:60px}.mci-red{color:#ff8f8f}.mci-amber{color:#ffc107}.mci-green{color:#64e096}.mc-text{color:#333}.mc-sub{color:#bdbdbd;margin-top:5px;font-size:13px}@media (max-width:767px){.modal-about{text-align:center}.modal-about .modal-dialog{max-width:400px;width:90%;margin:20px auto 0}}.ma-inner{display:flex;flex-direction:row;align-items:center;min-height:350px;position:relative}@media (min-width:768px){.ma-inner:before{content:'';width:150px;height:100%;top:0;left:0;position:absolute;border-radius:3px 0 0 3px;background-color:#23282c}}.mai-item:first-child{width:150px;text-align:center}.mai-item:last-child{flex:4;padding:30px}.maii-logo{width:70px;position:relative}.maii-list{list-style:none;padding:0}.maii-list>li{margin-bottom:15px}.maii-list>li div{color:hsla(0,0%,100%,.8);text-transform:uppercase;font-size:14px}.maii-list>li small{font-size:13px;color:hsla(0,0%,100%,.4)}.toggle-password{position:absolute;bottom:30px;right:35px;width:30px;height:30px;border:1px solid #eee;border-radius:0;text-align:center;cursor:pointer;z-index:10;background-color:#fff;padding-top:5px}.toggle-password.toggled{background:#eee}.pm-body{padding-bottom:30px}.pmb-header{margin-bottom:35px}.pmb-list{display:flex;flex-flow:row;align-items:center;justify-content:center;padding:10px 35px}.pmb-list:nth-child(even){background-color:#f7f7f7}.pmb-list .form-control{padding-left:0;padding-right:0}header.pmb-list{margin:20px 0 10px}.pmbl-item{display:block;font-size:13px}.pmbl-item:nth-child(1){flex:2}.pmbl-item:nth-child(2){margin:0 25px;width:150px}.pmbl-item:nth-child(3){width:70px}div.pmb-list select{border:0}div.pmb-list .pml-item:not(:last-child){padding:0 5px}.modal-create-bucket .modal-dialog{position:fixed;right:25px;bottom:95px;margin:0;height:110px}.modal-create-bucket .modal-content{width:100%;height:100%}",""]); -},function(A,M,t){function I(A){return null===A||void 0===A}function 
g(A){return!(!A||"object"!=typeof A||"number"!=typeof A.length)&&("function"==typeof A.copy&&"function"==typeof A.slice&&!(A.length>0&&"number"!=typeof A[0]))}function e(A,M,t){var e,o;if(I(A)||I(M))return!1;if(A.prototype!==M.prototype)return!1;if(E(A))return!!E(M)&&(A=i.call(A),M=i.call(M),N(A,M,t));if(g(A)){if(!g(M))return!1;if(A.length!==M.length)return!1;for(e=0;e=0;e--)if(n[e]!=C[e])return!1;for(e=n.length-1;e>=0;e--)if(o=n[e],!N(A[o],M[o],t))return!1;return typeof A==typeof M}var i=Array.prototype.slice,T=t(277),E=t(276),N=A.exports=function(A,M,t){return t||(t={}),A===M||(A instanceof Date&&M instanceof Date?A.getTime()===M.getTime():!A||!M||"object"!=typeof A&&"object"!=typeof M?t.strict?A===M:A==M:e(A,M,t))}},function(A,M){function t(A){return"[object Arguments]"==Object.prototype.toString.call(A)}function I(A){return A&&"object"==typeof A&&"number"==typeof A.length&&Object.prototype.hasOwnProperty.call(A,"callee")&&!Object.prototype.propertyIsEnumerable.call(A,"callee")||!1}var g="[object Arguments]"==function(){return Object.prototype.toString.call(arguments)}();M=A.exports=g?t:I,M.supported=t,M.unsupported=I},function(A,M){function t(A){var M=[];for(var t in A)M.push(t);return M}M=A.exports="function"==typeof Object.keys?Object.keys:t,M.shim=t},function(A,M,t){"use strict";var I=t(140);A.exports=function(A,M){A.classList?A.classList.add(M):I(A)||(A.className=A.className+" "+M)}},function(A,M,t){"use strict";A.exports={addClass:t(278),removeClass:t(280),hasClass:t(140)}},function(A,M){"use strict";A.exports=function(A,M){A.classList?A.classList.remove(M):A.className=A.className.replace(new RegExp("(^|\\s)"+M+"(?:\\s|$)","g"),"$1").replace(/\s+/g," ").replace(/^\s*|\s*$/g,"")}},function(A,M,t){"use strict";var I=t(40),g=t(285);A.exports=function(A,M){return function(t){var e=t.currentTarget,i=t.target,T=g(e,A);T.some(function(A){return I(A,i)})&&M.call(this,t)}}},function(A,M,t){"use strict";var 
I=t(80),g=t(141),e=t(281);A.exports={on:I,off:g,filter:e}},function(A,M,t){"use strict";function I(A){return A.nodeName&&A.nodeName.toLowerCase()}function g(A){for(var M=(0,T["default"])(A),t=A&&A.offsetParent;t&&"html"!==I(A)&&"static"===(0,N["default"])(t,"position");)t=t.offsetParent;return t||M.documentElement}var e=t(58);M.__esModule=!0,M["default"]=g;var i=t(39),T=e.interopRequireDefault(i),E=t(81),N=e.interopRequireDefault(E);A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A.nodeName&&A.nodeName.toLowerCase()}function g(A,M){var t,g={top:0,left:0};return"fixed"===(0,a["default"])(A,"position")?t=A.getBoundingClientRect():(M=M||(0,N["default"])(A),t=(0,T["default"])(A),"html"!==I(M)&&(g=(0,T["default"])(M)),g.top+=parseInt((0,a["default"])(M,"borderTopWidth"),10)-(0,n["default"])(M)||0,g.left+=parseInt((0,a["default"])(M,"borderLeftWidth"),10)-(0,c["default"])(M)||0),e._extends({},t,{top:t.top-g.top-(parseInt((0,a["default"])(A,"marginTop"),10)||0),left:t.left-g.left-(parseInt((0,a["default"])(A,"marginLeft"),10)||0)})}var e=t(58);M.__esModule=!0,M["default"]=g;var i=t(142),T=e.interopRequireDefault(i),E=t(283),N=e.interopRequireDefault(E),o=t(143),n=e.interopRequireDefault(o),C=t(286),c=e.interopRequireDefault(C),D=t(81),a=e.interopRequireDefault(D);A.exports=M["default"]},function(A,M){"use strict";var t=/^[\w-]*$/,I=Function.prototype.bind.call(Function.prototype.call,[].slice);A.exports=function(A,M){var g,e="#"===M[0],i="."===M[0],T=e||i?M.slice(1):M,E=t.test(T);return E?e?(A=A.getElementById?A:document,(g=A.getElementById(T))?[g]:[]):I(A.getElementsByClassName&&i?A.getElementsByClassName(T):A.getElementsByTagName(M)):I(A.querySelectorAll(M))}},function(A,M,t){"use strict";var I=t(57);A.exports=function(A,M){var t=I(A);return void 0===M?t?"pageXOffset"in t?t.pageXOffset:t.document.documentElement.scrollLeft:A.scrollLeft:void(t?t.scrollTo(M,"pageYOffset"in 
t?t.pageYOffset:t.document.documentElement.scrollTop):A.scrollLeft=M)}},function(A,M,t){"use strict";var I=t(58),g=t(144),e=I.interopRequireDefault(g),i=/^(top|right|bottom|left)$/,T=/^([+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|))(?!px)[a-z%]+$/i;A.exports=function(A){if(!A)throw new TypeError("No Element passed to ` + "`" + `getComputedStyle()` + "`" + `");var M=A.ownerDocument;return"defaultView"in M?M.defaultView.opener?A.ownerDocument.defaultView.getComputedStyle(A,null):window.getComputedStyle(A,null):{getPropertyValue:function(M){var t=A.style;M=(0,e["default"])(M),"float"==M&&(M="styleFloat");var I=A.currentStyle[M]||null;if(null==I&&t&&t[M]&&(I=t[M]),T.test(I)&&!i.test(M)){var g=t.left,E=A.runtimeStyle,N=E&&E.left;N&&(E.left=A.currentStyle.left),t.left="fontSize"===M?"1em":I,I=t.pixelLeft+"px",t.left=g,N&&(E.left=N)}return I}}}},function(A,M){"use strict";A.exports=function(A,M){return"removeProperty"in A.style?A.style.removeProperty(M):A.style.removeAttribute(M)}},function(A,M,t){"use strict";function I(){var A,M="",t={O:"otransitionend",Moz:"transitionend",Webkit:"webkitTransitionEnd",ms:"MSTransitionEnd"},I=document.createElement("div");for(var g in t)if(N.call(t,g)&&void 0!==I.style[g+"TransitionProperty"]){M="-"+g.toLowerCase()+"-",A=t[g];break}return A||void 0===I.style.transitionProperty||(A="transitionend"),{end:A,prefix:M}}var g,e,i,T,E=t(32),N=Object.prototype.hasOwnProperty,o="transform",n={};E&&(n=I(),o=n.prefix+o,i=n.prefix+"transition-property",e=n.prefix+"transition-duration",T=n.prefix+"transition-delay",g=n.prefix+"transition-timing-function"),A.exports={transform:o,end:n.end,property:i,timing:g,delay:T,duration:e}},function(A,M){"use strict";var t=/-(.)/g;A.exports=function(A){return A.replace(t,function(A,M){return M.toUpperCase()})}},function(A,M){"use strict";var t=/([A-Z])/g;A.exports=function(A){return A.replace(t,"-$1").toLowerCase()}},function(A,M,t){"use strict";var I=t(291),g=/^ms-/;A.exports=function(A){return 
I(A).replace(g,"-ms-")}},function(A,M){"use strict";function t(A){return A.replace(I,function(A,M){return M.toUpperCase()})}var I=/-(.)/g;A.exports=t},function(A,M,t){"use strict";function I(A){return g(A.replace(e,"ms-"))}var g=t(293),e=/^-ms-/;A.exports=I},function(A,M,t){"use strict";function I(A){return!!A&&("object"==typeof A||"function"==typeof A)&&"length"in A&&!("setInterval"in A)&&"number"!=typeof A.nodeType&&(Array.isArray(A)||"callee"in A||"item"in A)}function g(A){return I(A)?Array.isArray(A)?A.slice():e(A):[A]}var e=t(304);A.exports=g},function(A,M,t){"use strict";function I(A){var M=A.match(o);return M&&M[1].toLowerCase()}function g(A,M){var t=N;N?void 0:E(!1);var g=I(A),e=g&&T(g);if(e){t.innerHTML=e[1]+A+e[2];for(var o=e[0];o--;)t=t.lastChild}else t.innerHTML=A;var n=t.getElementsByTagName("script");n.length&&(M?void 0:E(!1),i(n).forEach(M));for(var C=i(t.childNodes);t.lastChild;)t.removeChild(t.lastChild);return C}var e=t(10),i=t(295),T=t(150),E=t(2),N=e.canUseDOM?document.createElement("div"):null,o=/^\s*<(\w+)/;A.exports=g},function(A,M){"use strict";function t(A){return A===window?{x:window.pageXOffset||document.documentElement.scrollLeft,y:window.pageYOffset||document.documentElement.scrollTop}:{x:A.scrollLeft,y:A.scrollTop}}A.exports=t},function(A,M){"use strict";function t(A){return A.replace(I,"-$1").toLowerCase()}var I=/([A-Z])/g;A.exports=t},function(A,M,t){"use strict";function I(A){return g(A).replace(e,"-ms-")}var g=t(298),e=/^ms-/;A.exports=I},function(A,M){"use strict";function t(A){return!(!A||!("function"==typeof Node?A instanceof Node:"object"==typeof A&&"number"==typeof A.nodeType&&"string"==typeof A.nodeName))}A.exports=t},function(A,M,t){"use strict";function I(A){return g(A)&&3==A.nodeType}var g=t(300);A.exports=I},function(A,M){"use strict";function t(A,M,t){if(!A)return null;var g={};for(var e in A)I.call(A,e)&&(g[e]=M.call(t,A[e],e,A));return g}var I=Object.prototype.hasOwnProperty;A.exports=t},function(A,M){"use 
strict";function t(A){var M={};return function(t){return M.hasOwnProperty(t)||(M[t]=A.call(this,t)),M[t]}}A.exports=t},function(A,M,t){"use strict";function I(A){var M=A.length;if(Array.isArray(A)||"object"!=typeof A&&"function"!=typeof A?g(!1):void 0,"number"!=typeof M?g(!1):void 0,0===M||M-1 in A?void 0:g(!1),A.hasOwnProperty)try{return Array.prototype.slice.call(A)}catch(t){}for(var I=Array(M),e=0;e=T?i(M):null,C=M.length;n&&(N=e,o=!1,M=n);A:for(;++Eg?0:g+M),t=void 0===t||t>g?g:+t||0,t<0&&(t+=g),g=M>t?0:t-M>>>0,M>>>=0;for(var e=Array(g);++I-1?t[N]:void 0}return e(t,I,A)}}var g=t(316),e=t(319),i=t(320),T=t(27);A.exports=I},function(A,M,t){function I(A,M,t,I,e,i,T){var E=-1,N=A.length,o=M.length;if(N!=o&&!(e&&o>N))return!1;for(;++Eli{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes 
fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\\F000"}.fa-music:before{content:"\\F001"}.fa-search:before{content:"\\F002"}.fa-envelope-o:before{content:"\\F003"}.fa-heart:before{content:"\\F004"}.fa-star:before{content:"\\F005"}.fa-star-o:before{content:"\\F006"}.fa-user:before{content:"\\F007"}.fa-film:before{content:"\\F008"}.fa-th-large:before{content:"\\F009"}.fa-th:before{content:"\\F00A"}.fa-th-list:before{content:"\\F00B"}.fa-check:before{content:"\\F00C"}.fa-close:before,.fa-remove:before,.fa-times:before{content:"\\F00D"}.fa-search-plus:before{content:"\\F00E"}.fa-search-minus:before{content:"\\F010"}.fa-power-off:before{content:"\\F011"}.fa-signal:before{content:"\\F012"}.fa-cog:before,.fa-gear:before{content:"\\F013"}.fa-trash-o:before{content:"\\F014"
}.fa-home:before{content:"\\F015"}.fa-file-o:before{content:"\\F016"}.fa-clock-o:before{content:"\\F017"}.fa-road:before{content:"\\F018"}.fa-download:before{content:"\\F019"}.fa-arrow-circle-o-down:before{content:"\\F01A"}.fa-arrow-circle-o-up:before{content:"\\F01B"}.fa-inbox:before{content:"\\F01C"}.fa-play-circle-o:before{content:"\\F01D"}.fa-repeat:before,.fa-rotate-right:before{content:"\\F01E"}.fa-refresh:before{content:"\\F021"}.fa-list-alt:before{content:"\\F022"}.fa-lock:before{content:"\\F023"}.fa-flag:before{content:"\\F024"}.fa-headphones:before{content:"\\F025"}.fa-volume-off:before{content:"\\F026"}.fa-volume-down:before{content:"\\F027"}.fa-volume-up:before{content:"\\F028"}.fa-qrcode:before{content:"\\F029"}.fa-barcode:before{content:"\\F02A"}.fa-tag:before{content:"\\F02B"}.fa-tags:before{content:"\\F02C"}.fa-book:before{content:"\\F02D"}.fa-bookmark:before{content:"\\F02E"}.fa-print:before{content:"\\F02F"}.fa-camera:before{content:"\\F030"}.fa-font:before{content:"\\F031"}.fa-bold:before{content:"\\F032"}.fa-italic:before{content:"\\F033"}.fa-text-height:before{content:"\\F034"}.fa-text-width:before{content:"\\F035"}.fa-align-left:before{content:"\\F036"}.fa-align-center:before{content:"\\F037"}.fa-align-right:before{content:"\\F038"}.fa-align-justify:before{content:"\\F039"}.fa-list:before{content:"\\F03A"}.fa-dedent:before,.fa-outdent:before{content:"\\F03B"}.fa-indent:before{content:"\\F03C"}.fa-video-camera:before{content:"\\F03D"}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:"\\F03E"}.fa-pencil:before{content:"\\F040"}.fa-map-marker:before{content:"\\F041"}.fa-adjust:before{content:"\\F042"}.fa-tint:before{content:"\\F043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\\F044"}.fa-share-square-o:before{content:"\\F045"}.fa-check-square-o:before{content:"\\F046"}.fa-arrows:before{content:"\\F047"}.fa-step-backward:before{content:"\\F048"}.fa-fast-backward:before{content:"\\F049"}.fa-backward:before{content:"\\F04A"}.fa-p
lay:before{content:"\\F04B"}.fa-pause:before{content:"\\F04C"}.fa-stop:before{content:"\\F04D"}.fa-forward:before{content:"\\F04E"}.fa-fast-forward:before{content:"\\F050"}.fa-step-forward:before{content:"\\F051"}.fa-eject:before{content:"\\F052"}.fa-chevron-left:before{content:"\\F053"}.fa-chevron-right:before{content:"\\F054"}.fa-plus-circle:before{content:"\\F055"}.fa-minus-circle:before{content:"\\F056"}.fa-times-circle:before{content:"\\F057"}.fa-check-circle:before{content:"\\F058"}.fa-question-circle:before{content:"\\F059"}.fa-info-circle:before{content:"\\F05A"}.fa-crosshairs:before{content:"\\F05B"}.fa-times-circle-o:before{content:"\\F05C"}.fa-check-circle-o:before{content:"\\F05D"}.fa-ban:before{content:"\\F05E"}.fa-arrow-left:before{content:"\\F060"}.fa-arrow-right:before{content:"\\F061"}.fa-arrow-up:before{content:"\\F062"}.fa-arrow-down:before{content:"\\F063"}.fa-mail-forward:before,.fa-share:before{content:"\\F064"}.fa-expand:before{content:"\\F065"}.fa-compress:before{content:"\\F066"}.fa-plus:before{content:"\\F067"}.fa-minus:before{content:"\\F068"}.fa-asterisk:before{content:"\\F069"}.fa-exclamation-circle:before{content:"\\F06A"}.fa-gift:before{content:"\\F06B"}.fa-leaf:before{content:"\\F06C"}.fa-fire:before{content:"\\F06D"}.fa-eye:before{content:"\\F06E"}.fa-eye-slash:before{content:"\\F070"}.fa-exclamation-triangle:before,.fa-warning:before{content:"\\F071"}.fa-plane:before{content:"\\F072"}.fa-calendar:before{content:"\\F073"}.fa-random:before{content:"\\F074"}.fa-comment:before{content:"\\F075"}.fa-magnet:before{content:"\\F076"}.fa-chevron-up:before{content:"\\F077"}.fa-chevron-down:before{content:"\\F078"}.fa-retweet:before{content:"\\F079"}.fa-shopping-cart:before{content:"\\F07A"}.fa-folder:before{content:"\\F07B"}.fa-folder-open:before{content:"\\F07C"}.fa-arrows-v:before{content:"\\F07D"}.fa-arrows-h:before{content:"\\F07E"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\\F080"}.fa-twitter-square:before{content:"\\F081"}.fa-f
acebook-square:before{content:"\\F082"}.fa-camera-retro:before{content:"\\F083"}.fa-key:before{content:"\\F084"}.fa-cogs:before,.fa-gears:before{content:"\\F085"}.fa-comments:before{content:"\\F086"}.fa-thumbs-o-up:before{content:"\\F087"}.fa-thumbs-o-down:before{content:"\\F088"}.fa-star-half:before{content:"\\F089"}.fa-heart-o:before{content:"\\F08A"}.fa-sign-out:before{content:"\\F08B"}.fa-linkedin-square:before{content:"\\F08C"}.fa-thumb-tack:before{content:"\\F08D"}.fa-external-link:before{content:"\\F08E"}.fa-sign-in:before{content:"\\F090"}.fa-trophy:before{content:"\\F091"}.fa-github-square:before{content:"\\F092"}.fa-upload:before{content:"\\F093"}.fa-lemon-o:before{content:"\\F094"}.fa-phone:before{content:"\\F095"}.fa-square-o:before{content:"\\F096"}.fa-bookmark-o:before{content:"\\F097"}.fa-phone-square:before{content:"\\F098"}.fa-twitter:before{content:"\\F099"}.fa-facebook-f:before,.fa-facebook:before{content:"\\F09A"}.fa-github:before{content:"\\F09B"}.fa-unlock:before{content:"\\F09C"}.fa-credit-card:before{content:"\\F09D"}.fa-feed:before,.fa-rss:before{content:"\\F09E"}.fa-hdd-o:before{content:"\\F0A0"}.fa-bullhorn:before{content:"\\F0A1"}.fa-bell:before{content:"\\F0F3"}.fa-certificate:before{content:"\\F0A3"}.fa-hand-o-right:before{content:"\\F0A4"}.fa-hand-o-left:before{content:"\\F0A5"}.fa-hand-o-up:before{content:"\\F0A6"}.fa-hand-o-down:before{content:"\\F0A7"}.fa-arrow-circle-left:before{content:"\\F0A8"}.fa-arrow-circle-right:before{content:"\\F0A9"}.fa-arrow-circle-up:before{content:"\\F0AA"}.fa-arrow-circle-down:before{content:"\\F0AB"}.fa-globe:before{content:"\\F0AC"}.fa-wrench:before{content:"\\F0AD"}.fa-tasks:before{content:"\\F0AE"}.fa-filter:before{content:"\\F0B0"}.fa-briefcase:before{content:"\\F0B1"}.fa-arrows-alt:before{content:"\\F0B2"}.fa-group:before,.fa-users:before{content:"\\F0C0"}.fa-chain:before,.fa-link:before{content:"\\F0C1"}.fa-cloud:before{content:"\\F0C2"}.fa-flask:before{content:"\\F0C3"}.fa-cut:before,.fa-scisso
rs:before{content:"\\F0C4"}.fa-copy:before,.fa-files-o:before{content:"\\F0C5"}.fa-paperclip:before{content:"\\F0C6"}.fa-floppy-o:before,.fa-save:before{content:"\\F0C7"}.fa-square:before{content:"\\F0C8"}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:"\\F0C9"}.fa-list-ul:before{content:"\\F0CA"}.fa-list-ol:before{content:"\\F0CB"}.fa-strikethrough:before{content:"\\F0CC"}.fa-underline:before{content:"\\F0CD"}.fa-table:before{content:"\\F0CE"}.fa-magic:before{content:"\\F0D0"}.fa-truck:before{content:"\\F0D1"}.fa-pinterest:before{content:"\\F0D2"}.fa-pinterest-square:before{content:"\\F0D3"}.fa-google-plus-square:before{content:"\\F0D4"}.fa-google-plus:before{content:"\\F0D5"}.fa-money:before{content:"\\F0D6"}.fa-caret-down:before{content:"\\F0D7"}.fa-caret-up:before{content:"\\F0D8"}.fa-caret-left:before{content:"\\F0D9"}.fa-caret-right:before{content:"\\F0DA"}.fa-columns:before{content:"\\F0DB"}.fa-sort:before,.fa-unsorted:before{content:"\\F0DC"}.fa-sort-desc:before,.fa-sort-down:before{content:"\\F0DD"}.fa-sort-asc:before,.fa-sort-up:before{content:"\\F0DE"}.fa-envelope:before{content:"\\F0E0"}.fa-linkedin:before{content:"\\F0E1"}.fa-rotate-left:before,.fa-undo:before{content:"\\F0E2"}.fa-gavel:before,.fa-legal:before{content:"\\F0E3"}.fa-dashboard:before,.fa-tachometer:before{content:"\\F0E4"}.fa-comment-o:before{content:"\\F0E5"}.fa-comments-o:before{content:"\\F0E6"}.fa-bolt:before,.fa-flash:before{content:"\\F0E7"}.fa-sitemap:before{content:"\\F0E8"}.fa-umbrella:before{content:"\\F0E9"}.fa-clipboard:before,.fa-paste:before{content:"\\F0EA"}.fa-lightbulb-o:before{content:"\\F0EB"}.fa-exchange:before{content:"\\F0EC"}.fa-cloud-download:before{content:"\\F0ED"}.fa-cloud-upload:before{content:"\\F0EE"}.fa-user-md:before{content:"\\F0F0"}.fa-stethoscope:before{content:"\\F0F1"}.fa-suitcase:before{content:"\\F0F2"}.fa-bell-o:before{content:"\\F0A2"}.fa-coffee:before{content:"\\F0F4"}.fa-cutlery:before{content:"\\F0F5"}.fa-file-text-o:before{content:
"\\F0F6"}.fa-building-o:before{content:"\\F0F7"}.fa-hospital-o:before{content:"\\F0F8"}.fa-ambulance:before{content:"\\F0F9"}.fa-medkit:before{content:"\\F0FA"}.fa-fighter-jet:before{content:"\\F0FB"}.fa-beer:before{content:"\\F0FC"}.fa-h-square:before{content:"\\F0FD"}.fa-plus-square:before{content:"\\F0FE"}.fa-angle-double-left:before{content:"\\F100"}.fa-angle-double-right:before{content:"\\F101"}.fa-angle-double-up:before{content:"\\F102"}.fa-angle-double-down:before{content:"\\F103"}.fa-angle-left:before{content:"\\F104"}.fa-angle-right:before{content:"\\F105"}.fa-angle-up:before{content:"\\F106"}.fa-angle-down:before{content:"\\F107"}.fa-desktop:before{content:"\\F108"}.fa-laptop:before{content:"\\F109"}.fa-tablet:before{content:"\\F10A"}.fa-mobile-phone:before,.fa-mobile:before{content:"\\F10B"}.fa-circle-o:before{content:"\\F10C"}.fa-quote-left:before{content:"\\F10D"}.fa-quote-right:before{content:"\\F10E"}.fa-spinner:before{content:"\\F110"}.fa-circle:before{content:"\\F111"}.fa-mail-reply:before,.fa-reply:before{content:"\\F112"}.fa-github-alt:before{content:"\\F113"}.fa-folder-o:before{content:"\\F114"}.fa-folder-open-o:before{content:"\\F115"}.fa-smile-o:before{content:"\\F118"}.fa-frown-o:before{content:"\\F119"}.fa-meh-o:before{content:"\\F11A"}.fa-gamepad:before{content:"\\F11B"}.fa-keyboard-o:before{content:"\\F11C"}.fa-flag-o:before{content:"\\F11D"}.fa-flag-checkered:before{content:"\\F11E"}.fa-terminal:before{content:"\\F120"}.fa-code:before{content:"\\F121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\\F122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\\F123"}.fa-location-arrow:before{content:"\\F124"}.fa-crop:before{content:"\\F125"}.fa-code-fork:before{content:"\\F126"}.fa-chain-broken:before,.fa-unlink:before{content:"\\F127"}.fa-question:before{content:"\\F128"}.fa-info:before{content:"\\F129"}.fa-exclamation:before{content:"\\F12A"}.fa-superscript:before{content:"\\F12B"}.fa-subscript:bef
ore{content:"\\F12C"}.fa-eraser:before{content:"\\F12D"}.fa-puzzle-piece:before{content:"\\F12E"}.fa-microphone:before{content:"\\F130"}.fa-microphone-slash:before{content:"\\F131"}.fa-shield:before{content:"\\F132"}.fa-calendar-o:before{content:"\\F133"}.fa-fire-extinguisher:before{content:"\\F134"}.fa-rocket:before{content:"\\F135"}.fa-maxcdn:before{content:"\\F136"}.fa-chevron-circle-left:before{content:"\\F137"}.fa-chevron-circle-right:before{content:"\\F138"}.fa-chevron-circle-up:before{content:"\\F139"}.fa-chevron-circle-down:before{content:"\\F13A"}.fa-html5:before{content:"\\F13B"}.fa-css3:before{content:"\\F13C"}.fa-anchor:before{content:"\\F13D"}.fa-unlock-alt:before{content:"\\F13E"}.fa-bullseye:before{content:"\\F140"}.fa-ellipsis-h:before{content:"\\F141"}.fa-ellipsis-v:before{content:"\\F142"}.fa-rss-square:before{content:"\\F143"}.fa-play-circle:before{content:"\\F144"}.fa-ticket:before{content:"\\F145"}.fa-minus-square:before{content:"\\F146"}.fa-minus-square-o:before{content:"\\F147"}.fa-level-up:before{content:"\\F148"}.fa-level-down:before{content:"\\F149"}.fa-check-square:before{content:"\\F14A"}.fa-pencil-square:before{content:"\\F14B"}.fa-external-link-square:before{content:"\\F14C"}.fa-share-square:before{content:"\\F14D"}.fa-compass:before{content:"\\F14E"}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:"\\F150"}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:"\\F151"}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:"\\F152"}.fa-eur:before,.fa-euro:before{content:"\\F153"}.fa-gbp:before{content:"\\F154"}.fa-dollar:before,.fa-usd:before{content:"\\F155"}.fa-inr:before,.fa-rupee:before{content:"\\F156"}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:"\\F157"}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:"\\F158"}.fa-krw:before,.fa-won:before{content:"\\F159"}.fa-bitcoin:before,.fa-btc:before{content:"\\F15A"}.fa-file:before{content:"\\F15B"}.fa-file-text:before{content:"\\F1
5C"}.fa-sort-alpha-asc:before{content:"\\F15D"}.fa-sort-alpha-desc:before{content:"\\F15E"}.fa-sort-amount-asc:before{content:"\\F160"}.fa-sort-amount-desc:before{content:"\\F161"}.fa-sort-numeric-asc:before{content:"\\F162"}.fa-sort-numeric-desc:before{content:"\\F163"}.fa-thumbs-up:before{content:"\\F164"}.fa-thumbs-down:before{content:"\\F165"}.fa-youtube-square:before{content:"\\F166"}.fa-youtube:before{content:"\\F167"}.fa-xing:before{content:"\\F168"}.fa-xing-square:before{content:"\\F169"}.fa-youtube-play:before{content:"\\F16A"}.fa-dropbox:before{content:"\\F16B"}.fa-stack-overflow:before{content:"\\F16C"}.fa-instagram:before{content:"\\F16D"}.fa-flickr:before{content:"\\F16E"}.fa-adn:before{content:"\\F170"}.fa-bitbucket:before{content:"\\F171"}.fa-bitbucket-square:before{content:"\\F172"}.fa-tumblr:before{content:"\\F173"}.fa-tumblr-square:before{content:"\\F174"}.fa-long-arrow-down:before{content:"\\F175"}.fa-long-arrow-up:before{content:"\\F176"}.fa-long-arrow-left:before{content:"\\F177"}.fa-long-arrow-right:before{content:"\\F178"}.fa-apple:before{content:"\\F179"}.fa-windows:before{content:"\\F17A"}.fa-android:before{content:"\\F17B"}.fa-linux:before{content:"\\F17C"}.fa-dribbble:before{content:"\\F17D"}.fa-skype:before{content:"\\F17E"}.fa-foursquare:before{content:"\\F180"}.fa-trello:before{content:"\\F181"}.fa-female:before{content:"\\F182"}.fa-male:before{content:"\\F183"}.fa-gittip:before,.fa-gratipay:before{content:"\\F184"}.fa-sun-o:before{content:"\\F185"}.fa-moon-o:before{content:"\\F186"}.fa-archive:before{content:"\\F187"}.fa-bug:before{content:"\\F188"}.fa-vk:before{content:"\\F189"}.fa-weibo:before{content:"\\F18A"}.fa-renren:before{content:"\\F18B"}.fa-pagelines:before{content:"\\F18C"}.fa-stack-exchange:before{content:"\\F18D"}.fa-arrow-circle-o-right:before{content:"\\F18E"}.fa-arrow-circle-o-left:before{content:"\\F190"}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:"\\F191"}.fa-dot-circle-o:before{content:"\\F192"}.fa-
wheelchair:before{content:"\\F193"}.fa-vimeo-square:before{content:"\\F194"}.fa-try:before,.fa-turkish-lira:before{content:"\\F195"}.fa-plus-square-o:before{content:"\\F196"}.fa-space-shuttle:before{content:"\\F197"}.fa-slack:before{content:"\\F198"}.fa-envelope-square:before{content:"\\F199"}.fa-wordpress:before{content:"\\F19A"}.fa-openid:before{content:"\\F19B"}.fa-bank:before,.fa-institution:before,.fa-university:before{content:"\\F19C"}.fa-graduation-cap:before,.fa-mortar-board:before{content:"\\F19D"}.fa-yahoo:before{content:"\\F19E"}.fa-google:before{content:"\\F1A0"}.fa-reddit:before{content:"\\F1A1"}.fa-reddit-square:before{content:"\\F1A2"}.fa-stumbleupon-circle:before{content:"\\F1A3"}.fa-stumbleupon:before{content:"\\F1A4"}.fa-delicious:before{content:"\\F1A5"}.fa-digg:before{content:"\\F1A6"}.fa-pied-piper-pp:before{content:"\\F1A7"}.fa-pied-piper-alt:before{content:"\\F1A8"}.fa-drupal:before{content:"\\F1A9"}.fa-joomla:before{content:"\\F1AA"}.fa-language:before{content:"\\F1AB"}.fa-fax:before{content:"\\F1AC"}.fa-building:before{content:"\\F1AD"}.fa-child:before{content:"\\F1AE"}.fa-paw:before{content:"\\F1B0"}.fa-spoon:before{content:"\\F1B1"}.fa-cube:before{content:"\\F1B2"}.fa-cubes:before{content:"\\F1B3"}.fa-behance:before{content:"\\F1B4"}.fa-behance-square:before{content:"\\F1B5"}.fa-steam:before{content:"\\F1B6"}.fa-steam-square:before{content:"\\F1B7"}.fa-recycle:before{content:"\\F1B8"}.fa-automobile:before,.fa-car:before{content:"\\F1B9"}.fa-cab:before,.fa-taxi:before{content:"\\F1BA"}.fa-tree:before{content:"\\F1BB"}.fa-spotify:before{content:"\\F1BC"}.fa-deviantart:before{content:"\\F1BD"}.fa-soundcloud:before{content:"\\F1BE"}.fa-database:before{content:"\\F1C0"}.fa-file-pdf-o:before{content:"\\F1C1"}.fa-file-word-o:before{content:"\\F1C2"}.fa-file-excel-o:before{content:"\\F1C3"}.fa-file-powerpoint-o:before{content:"\\F1C4"}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:"\\F1C5"}.fa-file-archive-o:befo
re,.fa-file-zip-o:before{content:"\\F1C6"}.fa-file-audio-o:before,.fa-file-sound-o:before{content:"\\F1C7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\\F1C8"}.fa-file-code-o:before{content:"\\F1C9"}.fa-vine:before{content:"\\F1CA"}.fa-codepen:before{content:"\\F1CB"}.fa-jsfiddle:before{content:"\\F1CC"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:"\\F1CD"}.fa-circle-o-notch:before{content:"\\F1CE"}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:"\\F1D0"}.fa-empire:before,.fa-ge:before{content:"\\F1D1"}.fa-git-square:before{content:"\\F1D2"}.fa-git:before{content:"\\F1D3"}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:"\\F1D4"}.fa-tencent-weibo:before{content:"\\F1D5"}.fa-qq:before{content:"\\F1D6"}.fa-wechat:before,.fa-weixin:before{content:"\\F1D7"}.fa-paper-plane:before,.fa-send:before{content:"\\F1D8"}.fa-paper-plane-o:before,.fa-send-o:before{content:"\\F1D9"}.fa-history:before{content:"\\F1DA"}.fa-circle-thin:before{content:"\\F1DB"}.fa-header:before{content:"\\F1DC"}.fa-paragraph:before{content:"\\F1DD"}.fa-sliders:before{content:"\\F1DE"}.fa-share-alt:before{content:"\\F1E0"}.fa-share-alt-square:before{content:"\\F1E1"}.fa-bomb:before{content:"\\F1E2"}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:"\\F1E3"}.fa-tty:before{content:"\\F1E4"}.fa-binoculars:before{content:"\\F1E5"}.fa-plug:before{content:"\\F1E6"}.fa-slideshare:before{content:"\\F1E7"}.fa-twitch:before{content:"\\F1E8"}.fa-yelp:before{content:"\\F1E9"}.fa-newspaper-o:before{content:"\\F1EA"}.fa-wifi:before{content:"\\F1EB"}.fa-calculator:before{content:"\\F1EC"}.fa-paypal:before{content:"\\F1ED"}.fa-google-wallet:before{content:"\\F1EE"}.fa-cc-visa:before{content:"\\F1F0"}.fa-cc-mastercard:before{content:"\\F1F1"}.fa-cc-discover:before{content:"\\F1F2"}.fa-cc-amex:before{content:"\\F1F3"}.fa-cc-paypal:before{content:"\\F1F4"}.fa-cc-stripe:before{content:"\\F1F5"}.
fa-bell-slash:before{content:"\\F1F6"}.fa-bell-slash-o:before{content:"\\F1F7"}.fa-trash:before{content:"\\F1F8"}.fa-copyright:before{content:"\\F1F9"}.fa-at:before{content:"\\F1FA"}.fa-eyedropper:before{content:"\\F1FB"}.fa-paint-brush:before{content:"\\F1FC"}.fa-birthday-cake:before{content:"\\F1FD"}.fa-area-chart:before{content:"\\F1FE"}.fa-pie-chart:before{content:"\\F200"}.fa-line-chart:before{content:"\\F201"}.fa-lastfm:before{content:"\\F202"}.fa-lastfm-square:before{content:"\\F203"}.fa-toggle-off:before{content:"\\F204"}.fa-toggle-on:before{content:"\\F205"}.fa-bicycle:before{content:"\\F206"}.fa-bus:before{content:"\\F207"}.fa-ioxhost:before{content:"\\F208"}.fa-angellist:before{content:"\\F209"}.fa-cc:before{content:"\\F20A"}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:"\\F20B"}.fa-meanpath:before{content:"\\F20C"}.fa-buysellads:before{content:"\\F20D"}.fa-connectdevelop:before{content:"\\F20E"}.fa-dashcube:before{content:"\\F210"}.fa-forumbee:before{content:"\\F211"}.fa-leanpub:before{content:"\\F212"}.fa-sellsy:before{content:"\\F213"}.fa-shirtsinbulk:before{content:"\\F214"}.fa-simplybuilt:before{content:"\\F215"}.fa-skyatlas:before{content:"\\F216"}.fa-cart-plus:before{content:"\\F217"}.fa-cart-arrow-down:before{content:"\\F218"}.fa-diamond:before{content:"\\F219"}.fa-ship:before{content:"\\F21A"}.fa-user-secret:before{content:"\\F21B"}.fa-motorcycle:before{content:"\\F21C"}.fa-street-view:before{content:"\\F21D"}.fa-heartbeat:before{content:"\\F21E"}.fa-venus:before{content:"\\F221"}.fa-mars:before{content:"\\F222"}.fa-mercury:before{content:"\\F223"}.fa-intersex:before,.fa-transgender:before{content:"\\F224"}.fa-transgender-alt:before{content:"\\F225"}.fa-venus-double:before{content:"\\F226"}.fa-mars-double:before{content:"\\F227"}.fa-venus-mars:before{content:"\\F228"}.fa-mars-stroke:before{content:"\\F229"}.fa-mars-stroke-v:before{content:"\\F22A"}.fa-mars-stroke-h:before{content:"\\F22B"}.fa-neuter:before{content:"\\F22C"}.fa-gender
less:before{content:"\\F22D"}.fa-facebook-official:before{content:"\\F230"}.fa-pinterest-p:before{content:"\\F231"}.fa-whatsapp:before{content:"\\F232"}.fa-server:before{content:"\\F233"}.fa-user-plus:before{content:"\\F234"}.fa-user-times:before{content:"\\F235"}.fa-bed:before,.fa-hotel:before{content:"\\F236"}.fa-viacoin:before{content:"\\F237"}.fa-train:before{content:"\\F238"}.fa-subway:before{content:"\\F239"}.fa-medium:before{content:"\\F23A"}.fa-y-combinator:before,.fa-yc:before{content:"\\F23B"}.fa-optin-monster:before{content:"\\F23C"}.fa-opencart:before{content:"\\F23D"}.fa-expeditedssl:before{content:"\\F23E"}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:"\\F240"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:"\\F241"}.fa-battery-2:before,.fa-battery-half:before{content:"\\F242"}.fa-battery-1:before,.fa-battery-quarter:before{content:"\\F243"}.fa-battery-0:before,.fa-battery-empty:before{content:"\\F244"}.fa-mouse-pointer:before{content:"\\F245"}.fa-i-cursor:before{content:"\\F246"}.fa-object-group:before{content:"\\F247"}.fa-object-ungroup:before{content:"\\F248"}.fa-sticky-note:before{content:"\\F249"}.fa-sticky-note-o:before{content:"\\F24A"}.fa-cc-jcb:before{content:"\\F24B"}.fa-cc-diners-club:before{content:"\\F24C"}.fa-clone:before{content:"\\F24D"}.fa-balance-scale:before{content:"\\F24E"}.fa-hourglass-o:before{content:"\\F250"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\\F251"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\\F252"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\\F253"}.fa-hourglass:before{content:"\\F254"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:"\\F255"}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:"\\F256"}.fa-hand-scissors-o:before{content:"\\F257"}.fa-hand-lizard-o:before{content:"\\F258"}.fa-hand-spock-o:before{content:"\\F259"}.fa-hand-pointer-o:before{content:"\\F25A"}.fa-hand-peace-o:before{content:"\\F25B"}.fa-trademark:be
fore{content:"\\F25C"}.fa-registered:before{content:"\\F25D"}.fa-creative-commons:before{content:"\\F25E"}.fa-gg:before{content:"\\F260"}.fa-gg-circle:before{content:"\\F261"}.fa-tripadvisor:before{content:"\\F262"}.fa-odnoklassniki:before{content:"\\F263"}.fa-odnoklassniki-square:before{content:"\\F264"}.fa-get-pocket:before{content:"\\F265"}.fa-wikipedia-w:before{content:"\\F266"}.fa-safari:before{content:"\\F267"}.fa-chrome:before{content:"\\F268"}.fa-firefox:before{content:"\\F269"}.fa-opera:before{content:"\\F26A"}.fa-internet-explorer:before{content:"\\F26B"}.fa-television:before,.fa-tv:before{content:"\\F26C"}.fa-contao:before{content:"\\F26D"}.fa-500px:before{content:"\\F26E"}.fa-amazon:before{content:"\\F270"}.fa-calendar-plus-o:before{content:"\\F271"}.fa-calendar-minus-o:before{content:"\\F272"}.fa-calendar-times-o:before{content:"\\F273"}.fa-calendar-check-o:before{content:"\\F274"}.fa-industry:before{content:"\\F275"}.fa-map-pin:before{content:"\\F276"}.fa-map-signs:before{content:"\\F277"}.fa-map-o:before{content:"\\F278"}.fa-map:before{content:"\\F279"}.fa-commenting:before{content:"\\F27A"}.fa-commenting-o:before{content:"\\F27B"}.fa-houzz:before{content:"\\F27C"}.fa-vimeo:before{content:"\\F27D"}.fa-black-tie:before{content:"\\F27E"}.fa-fonticons:before{content:"\\F280"}.fa-reddit-alien:before{content:"\\F281"}.fa-edge:before{content:"\\F282"}.fa-credit-card-alt:before{content:"\\F283"}.fa-codiepie:before{content:"\\F284"}.fa-modx:before{content:"\\F285"}.fa-fort-awesome:before{content:"\\F286"}.fa-usb:before{content:"\\F287"}.fa-product-hunt:before{content:"\\F288"}.fa-mixcloud:before{content:"\\F289"}.fa-scribd:before{content:"\\F28A"}.fa-pause-circle:before{content:"\\F28B"}.fa-pause-circle-o:before{content:"\\F28C"}.fa-stop-circle:before{content:"\\F28D"}.fa-stop-circle-o:before{content:"\\F28E"}.fa-shopping-bag:before{content:"\\F290"}.fa-shopping-basket:before{content:"\\F291"}.fa-hashtag:before{content:"\\F292"}.fa-bluetooth:before{content:"\
\F293"}.fa-bluetooth-b:before{content:"\\F294"}.fa-percent:before{content:"\\F295"}.fa-gitlab:before{content:"\\F296"}.fa-wpbeginner:before{content:"\\F297"}.fa-wpforms:before{content:"\\F298"}.fa-envira:before{content:"\\F299"}.fa-universal-access:before{content:"\\F29A"}.fa-wheelchair-alt:before{content:"\\F29B"}.fa-question-circle-o:before{content:"\\F29C"}.fa-blind:before{content:"\\F29D"}.fa-audio-description:before{content:"\\F29E"}.fa-volume-control-phone:before{content:"\\F2A0"}.fa-braille:before{content:"\\F2A1"}.fa-assistive-listening-systems:before{content:"\\F2A2"}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:"\\F2A3"}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:"\\F2A4"}.fa-glide:before{content:"\\F2A5"}.fa-glide-g:before{content:"\\F2A6"}.fa-sign-language:before,.fa-signing:before{content:"\\F2A7"}.fa-low-vision:before{content:"\\F2A8"}.fa-viadeo:before{content:"\\F2A9"}.fa-viadeo-square:before{content:"\\F2AA"}.fa-snapchat:before{content:"\\F2AB"}.fa-snapchat-ghost:before{content:"\\F2AC"}.fa-snapchat-square:before{content:"\\F2AD"}.fa-pied-piper:before{content:"\\F2AE"}.fa-first-order:before{content:"\\F2B0"}.fa-yoast:before{content:"\\F2B1"}.fa-themeisle:before{content:"\\F2B2"}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:"\\F2B3"}.fa-fa:before,.fa-font-awesome:before{content:"\\F2B4"}.fa-handshake-o:before{content:"\\F2B5"}.fa-envelope-open:before{content:"\\F2B6"}.fa-envelope-open-o:before{content:"\\F2B7"}.fa-linode:before{content:"\\F2B8"}.fa-address-book:before{content:"\\F2B9"}.fa-address-book-o:before{content:"\\F2BA"}.fa-address-card:before,.fa-vcard:before{content:"\\F2BB"}.fa-address-card-o:before,.fa-vcard-o:before{content:"\\F2BC"}.fa-user-circle:before{content:"\\F2BD"}.fa-user-circle-o:before{content:"\\F2BE"}.fa-user-o:before{content:"\\F2C0"}.fa-id-badge:before{content:"\\F2C1"}.fa-drivers-license:before,.fa-id-card:before{content:"\\F2C2"}.fa-dri
vers-license-o:before,.fa-id-card-o:before{content:"\\F2C3"}.fa-quora:before{content:"\\F2C4"}.fa-free-code-camp:before{content:"\\F2C5"}.fa-telegram:before{content:"\\F2C6"}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:"\\F2C7"}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\\F2C8"}.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\\F2C9"}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\\F2CA"}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\\F2CB"}.fa-shower:before{content:"\\F2CC"}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:"\\F2CD"}.fa-podcast:before{content:"\\F2CE"}.fa-window-maximize:before{content:"\\F2D0"}.fa-window-minimize:before{content:"\\F2D1"}.fa-window-restore:before{content:"\\F2D2"}.fa-times-rectangle:before,.fa-window-close:before{content:"\\F2D3"}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:"\\F2D4"}.fa-bandcamp:before{content:"\\F2D5"}.fa-grav:before{content:"\\F2D6"}.fa-etsy:before{content:"\\F2D7"}.fa-imdb:before{content:"\\F2D8"}.fa-ravelry:before{content:"\\F2D9"}.fa-eercast:before{content:"\\F2DA"}.fa-microchip:before{content:"\\F2DB"}.fa-snowflake-o:before{content:"\\F2DC"}.fa-superpowers:before{content:"\\F2DD"}.fa-wpexplorer:before{content:"\\F2DE"}.fa-meetup:before{content:"\\F2E0"}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}',""]); 
+},function(A,M,t){M=A.exports=t(132)(),M.push([A.id,'*,:after,:before{box-sizing:border-box}body{font-family:Lato,sans-serif;font-size:15px;line-height:1.42857143;color:#8e8e8e;background-color:#edecec}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#46a5e0}a,a:focus,a:hover{text-decoration:none}a:focus,a:hover{color:#1f7fba}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.img-responsive{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{padding:4px;line-height:1.42857143;background-color:#edecec;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out;display:inline-block;max-width:100%;height:auto}.img-circle{border-radius:50%}hr{margin-top:21px;margin-bottom:21px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.container{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}.row{margin-left:-15px;margin-right:-15px}.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{position:relative;min-height:1px;padding-left:15px;padding-right:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-
left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media (min-width:768px){.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margi
n-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin
-left:0}}@media (min-width:1200px){.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em 
.75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;font-size:15px;text-align:left;background-color:#fff;border:1px solid transparent;border-radius:4px;box-shadow:0 6px 12px rgba(0,0,0,.175);background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9.5px 0;overflow:hidden;background-color:rgba(0,0,0,.08)}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#8e8e8e;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{text-decoration:none;color:#333;background-color:rgba(0,0,0,.05)}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#333;text-decoration:none;outline:0;background-color:rgba(0,0,0,.075)}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#e4e4e4}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);cursor:not-allowed}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{left:auto;right:0}.dropdown-menu-left{left:0;right:auto}.dropdown-header{display:block;padding:3px 20px;font-size:13px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;left:0;right:0;bottom:0;top:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup 
.caret,.navbar-fixed-bottom .dropdown .caret{border-top:0;border-bottom:4px dashed;border-bottom:4px solid\\9;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{left:auto;right:0}.navbar-right .dropdown-menu>li>a{text-align:right}.navbar-right .dropdown-menu-left{left:0;right:auto}}.modal,.modal-open{overflow:hidden}.modal{display:none;position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transform:translateY(-25%);transform:translateY(-25%);-webkit-transition:-webkit-transform .3s ease-out;transition:transform .3s ease-out}.modal.in .modal-dialog{-webkit-transform:translate(0);transform:translate(0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;border:1px solid transparent;border-radius:6px;box-shadow:0 3px 9px rgba(0,0,0,.5);background-clip:padding-box;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:rgba(0,0,0,.1)}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:.5;filter:alpha(opacity=50)}.modal-header{padding:30px 35px 0;border-bottom:1px solid transparent}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:transparent}.modal-body{position:relative;padding:30px 35px}.modal-footer{padding:30px 35px;text-align:right;border-top:1px solid transparent}.modal-footer .btn+.btn{margin-left:5px;margin-bottom:0}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:400px}}@media 
(min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:Lato,sans-serif;font-style:normal;font-weight:400;letter-spacing:normal;line-break:auto;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;white-space:normal;word-break:normal;word-spacing:normal;word-wrap:normal;font-size:13px;opacity:0;filter:alpha(opacity=0)}.tooltip.in{opacity:.9;filter:alpha(opacity=90)}.tooltip.top{margin-top:-3px;padding:5px 0}.tooltip.right{margin-left:3px;padding:0 5px}.tooltip.bottom{margin-top:3px;padding:5px 0}.tooltip.left{margin-left:-3px;padding:0 5px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px}.tooltip.top-left .tooltip-arrow,.tooltip.top-right .tooltip-arrow{bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{left:5px}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 
5px;border-bottom-color:#000}@-ms-viewport{width:device-width}.visible-lg,.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media 
(min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}}*{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}:active,:focus{outline:0}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body,html{min-height:100%}a{-webkit-transition:color;transition:color;-webkit-transition-duration:.3s;transition-duration:.3s}button{border:0}.animated.infinite{-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite}@-webkit-keyframes fadeIn{0%{opacity:0}to{opacity:1}}@keyframes fadeIn{0%{opacity:0}to{opacity:1}}.fadeIn{-webkit-animation-name:fadeIn;animation-name:fadeIn}@-webkit-keyframes 
fadeInDown{0%{opacity:0;-webkit-transform:translateY(-20px)}to{opacity:1;-webkit-transform:translateY(0)}}@keyframes fadeInDown{0%{opacity:0;transform:translateY(-20px)}to{opacity:1;transform:translateY(0)}}.fadeInDown{-webkit-animation-name:fadeInDown;animation-name:fadeInDown}@-webkit-keyframes fadeInUp{0%{opacity:0;-webkit-transform:translateY(20px)}to{opacity:1;-webkit-transform:translateY(0)}}@keyframes fadeInUp{0%{opacity:0;transform:translateY(20px)}to{opacity:1;transform:translateY(0)}}.fadeInUp{-webkit-animation-name:fadeInUp;animation-name:fadeInUp}@-webkit-keyframes fadeOut{0%{opacity:1}to{opacity:0}}@keyframes fadeOut{0%{opacity:1}to{opacity:0}}.fadeOut{-webkit-animation-name:fadeOut;animation-name:fadeOut}@-webkit-keyframes fadeOutDown{0%{opacity:1;-webkit-transform:translateY(0)}to{opacity:0;-webkit-transform:translateY(20px)}}@keyframes fadeOutDown{0%{opacity:1;transform:translateY(0)}to{opacity:0;transform:translateY(20px)}}.fadeOutDown{-webkit-animation-name:fadeOutDown;animation-name:fadeOutDown}@-webkit-keyframes fadeOutUp{0%{opacity:1;-webkit-transform:translateY(0)}to{opacity:0;-webkit-transform:translateY(-20px)}}@keyframes fadeOutUp{0%{opacity:1;transform:translateY(0)}to{opacity:0;transform:translateY(-20px)}}.fadeOutUp{-webkit-animation-name:fadeOutUp;animation-name:fadeOutUp}@-webkit-keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}@keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}.text-center{text-align:center!important}.text-left{text-align:left!important}.text-right{text-align:right!important}.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.row:after,.row:before{content:" 
";display:table}.clearfix:after,.container-fluid:after,.container:after,.modal-footer:after,.modal-header:after,.row:after{clear:both}.pull-right{float:right!important}.pull-left{float:left!important}.p-relative{position:relative}.m-0{margin:0!important}.m-t-0{margin-top:0!important}.m-b-0{margin-bottom:0!important}.m-l-0{margin-left:0!important}.m-r-0{margin-right:0!important}.m-5{margin:5px!important}.m-t-5{margin-top:5px!important}.m-b-5{margin-bottom:5px!important}.m-l-5{margin-left:5px!important}.m-r-5{margin-right:5px!important}.m-10{margin:10px!important}.m-t-10{margin-top:10px!important}.m-b-10{margin-bottom:10px!important}.m-l-10{margin-left:10px!important}.m-r-10{margin-right:10px!important}.m-15{margin:15px!important}.m-t-15{margin-top:15px!important}.m-b-15{margin-bottom:15px!important}.m-l-15{margin-left:15px!important}.m-r-15{margin-right:15px!important}.m-20{margin:20px!important}.m-t-20{margin-top:20px!important}.m-b-20{margin-bottom:20px!important}.m-l-20{margin-left:20px!important}.m-r-20{margin-right:20px!important}.m-25{margin:25px!important}.m-t-25{margin-top:25px!important}.m-b-25{margin-bottom:25px!important}.m-l-25{margin-left:25px!important}.m-r-25{margin-right:25px!important}.m-30{margin:30px!important}.m-t-30{margin-top:30px!important}.m-b-30{margin-bottom:30px!important}.m-l-30{margin-left:30px!important}.m-r-30{margin-right:30px!important}.p-0{padding:0!important}.p-t-0{padding-top:0!important}.p-b-0{padding-bottom:0!important}.p-l-0{padding-left:0!important}.p-r-0{padding-right:0!important}.p-5{padding:5px!important}.p-t-5{padding-top:5px!important}.p-b-5{padding-bottom:5px!important}.p-l-5{padding-left:5px!important}.p-r-5{padding-right:5px!important}.p-10{padding:10px!important}.p-t-10{padding-top:10px!important}.p-b-10{padding-bottom:10px!important}.p-l-10{padding-left:10px!important}.p-r-10{padding-right:10px!important}.p-15{padding:15px!important}.p-t-15{padding-top:15px!important}.p-b-15{padding-bottom:15px!important}.p-l-15{paddi
ng-left:15px!important}.p-r-15{padding-right:15px!important}.p-20{padding:20px!important}.p-t-20{padding-top:20px!important}.p-b-20{padding-bottom:20px!important}.p-l-20{padding-left:20px!important}.p-r-20{padding-right:20px!important}.p-25{padding:25px!important}.p-t-25{padding-top:25px!important}.p-b-25{padding-bottom:25px!important}.p-l-25{padding-left:25px!important}.p-r-25{padding-right:25px!important}.p-30{padding:30px!important}.p-t-30{padding-top:30px!important}.p-b-30{padding-bottom:30px!important}.p-l-30{padding-left:30px!important}.p-r-30{padding-right:30px!important}@font-face{font-family:Lato;src:url('+t(512)+") format('woff2'),url("+t(511)+") format('woff');font-weight:400;font-style:normal}.form-control{border:0;border-bottom:1px solid #eee;color:#32393f;padding:5px;width:100%;font-size:13px;background-color:transparent}select.form-control{-webkit-appearance:none;-moz-appearance:none;border-radius:0;background:url("+t(515)+") no-repeat bottom 7px right}.input-group{position:relative}.input-group:not(:last-child){margin-bottom:25px}.input-group label:not(.ig-label){font-size:13px;display:block;margin-bottom:10px}.ig-label{position:absolute;text-align:center;bottom:7px;left:0;width:100%;-webkit-transition:all;transition:all;-webkit-transition-duration:.25s;transition-duration:.25s;padding:2px 0 3px;border-radius:2px;font-weight:400}.ig-helpers{z-index:1;width:100%;left:0}.ig-helpers,.ig-helpers:after,.ig-helpers:before{position:absolute;height:2px;bottom:0}.ig-helpers:after,.ig-helpers:before{content:'';width:0;-webkit-transition:all;transition:all;-webkit-transition-duration:.25s;transition-duration:.25s;background-color:#03a9f4}.ig-helpers:before{left:50%}.ig-helpers:after{right:50%}.ig-text{width:100%;height:40px;border:0;background:transparent;text-align:center;position:relative;z-index:1;border-bottom:1px solid 
#eee;color:#32393f;font-size:13px}.ig-text:focus+.ig-helpers:after,.ig-text:focus+.ig-helpers:before{width:50%}.ig-text:disabled~.ig-label,.ig-text:focus~.ig-label,.ig-text:valid~.ig-label{bottom:35px;font-size:13px;z-index:1}.ig-text:disabled{opacity:.5;filter:alpha(opacity=50)}.ig-dark .ig-text{color:#fff;border-color:hsla(0,0%,100%,.1)}.ig-dark .ig-helpers:after,.ig-dark .ig-helpers:before{background-color:#dfdfdf;height:1px}.ig-left .ig-label,.ig-left .ig-text{text-align:left}.ig-error .ig-label{color:#e23f3f}.ig-error .ig-helpers i:first-child,.ig-error .ig-helpers i:first-child:after,.ig-error .ig-helpers i:first-child:before{background:rgba(226,63,63,.43)}.ig-error .ig-helpers i:last-child,.ig-error .ig-helpers i:last-child:after,.ig-error .ig-helpers i:last-child:before{background:#e23f3f!important}.ig-error:after{content:\"\\F05A\";font-family:FontAwesome;position:absolute;top:17px;right:9px;font-size:20px;color:#d33d3e}.ig-search:before{font-family:fontAwesome;content:'\\F002';font-size:15px;position:absolute;left:2px;top:8px}.ig-search .ig-text{padding-left:25px}.set-expire{border:1px solid #eee;margin:35px 0 30px}.set-expire-item{padding:9px 5px 3px;position:relative;display:table-cell;width:1%;text-align:center}.set-expire-item:not(:last-child){border-right:1px solid #eee}.set-expire-title{font-size:10px;text-transform:uppercase}.set-expire-value{display:inline-block;overflow:hidden;position:relative;left:-8px}.set-expire-value input{font-size:20px;text-align:center;position:relative;right:-15px;border:0;color:#333;padding:0;height:25px;width:100%;font-weight:400}.set-expire-decrease,.set-expire-increase{position:absolute;width:20px;height:20px;background:url("+t(513)+") no-repeat 
50%;background-size:85%;left:50%;margin-left:-10px;opacity:.2;filter:alpha(opacity=20);cursor:pointer}.set-expire-decrease:hover,.set-expire-increase:hover{opacity:.5;filter:alpha(opacity=50)}.set-expire-increase{top:-25px}.set-expire-decrease{bottom:-27px;-webkit-transform:rotate(-180deg);transform:rotate(-180deg)}.btn{border:0;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:2px;text-align:center;-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}.btn:focus,.btn:hover{opacity:.9;filter:alpha(opacity=90)}.btn-block{display:block;width:100%}.btn-link{color:#545454;background-color:#eee}.btn-link:focus,.btn-link:hover{color:#545454;background-color:#dfdfdf}.btn-danger{color:#fff;background-color:#ff726f}.btn-danger:focus,.btn-danger:hover{color:#fff;background-color:#ff5450}.btn-primary{color:#fff;background-color:#50b2ff}.btn-primary:focus,.btn-primary:hover{color:#fff;background-color:#31a5ff}.btn-success{color:#fff;background-color:#33d46f}.btn-success:focus,.btn-success:hover{color:#fff;background-color:#28c061}.close{right:15px;font-weight:400;opacity:1;font-size:18px;position:absolute;text-align:center;top:16px;z-index:1;padding:0;border:0;background-color:transparent}.close span{width:25px;height:25px;display:block;border-radius:50%;line-height:24px;text-shadow:none}.close:not(.close-alt) span{background-color:hsla(0,0%,100%,.1);color:hsla(0,0%,100%,.8)}.close:not(.close-alt):focus span,.close:not(.close-alt):hover span{background-color:hsla(0,0%,100%,.2);color:#fff}.close-alt span{background-color:#efefef;color:#989898}.close-alt:focus span,.close-alt:hover span{background-color:#e8e8e8;color:#7b7b7b}.hidden{display:none!important}.copy-text input{width:100%;border-radius:1px;border:1px solid #eee;padding:7px 12px;font-size:13px;line-height:100%;cursor:text;-webkit-transition:border-color;transition:border-color;-webkit-transition-duration:.3s;transition-duration:.3s}.copy-text 
input:hover{border-color:#e1e1e1}.share-availability{margin-bottom:40px}.share-availability:after,.share-availability:before{position:absolute;bottom:-30px;font-size:10px}.share-availability:before{content:'01 Sec';left:0}.share-availability:after{content:'7 days';right:0}.login{height:100vh;min-height:500px;background:#32393f;text-align:center}.login:before{height:calc(100% - 110px);width:1px;content:\"\"}.l-wrap,.login:before{display:inline-block;vertical-align:middle}.l-wrap{width:80%;max-width:500px;margin-top:-50px}.l-wrap.toggled{display:inline-block}.l-wrap .input-group:not(:last-child){margin-bottom:40px}.l-footer{height:110px;padding:0 50px}.lf-logo{float:right}.lf-logo img{width:40px}.lf-server{float:left;color:hsla(0,0%,100%,.4);font-size:20px;font-weight:400;padding-top:40px}@media (max-width:768px){.lf-logo,.lf-server{float:none;display:block;text-align:center;width:100%}.lf-logo{margin-bottom:5px}.lf-server{font-size:15px}}.lw-btn{width:50px;height:50px;border:1px solid #fff;display:inline-block;border-radius:50%;font-size:22px;color:#fff;-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s;opacity:.3;background-color:transparent;line-height:45px;padding:0}.lw-btn:hover{color:#fff;opacity:.8;border-color:#fff}.lw-btn i{display:block;width:100%;padding-left:3px}input:-webkit-autofill{-webkit-box-shadow:0 0 0 50px #32393f inset!important;-webkit-text-fill-color:#fff!important}.fe-header{padding:45px 55px 20px}@media (min-width:992px){.fe-header{position:relative}}@media (max-width:667px){.fe-header{padding:25px 25px 20px}}.fe-header h2{font-size:16px;font-weight:400;margin:0}.fe-header h2>span{margin-bottom:7px;display:inline-block}.fe-header h2>span:not(:first-child):before{content:'/';margin:0 4px;color:#8e8e8e}.fe-header p{margin-top:7px}.feh-usage{margin-top:12px;max-width:285px}@media 
(max-width:667px){.feh-usage{max-width:100%;font-size:12px}}.feh-usage>ul{margin-top:7px;list-style:none;padding:0}.feh-usage>ul>li{padding-right:0;display:inline-block}.fehu-chart{height:5px;background:#eee;position:relative;border-radius:2px;overflow:hidden}.fehu-chart>div{position:absolute;left:0;height:100%;background:#46a5e0}.feh-actions{list-style:none;padding:0;margin:0;position:absolute;right:35px;top:30px;z-index:11}@media (max-width:991px){.feh-actions{top:7px;right:10px;position:fixed}}.feh-actions>li{display:inline-block;text-align:right;vertical-align:top;line-height:100%}.feh-actions>li>.btn-group>button,.feh-actions>li>a{display:block;height:45px;min-width:45px;text-align:center;border-radius:50%;padding:0;border:0;background:none}@media (min-width:992px){.feh-actions>li>.btn-group>button,.feh-actions>li>a{color:#7b7b7b;font-size:21px;line-height:45px;-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}.feh-actions>li>.btn-group>button:hover,.feh-actions>li>a:hover{background:rgba(0,0,0,.09)}}@media (max-width:991px){.feh-actions>li>.btn-group>button,.feh-actions>li>a{background:url("+t(235)+") no-repeat 50%}.feh-actions>li>.btn-group>button .fa-reorder,.feh-actions>li>a .fa-reorder{display:none}}@media (max-width:991px){.fe-header-mobile{background-color:#32393f;padding:10px 50px 9px 12px;text-align:center;position:fixed;z-index:10;box-shadow:0 0 10px rgba(0,0,0,.3);left:0;top:0;width:100%}.fe-header-mobile 
.mh-logo{height:35px;position:relative;top:4px}.feh-trigger{width:41px;height:41px;cursor:pointer;float:left;position:relative;text-align:center}.feh-trigger:after,.feh-trigger:before{content:\"\";position:absolute;top:0;left:0;width:100%;height:100%;border-radius:50%}.feh-trigger:after{z-index:1}.feh-trigger:before{background:hsla(0,0%,100%,.1);-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s;-webkit-transform:scale(0);transform:scale(0)}.feht-toggled:before{-webkit-transform:scale(1);transform:scale(1)}.feht-toggled .feht-lines{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.feht-toggled .feht-lines>div.top{width:12px;transform:translateX(8px) translateY(1px) rotate(45deg);-webkit-transform:translateX(8px) translateY(1px) rotate(45deg)}.feht-toggled .feht-lines>div.bottom{width:12px;transform:translateX(8px) translateY(-1px) rotate(-45deg);-webkit-transform:translateX(8px) translateY(-1px) rotate(-45deg)}.feht-lines,.feht-lines>div{-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}.feht-lines{width:18px;height:12px;display:inline-block;margin-top:14px}.feht-lines>div{background-color:#eaeaea;width:18px;height:2px}.feht-lines>div.center{margin:3px 0}}.fe-sidebar{width:300px;background-color:#32393f;position:fixed;height:100%;overflow:hidden;padding:35px}@media (min-width:992px){.fe-sidebar{-webkit-transform:translateZ(0);transform:translateZ(0)}}@media (max-width:991px){.fe-sidebar{padding-top:85px;z-index:9;box-shadow:0 0 10px rgba(0,0,0,.65);-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s;-webkit-transform:translate3d(-315px,0,0);transform:translate3d(-315px,0,0)}.fe-sidebar.toggled{-webkit-transform:translateZ(0);transform:translateZ(0)}}.fe-sidebar a{color:hsla(0,0%,100%,.58)}.fe-sidebar a:hover{color:#fff}.fes-header{margin-bottom:40px}.fes-header h2,.fes-header img{float:left}.fes-header h2{margin:13px 0 0 
10px;font-weight:400}.fes-header img{width:32px}.fesl-inner{height:calc(100vh - 260px);overflow:auto;padding:0;margin:0 -35px}.fesl-inner li{position:relative}.fesl-inner li>a{display:block;padding:10px 40px 12px 65px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.fesl-inner li>a:before{font-family:FontAwesome;content:'\\F0A0';font-size:17px;position:absolute;top:10px;left:35px;opacity:.8;filter:alpha(opacity=80)}.fesl-inner li>a.fesli-loading:before{content:'';width:20px;height:20px;border-radius:50%;-webkit-animation-name:zoomIn;animation-name:zoomIn;-webkit-animation-duration:.5s;animation-duration:.5s;-webkit-animation-fill-mode:both;animation-fill-mode:both;border:2px solid hsla(0,0%,100%,.1);border-bottom-color:hsla(0,0%,100%,.5);position:absolute;z-index:1;-webkit-animation:zoomIn .25s,spin .7s .25s infinite linear;animation:zoomIn .25s,spin .7s .25s infinite linear;left:32px;top:0;bottom:0;margin:auto}.fesl-inner li.active{background-color:rgba(0,0,0,.2)}.fesl-inner li.active>a{color:#fff}.fesl-inner li:not(.active):hover{background-color:rgba(0,0,0,.1)}.fesl-inner li:not(.active):hover>a{color:#fff}.fesl-inner li:hover .fesli-trigger{opacity:.6;filter:alpha(opacity=60)}.fesl-inner li:hover .fesli-trigger:hover{opacity:1;filter:alpha(opacity=100)}.fesl-inner ul{list-style:none;padding:0;margin:0}.fesl-inner:hover .scrollbar-vertical{opacity:1}.fesli-trigger{filter:alpha(opacity=0);-webkit-transition:all;transition:all;-webkit-transition-duration:.2s;transition-duration:.2s;top:0;right:0;width:40px;cursor:pointer;background:url("+t(235)+') no-repeat 0}.fesli-trigger,.scrollbar-vertical{opacity:0;position:absolute;height:100%}.scrollbar-vertical{right:5px;width:4px;-webkit-transition:opacity;transition:opacity;-webkit-transition-duration:.3s;transition-duration:.3s}.scrollbar-vertical 
div{border-radius:1px!important;background-color:#6a6a6a!important}.fes-host{position:fixed;left:0;bottom:0;z-index:1;background:#32393f;color:hsla(0,0%,100%,.4);font-size:15px;font-weight:400;width:300px;padding:20px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.fes-host>i{margin-right:10px}.fesl-row{padding-right:40px;padding-top:5px;padding-bottom:5px;position:relative}@media (min-width:668px){.fesl-row{display:flex;flex-flow:row;justify-content:space-between}}.fesl-row:after,.fesl-row:before{content:" ";display:table}.fesl-row:after{clear:both}@media (min-width:668px){header.fesl-row{margin-bottom:20px;border-bottom:1px solid #f0f0f0;padding-left:40px}header.fesl-row .fesl-item,header.fesl-row .fesli-sort{-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}header.fesl-row .fesl-item{cursor:pointer;color:#8e8e8e;font-weight:500;margin-bottom:-5px}header.fesl-row .fesl-item>.fesli-sort{float:right;margin:4px 0 0;opacity:0;filter:alpha(opacity=0);color:#32393f;font-size:14px}header.fesl-row .fesl-item:hover:not(.fi-actions){background:#f5f5f5;color:#32393f}header.fesl-row .fesl-item:hover:not(.fi-actions)>.fesli-sort{opacity:.5;filter:alpha(opacity=50)}}@media (max-width:667px){header.fesl-row{display:none}}div.fesl-row{padding-left:85px;border-bottom:1px solid transparent;cursor:default}@media (max-width:667px){div.fesl-row{padding-left:70px;padding-right:45px}}div.fesl-row:nth-child(even){background-color:#fafafa}div.fesl-row:hover{background-color:#fbf7dc}div.fesl-row[data-type]:before{font-family:fontAwesome;width:35px;height:35px;text-align:center;line-height:35px;position:absolute;border-radius:50%;font-size:16px;left:50px;top:9px;color:#fff}@media (max-width:667px){div.fesl-row[data-type]:before{left:20px}}@media (max-width:667px){div.fesl-row[data-type=folder] .fesl-item.fi-name{padding-top:10px;padding-bottom:7px}div.fesl-row[data-type=folder] .fesl-item.fi-modified,div.fesl-row[data-type=folder] 
.fesl-item.fi-size{display:none}}div.fesl-row[data-type=folder]:before{content:\'\\F114\';background-color:#a1d6dd}div.fesl-row[data-type=pdf]:before{content:"\\F1C1";background-color:#fa7775}div.fesl-row[data-type=zip]:before{content:"\\F1C6";background-color:#427089}div.fesl-row[data-type=audio]:before{content:"\\F1C7";background-color:#009688}div.fesl-row[data-type=code]:before{content:"\\F1C9";background-color:#997867}div.fesl-row[data-type=excel]:before{content:"\\F1C3";background-color:#64c866}div.fesl-row[data-type=image]:before{content:"\\F1C5";background-color:#f06292}div.fesl-row[data-type=video]:before{content:"\\F1C8";background-color:#f8c363}div.fesl-row[data-type=other]:before{content:"\\F016";background-color:#afafaf}div.fesl-row[data-type=text]:before{content:"\\F0F6";background-color:#8a8a8a}div.fesl-row[data-type=doc]:before{content:"\\F1C2";background-color:#2196f5}div.fesl-row[data-type=presentation]:before{content:"\\F1C4";background-color:#896ea6}div.fesl-row.fesl-loading:before{content:\'\'}div.fesl-row.fesl-loading:after{content:\'\';width:20px;height:20px;border-radius:50%;-webkit-animation-name:zoomIn;animation-name:zoomIn;-webkit-animation-duration:.5s;animation-duration:.5s;-webkit-animation-fill-mode:both;animation-fill-mode:both;border:2px solid hsla(0,0%,100%,.5);border-bottom-color:#fff;position:absolute;z-index:1;-webkit-animation:zoomIn .25s,spin .7s .25s infinite linear;animation:zoomIn .25s,spin .7s .25s infinite linear;left:57px;top:17px}@media (max-width:667px){div.fesl-row.fesl-loading:after{left:27px}}.fesl-item{display:block}.fesl-item a{color:#818181}@media (min-width:668px){.fesl-item:not(.fi-actions){text-overflow:ellipsis;padding:10px 15px;white-space:nowrap;overflow:hidden}.fesl-item.fi-name{flex:3}.fesl-item.fi-size{width:140px}.fesl-item.fi-modified{width:190px}.fesl-item.fi-actions{width:40px}}@media 
(max-width:667px){.fesl-item{padding:0}.fesl-item.fi-name{width:100%;margin-bottom:3px}.fesl-item.fi-modified,.fesl-item.fi-size{font-size:12px;color:#b5b5b5;float:left}.fesl-item.fi-modified{max-width:72px;white-space:nowrap;overflow:hidden}.fesl-item.fi-size{margin-right:10px}.fesl-item.fi-actions{position:absolute;top:5px;right:10px}}.fia-toggle{height:36px;width:36px;background:transparent url('+t(514)+") no-repeat 50%;position:relative;top:3px;opacity:.4;filter:alpha(opacity=40)}.fia-toggle:hover{opacity:.7;filter:alpha(opacity=70)}.fi-actions .dropdown-menu{background-color:transparent;box-shadow:none;padding:0;right:38px;left:auto;margin:0;height:100%;text-align:right}.fi-actions .dropdown.open .dropdown-menu .fiad-action{right:0}.fiad-action{height:35px;width:35px;background:#ffc107;display:inline-block;border-radius:50%;text-align:center;line-height:35px;font-weight:400;position:relative;top:4px;margin-left:5px;-webkit-animation-name:fiad-action-anim;animation-name:fiad-action-anim;-webkit-transform-origin:center center;transform-origin:center center;-webkit-backface-visibility:none;backface-visibility:none;box-shadow:0 2px 4px rgba(0,0,0,.1)}.fiad-action:nth-child(2){-webkit-animation-duration:.1s;animation-duration:.1s}.fiad-action:nth-child(1){-webkit-animation-duration:.25s;animation-duration:.25s}.fiad-action>i{font-size:14px;color:#fff}.fiad-action:hover{background-color:#f7b900}@-webkit-keyframes fiad-action-anim{0%{-webkit-transform:scale(0);transform:scale(0);opacity:0;filter:alpha(opacity=0);right:-20px}to{-webkit-transform:scale(1);transform:scale(1);opacity:1;filter:alpha(opacity=100);right:0}}@keyframes 
fiad-action-anim{0%{-webkit-transform:scale(0);transform:scale(0);opacity:0;filter:alpha(opacity=0);right:-20px}to{-webkit-transform:scale(1);transform:scale(1);opacity:1;filter:alpha(opacity=100);right:0}}.file-explorer{background-color:#fff;position:relative;height:100%}.file-explorer.toggled{height:100vh;overflow:hidden}.fe-body{min-height:100vh;overflow:auto}@media (min-width:992px){.fe-body{padding:0 0 40px 300px}}@media (max-width:991px){.fe-body{padding:75px 0 80px}}.feb-actions{position:fixed;bottom:30px;right:30px}.feb-actions .dropdown-menu{min-width:55px;width:55px;text-align:center;background:transparent;box-shadow:none;margin:0}.feb-actions.open .feba-btn{-webkit-transform:scale(1);transform:scale(1)}.feb-actions.open .feba-btn:first-child{-webkit-animation-name:feba-btn-anim;animation-name:feba-btn-anim;-webkit-animation-duration:.3s;animation-duration:.3s}.feb-actions.open .feba-btn:last-child{-webkit-animation-name:feba-btn-anim;animation-name:feba-btn-anim;-webkit-animation-duration:.1s;animation-duration:.1s}.feb-actions.open .feba-toggle{background:#ff403c}.feb-actions.open .feba-toggle>span{-webkit-transform:rotate(135deg);transform:rotate(135deg)}.feba-toggle{width:55px;height:55px;line-height:55px;border-radius:50%;background:#ff726f;box-shadow:0 2px 3px rgba(0,0,0,.15);display:inline-block;text-align:center;border:0;padding:0}.feba-toggle span{display:inline-block;height:100%;width:100%}.feba-toggle i{color:#fff;font-size:17px;line-height:58px}.feba-toggle,.feba-toggle>span{-webkit-transition:all;transition:all;-webkit-transition-duration:.25s;transition-duration:.25s;-webkit-backface-visibility:hidden;backface-visibility:hidden}.feba-btn{width:40px;margin-top:10px;height:40px;border-radius:50%;text-align:center;display:inline-block;line-height:40px;box-shadow:0 2px 3px rgba(0,0,0,.15);-webkit-transform:scale(0);transform:scale(0);position:relative}.feba-btn,.feba-btn:focus,.feba-btn:hover{color:#fff}.feba-btn 
label{width:100%;height:100%;position:absolute;left:0;top:0;cursor:pointer}.feba-bucket{background:#ffc155}.feba-upload{background:#ffc107}@-webkit-keyframes feba-btn-anim{0%{-webkit-transform:scale(0);transform:scale(0);opacity:0;filter:alpha(opacity=0)}to{-webkit-transform:scale(1);transform:scale(1);opacity:1;filter:alpha(opacity=100)}}@keyframes feba-btn-anim{0%{-webkit-transform:scale(0);transform:scale(0);opacity:0;filter:alpha(opacity=0)}to{-webkit-transform:scale(1);transform:scale(1);opacity:1;filter:alpha(opacity=100)}}.ie-warning{background-color:#ff5252;width:100%;height:100%;position:fixed;left:0;top:0;text-align:center}.ie-warning:before{width:1px;content:'';height:100%}.ie-warning .iw-inner,.ie-warning:before{display:inline-block;vertical-align:middle}.iw-inner{width:470px;height:300px;background-color:#fff;border-radius:5px;padding:40px;position:relative}.iw-inner ul{list-style:none;padding:0;margin:0;width:230px;margin-left:80px;margin-top:16px}.iw-inner ul>li{float:left}.iw-inner ul>li>a{display:block;padding:10px 15px 7px;font-size:14px;margin:0 1px;border-radius:3px}.iw-inner ul>li>a:hover{background:#eee}.iw-inner ul>li>a img{height:40px;margin-bottom:5px}.iwi-icon{color:#ff5252;font-size:40px;display:block;line-height:100%;margin-bottom:15px}.iwi-skip{position:absolute;left:0;bottom:-35px;width:100%;color:hsla(0,0%,100%,.6);cursor:pointer}.iwi-skip:hover{color:#fff}.dropdown-menu{padding:15px 0;top:0;margin-top:-1px}.dropdown-menu>li>a{padding:8px 20px;font-size:15px}.dropdown-menu>li>a>i{width:20px;position:relative;top:1px}.dropdown-menu-right>li>a{text-align:right}.alert{border:0;position:fixed;max-width:500px;margin:0;box-shadow:0 4px 5px rgba(0,0,0,.1);color:#fff;width:100%;right:20px;border-radius:3px;padding:17px 50px 17px 17px;z-index:10010;-webkit-animation-duration:.8s;animation-duration:.8s;-webkit-animation-fill-mode:both;animation-fill-mode:both}.alert:not(.progress){top:20px}@media 
(min-width:768px){.alert:not(.progress){left:50%;margin-left:-250px}}.alert.progress{bottom:20px;right:20px}.alert.alert-danger{background:#ff726f}.alert.alert-success{background:#33d46f}.alert.alert-info{background:#50b2ff}@media (max-width:767px){.alert{left:20px;width:calc(100% - 40px);max-width:100%}}.alert .progress{margin:10px 10px 8px 0;height:5px;box-shadow:none;border-radius:1px;background-color:#50b2ff;border-radius:2px;overflow:hidden}.alert .progress-bar{box-shadow:none;background-color:#fff;height:100%}.alert .close{position:absolute;top:15px}@media (min-width:768px){.modal{text-align:center}.modal:before{content:'';height:100%;width:1px}.modal .modal-dialog,.modal:before{display:inline-block;vertical-align:middle}.modal .modal-dialog{text-align:left;margin:10px auto}}.modal-dark .modal-header{color:hsla(0,0%,100%,.4)}.modal-dark .modal-header small{color:hsla(0,0%,100%,.2)}.modal-dark .modal-content{background-color:#32393f}.modal-backdrop{-webkit-animation-name:fadeIn;animation-name:fadeIn;-webkit-animation-fill-mode:both;animation-fill-mode:both}.modal-backdrop,.modal-dialog{-webkit-animation-duration:.2s;animation-duration:.2s}.modal-dialog{-webkit-animation-name:zoomIn;animation-name:zoomIn;-webkit-animation-fill-mode:both;animation-fill-mode:both}.modal-header{color:#333;position:relative}.modal-header small{display:block;text-transform:none;font-size:12px;margin-top:5px;color:#a8a8a8}.modal-content{border-radius:3px;box-shadow:none}.modal-footer{padding:0 30px 30px}.modal-confirm .modal-dialog,.modal-footer{text-align:center}.mc-icon{margin:0 0 10px}.mc-icon>i{font-size:60px}.mci-red{color:#ff8f8f}.mci-amber{color:#ffc107}.mci-green{color:#64e096}.mc-text{color:#333}.mc-sub{color:#bdbdbd;margin-top:5px;font-size:13px}@media (max-width:767px){.modal-about{text-align:center}.modal-about .modal-dialog{max-width:400px;width:90%;margin:20px auto 0}}.ma-inner{display:flex;flex-direction:row;align-items:center;min-height:350px;position:relative}@media 
(min-width:768px){.ma-inner:before{content:'';width:150px;height:100%;top:0;left:0;position:absolute;border-radius:3px 0 0 3px;background-color:#23282c}}.mai-item:first-child{width:150px;text-align:center}.mai-item:last-child{flex:4;padding:30px}.maii-logo{width:70px;position:relative}.maii-list{list-style:none;padding:0}.maii-list>li{margin-bottom:15px}.maii-list>li div{color:hsla(0,0%,100%,.8);text-transform:uppercase;font-size:14px}.maii-list>li small{font-size:13px;color:hsla(0,0%,100%,.4)}.toggle-password{position:absolute;bottom:30px;right:35px;width:30px;height:30px;border:1px solid #eee;border-radius:0;text-align:center;cursor:pointer;z-index:10;background-color:#fff;padding-top:5px}.toggle-password.toggled{background:#eee}.pm-body{padding-bottom:30px}.pmb-header{margin-bottom:35px}.pmb-list{display:flex;flex-flow:row;align-items:center;justify-content:center;padding:10px 35px}.pmb-list:nth-child(even){background-color:#f7f7f7}.pmb-list .form-control{padding-left:0;padding-right:0}header.pmb-list{margin:20px 0 10px}.pmbl-item{display:block;font-size:13px}.pmbl-item:nth-child(1){flex:2}.pmbl-item:nth-child(2){margin:0 25px;width:150px}.pmbl-item:nth-child(3){width:70px}div.pmb-list select{border:0}div.pmb-list .pml-item:not(:last-child){padding:0 5px}.modal-create-bucket .modal-dialog{position:fixed;right:25px;bottom:95px;margin:0;height:110px}.modal-create-bucket .modal-content{width:100%;height:100%}",""]); +},function(A,M){"use strict";var t={childContextTypes:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,mixins:!0,propTypes:!0,type:!0},I={name:!0,length:!0,prototype:!0,caller:!0,arguments:!0,arity:!0},g="function"==typeof Object.getOwnPropertySymbols;A.exports=function(A,M,e){if("string"!=typeof M){var i=Object.getOwnPropertyNames(M);g&&(i=i.concat(Object.getOwnPropertySymbols(M)));for(var T=0;To||N===o&&"application/"===M[T].substr(0,12))continue}M[T]=I}}})}var 
E=t(348),N=t(351).extname,o=/^\s*([^;\s]*)(?:;|\s|$)/,n=/^text\//i;M.charset=I,M.charsets={lookup:I},M.contentType=g,M.extension=e,M.extensions=Object.create(null),M.lookup=i,M.types=Object.create(null),T(M.extensions,M.types)},function(A,M){function t(A){throw new Error("Cannot find module '"+A+"'.")}t.keys=function(){return[]},t.resolve=t,A.exports=t,t.id=350},function(A,M,t){(function(A){function t(A,M){for(var t=0,I=A.length-1;I>=0;I--){var g=A[I];"."===g?A.splice(I,1):".."===g?(A.splice(I,1),t++):t&&(A.splice(I,1),t--)}if(M)for(;t--;t)A.unshift("..");return A}function I(A,M){if(A.filter)return A.filter(M);for(var t=[],I=0;I=-1&&!g;e--){var i=e>=0?arguments[e]:A.cwd();if("string"!=typeof i)throw new TypeError("Arguments to path.resolve must be strings");i&&(M=i+"/"+M,g="/"===i.charAt(0))}return M=t(I(M.split("/"),function(A){return!!A}),!g).join("/"),(g?"/":"")+M||"."},M.normalize=function(A){var g=M.isAbsolute(A),e="/"===i(A,-1);return A=t(I(A.split("/"),function(A){return!!A}),!g).join("/"),A||g||(A="."),A&&e&&(A+="/"),(g?"/":"")+A},M.isAbsolute=function(A){return"/"===A.charAt(0)},M.join=function(){var A=Array.prototype.slice.call(arguments,0);return M.normalize(I(A,function(A,M){if("string"!=typeof A)throw new TypeError("Arguments to path.join must be strings");return A}).join("/"))},M.relative=function(A,t){function I(A){for(var M=0;M=0&&""===A[t];t--);return M>t?[]:A.slice(M,t-M+1)}A=M.resolve(A).substr(1),t=M.resolve(t).substr(1);for(var g=I(A.split("/")),e=I(t.split("/")),i=Math.min(g.length,e.length),T=i,E=0;E=0;e--){var i=I[e]+g;if(i in M)return i}return!1}},function(A,M,t){"use strict";var I=t(483);M.extract=function(A){return A.split("?")[1]||""},M.parse=function(A){return"string"!=typeof A?{}:(A=A.trim().replace(/^(\?|#|&)/,""),A?A.split("&").reduce(function(A,M){var t=M.replace(/\+/g," ").split("="),I=t.shift(),g=t.length>0?t.join("="):void 0;return I=decodeURIComponent(I),g=void 
0===g?null:decodeURIComponent(g),A.hasOwnProperty(I)?Array.isArray(A[I])?A[I].push(g):A[I]=[A[I],g]:A[I]=g,A},{}):{})},M.stringify=function(A){return A?Object.keys(A).sort().map(function(M){var t=A[M];return void 0===t?"":null===t?M:Array.isArray(t)?t.slice().sort().map(function(A){return I(M)+"="+I(A)}).join("&"):I(M)+"="+I(t)}).filter(function(A){return A.length>0}).join("&"):""}},function(A,M){"use strict";function t(A,M){return Object.prototype.hasOwnProperty.call(A,M)}A.exports=function(A,M,I,g){M=M||"&",I=I||"=";var e={};if("string"!=typeof A||0===A.length)return e;var i=/\+/g;A=A.split(M);var T=1e3;g&&"number"==typeof g.maxKeys&&(T=g.maxKeys);var E=A.length;T>0&&E>T&&(E=T);for(var N=0;N=0?(o=D.substr(0,a),n=D.substr(a+1)):(o=D,n=""),C=decodeURIComponent(o),c=decodeURIComponent(n),t(e,C)?Array.isArray(e[C])?e[C].push(c):e[C]=[e[C],c]:e[C]=c}return e}},function(A,M){"use strict";var t=function(A){switch(typeof A){case"string":return A;case"boolean":return A?"true":"false";case"number":return isFinite(A)?A:"";default:return""}};A.exports=function(A,M,I,g){return M=M||"&",I=I||"=",null===A&&(A=void 0),"object"==typeof A?Object.keys(A).map(function(g){var e=encodeURIComponent(t(g))+I;return Array.isArray(A[g])?A[g].map(function(A){return e+encodeURIComponent(t(A))}).join(M):e+encodeURIComponent(t(A[g]))}).join(M):g?encodeURIComponent(t(g))+I+encodeURIComponent(t(A)):""}},function(A,M,t){"use strict";M.decode=M.parse=t(355),M.encode=M.stringify=t(356)},function(A,M,t){(function(M){for(var I=t(352),g="undefined"==typeof window?M:window,e=["moz","webkit"],i="AnimationFrame",T=g["request"+i],E=g["cancel"+i]||g["cancelRequest"+i],N=0;!T&&N1)||(e=A,!1)}),e)return new Error("(children) "+I+" - Duplicate children detected of bsRole: "+e+". 
Only one child each allowed with the following bsRoles: "+M.join(", "))})}},A.exports=M["default"]},function(A,M,t){"use strict";function I(A){var M=[];return void 0===A?M:(i["default"].forEach(A,function(A){M.push(A)}),M)}var g=t(6)["default"];M.__esModule=!0,M["default"]=I;var e=t(51),i=g(e);A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A,M){var t={};for(var I in A)M.indexOf(I)>=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}Object.defineProperty(M,"__esModule",{value:!0}),M.CopyToClipboard=void 0;var e=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){var M=A.style,t=g(A,["style"]),I=o({},M,{height:6,right:2,bottom:2,left:2,borderRadius:3});return C["default"].createElement("div",o({style:I},t))}function i(A){var M=A.style,t=g(A,["style"]),I=o({},M,{width:6,right:2,bottom:2,top:2,borderRadius:3});return C["default"].createElement("div",o({style:I},t))}function T(A){var M=A.style,t=g(A,["style"]),I=o({},M,{cursor:"pointer",borderRadius:"inherit",backgroundColor:"rgba(0,0,0,.2)"});return C["default"].createElement("div",o({style:I},t))}function E(A){var M=A.style,t=g(A,["style"]),I=o({},M,{cursor:"pointer",borderRadius:"inherit",backgroundColor:"rgba(0,0,0,.2)"});return C["default"].createElement("div",o({style:I},t))}function N(A){return C["default"].createElement("div",A)}var o=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}var e=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw 
new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=Object.assign||function(A){for(var M=1;M0||(this.setState({isDragActive:!1,isDragReject:!1}),this.props.onDragLeave&&this.props.onDragLeave.call(this,A))}},{key:"onDrop",value:function(A){A.preventDefault(),this.enterCounter=0,this.setState({isDragActive:!1,isDragReject:!1});for(var M=A.dataTransfer?A.dataTransfer.files:A.target.files,t=this.props.multiple?M.length:Math.min(M.length,1),I=[],g=0;g=M.props.minSize})}},{key:"open",value:function(){this.fileInputEl.value=null,this.fileInputEl.click()}},{key:"render",value:function(){var A=this,M=this.props,t=M.accept,I=M.activeClassName,e=M.inputProps,i=M.multiple,T=M.name,N=M.rejectClassName,o=g(M,["accept","activeClassName","inputProps","multiple","name","rejectClassName"]),n=o.activeStyle,C=o.className,a=o.rejectStyle,B=o.style,Q=g(o,["activeStyle","className","rejectStyle","style"]),r=this.state,s=r.isDragActive,x=r.isDragReject;C=C||"",s&&I&&(C+=" "+I),x&&N&&(C+=" "+N),C||B||n||a||(B={width:200,height:200,borderWidth:2,borderColor:"#666",borderStyle:"dashed",borderRadius:5},n={borderStyle:"solid",backgroundColor:"#eee"},a={borderStyle:"solid",backgroundColor:"#ffdddd"});var j=void 0;j=n&&s?E({},B,n):a&&x?E({},B,a):E({},B);var y={accept:t,type:"file",style:{display:"none"},multiple:D&&i,ref:function(M){return A.fileInputEl=M},onChange:this.onDrop};T&&T.length&&(y.name=T);var w=["disablePreview","disableClick","onDropAccepted","onDropRejected","maxSize","minSize"],u=E({},Q);return w.forEach(function(A){return delete 
u[A]}),c["default"].createElement("div",E({className:C,style:j},u,{onClick:this.onClick,onDragStart:this.onDragStart,onDragEnter:this.onDragEnter,onDragOver:this.onDragOver,onDragLeave:this.onDragLeave,onDrop:this.onDrop}),this.props.children,c["default"].createElement("input",E({},e,y)))}}]),M}(c["default"].Component);a.defaultProps={disablePreview:!1,disableClick:!1,multiple:!0,maxSize:1/0,minSize:0},a.propTypes={onDrop:c["default"].PropTypes.func,onDropAccepted:c["default"].PropTypes.func,onDropRejected:c["default"].PropTypes.func,onDragStart:c["default"].PropTypes.func,onDragEnter:c["default"].PropTypes.func,onDragLeave:c["default"].PropTypes.func,children:c["default"].PropTypes.node,style:c["default"].PropTypes.object,activeStyle:c["default"].PropTypes.object,rejectStyle:c["default"].PropTypes.object,className:c["default"].PropTypes.string,activeClassName:c["default"].PropTypes.string,rejectClassName:c["default"].PropTypes.string,disablePreview:c["default"].PropTypes.bool,disableClick:c["default"].PropTypes.bool,inputProps:c["default"].PropTypes.object,multiple:c["default"].PropTypes.bool,accept:c["default"].PropTypes.string,name:c["default"].PropTypes.string,maxSize:c["default"].PropTypes.number,minSize:c["default"].PropTypes.number},M["default"]=a,A.exports=M["default"]},function(A,M){A.exports=function(A){function M(I){if(t[I])return t[I].exports;var g=t[I]={exports:{},id:I,loaded:!1};return A[I].call(g.exports,g,g.exports,M),g.loaded=!0,g.exports}var t={};return M.m=A,M.c=t,M.p="",M(0)}([function(A,M,t){"use strict";M.__esModule=!0,t(8),t(9),M["default"]=function(A,M){if(A&&M){var t=function(){var t=M.split(","),I=A.name||"",g=A.type||"",e=g.replace(/\/.*$/,"");return{v:t.some(function(A){var M=A.trim();return"."===M.charAt(0)?I.toLowerCase().endsWith(M.toLowerCase()):/\/\*$/.test(M)?e===M.replace(/\/.*$/,""):g===M})}}();if("object"==typeof t)return t.v}return!0},A.exports=M["default"]},function(A,M){var t=A.exports={version:"1.2.2"};"number"==typeof 
__e&&(__e=t)},function(A,M){var t=A.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=t)},function(A,M,t){var I=t(2),g=t(1),e=t(4),i=t(19),T="prototype",E=function(A,M){return function(){return A.apply(M,arguments)}},N=function(A,M,t){var o,n,C,c,D=A&N.G,a=A&N.P,B=D?I:A&N.S?I[M]||(I[M]={}):(I[M]||{})[T],Q=D?g:g[M]||(g[M]={});D&&(t=M);for(o in t)n=!(A&N.F)&&B&&o in B,C=(n?B:t)[o],c=A&N.B&&n?E(C,I):a&&"function"==typeof C?E(Function.call,C):C,B&&!n&&i(B,o,C),Q[o]!=C&&e(Q,o,c),a&&((Q[T]||(Q[T]={}))[o]=C)};I.core=g,N.F=1,N.G=2,N.S=4,N.P=8,N.B=16,N.W=32,A.exports=N},function(A,M,t){var I=t(5),g=t(18);A.exports=t(22)?function(A,M,t){return I.setDesc(A,M,g(1,t))}:function(A,M,t){return A[M]=t,A}},function(A,M){var t=Object;A.exports={create:t.create,getProto:t.getPrototypeOf,isEnum:{}.propertyIsEnumerable,getDesc:t.getOwnPropertyDescriptor,setDesc:t.defineProperty,setDescs:t.defineProperties,getKeys:t.keys,getNames:t.getOwnPropertyNames,getSymbols:t.getOwnPropertySymbols,each:[].forEach}},function(A,M){var t=0,I=Math.random();A.exports=function(A){return"Symbol(".concat(void 0===A?"":A,")_",(++t+I).toString(36))}},function(A,M,t){var I=t(20)("wks"),g=t(2).Symbol;A.exports=function(A){return I[A]||(I[A]=g&&g[A]||(g||t(6))("Symbol."+A))}},function(A,M,t){t(26),A.exports=t(1).Array.some},function(A,M,t){t(25),A.exports=t(1).String.endsWith},function(A,M){A.exports=function(A){if("function"!=typeof A)throw TypeError(A+" is not a function!");return A}},function(A,M){var t={}.toString;A.exports=function(A){return t.call(A).slice(8,-1)}},function(A,M,t){var I=t(10);A.exports=function(A,M,t){if(I(A),void 0===M)return A;switch(t){case 1:return function(t){return A.call(M,t)};case 2:return function(t,I){return A.call(M,t,I)};case 3:return function(t,I,g){return A.call(M,t,I,g)}}return function(){return A.apply(M,arguments)}}},function(A,M){A.exports=function(A){if(void 
0==A)throw TypeError("Can't call method on "+A);return A}},function(A,M,t){A.exports=function(A){var M=/./;try{"/./"[A](M)}catch(I){try{return M[t(7)("match")]=!1,!"/./"[A](M)}catch(g){}}return!0}},function(A,M){A.exports=function(A){try{return!!A()}catch(M){return!0}}},function(A,M){A.exports=function(A){return"object"==typeof A?null!==A:"function"==typeof A}},function(A,M,t){var I=t(16),g=t(11),e=t(7)("match");A.exports=function(A){var M;return I(A)&&(void 0!==(M=A[e])?!!M:"RegExp"==g(A))}},function(A,M){A.exports=function(A,M){return{enumerable:!(1&A),configurable:!(2&A),writable:!(4&A),value:M}}},function(A,M,t){var I=t(2),g=t(4),e=t(6)("src"),i="toString",T=Function[i],E=(""+T).split(i);t(1).inspectSource=function(A){return T.call(A)},(A.exports=function(A,M,t,i){"function"==typeof t&&(g(t,e,A[M]?""+A[M]:E.join(String(M))),"name"in t||(t.name=M)),A===I?A[M]=t:(i||delete A[M],g(A,M,t))})(Function.prototype,i,function(){return"function"==typeof this&&this[e]||T.call(this)})},function(A,M,t){var I=t(2),g="__core-js_shared__",e=I[g]||(I[g]={});A.exports=function(A){return e[A]||(e[A]={})}},function(A,M,t){var I=t(17),g=t(13);A.exports=function(A,M,t){if(I(M))throw TypeError("String#"+t+" doesn't accept regex!");return String(g(A))}},function(A,M,t){A.exports=!t(15)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(A,M){var t=Math.ceil,I=Math.floor;A.exports=function(A){return isNaN(A=+A)?0:(A>0?I:t)(A)}},function(A,M,t){var I=t(23),g=Math.min;A.exports=function(A){return A>0?g(I(A),9007199254740991):0}},function(A,M,t){"use strict";var I=t(3),g=t(24),e=t(21),i="endsWith",T=""[i];I(I.P+I.F*t(14)(i),"String",{endsWith:function(A){var M=e(this,A,i),t=arguments,I=t.length>1?t[1]:void 0,E=g(M.length),N=void 0===I?E:Math.min(g(I),E),o=String(A);return T?T.call(M,o,N):M.slice(N-o.length,N)===o}})},function(A,M,t){var I=t(5),g=t(3),e=t(1).Array||Array,i={},T=function(A,M){I.each.call(A.split(","),function(A){void 0==M&&A in 
e?i[A]=e[A]:A in[]&&(i[A]=t(12)(Function.call,[][A],M))})};T("pop,reverse,shift,keys,values,entries",1),T("indexOf,every,some,forEach,map,filter,find,findIndex,includes",3),T("join,slice,concat,push,splice,unshift,sort,lastIndexOf,reduce,reduceRight,copyWithin,fill"),g(g.S,"Array",i)}])},function(M,t){M.exports=A}])})},function(A,M,t){"use strict";function I(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function g(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}var e=function(){function A(A,M){for(var t=0;t0&&void 0!==arguments[0]?arguments[0]:{},t=M.hideSiblingNodes,I=void 0===t||t,e=M.handleContainerOverflow,i=void 0===e||e;g(this,A),this.hideSiblingNodes=I,this.handleContainerOverflow=i,this.modals=[],this.containers=[],this.data=[]}return N(A,[{key:"add",value:function(A,M,t){var I=this.modals.indexOf(A),g=this.containers.indexOf(M);if(I!==-1)return I;if(I=this.modals.length,this.modals.push(A),this.hideSiblingNodes&&(0,r.hideSiblings)(M,A.mountNode),g!==-1)return this.data[g].modals.push(A),I;var e={modals:[A],classes:t?t.split(/\s+/):[],overflowing:(0,Q["default"])(M)};return this.handleContainerOverflow&&T(e,M),e.classes.forEach(c["default"].addClass.bind(null,M)),this.containers.push(M),this.data.push(e),I}},{key:"remove",value:function(A){var M=this.modals.indexOf(A);if(M!==-1){var 
t=i(this.data,A),I=this.data[t],g=this.containers[t];I.modals.splice(I.modals.indexOf(A),1),this.modals.splice(M,1),0===I.modals.length?(I.classes.forEach(c["default"].removeClass.bind(null,g)),this.handleContainerOverflow&&E(I,g),this.hideSiblingNodes&&(0,r.showSiblings)(g,A.mountNode),this.containers.splice(t,1),this.data.splice(t,1)):this.hideSiblingNodes&&(0,r.ariaHidden)(!1,I.modals[I.modals.length-1].mountNode)}}},{key:"isTopModal",value:function(A){return!!this.modals.length&&this.modals[this.modals.length-1]===A}}]),A}();M["default"]=s,A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A,M){var t={};for(var I in A)M.indexOf(I)>=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=Object.assign||function(A){for(var M=1;M1?t-1:0),g=1;g=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof 
M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}function E(){}Object.defineProperty(M,"__esModule",{value:!0}),M.EXITING=M.ENTERED=M.ENTERING=M.EXITED=M.UNMOUNTED=void 0;var N=Object.assign||function(A){for(var M=1;MT?T-N:0}function i(A,M,t,I){var e=g(t),i=e.width,T=A-I,E=A+I+M;return T<0?-T:E>i?i-E:0}function T(A,M,t,I,g){var T="BODY"===I.tagName?(0,N["default"])(t):(0,n["default"])(t,I),E=(0,N["default"])(M),o=E.height,C=E.width,c=void 0,D=void 0,a=void 0,B=void 0;if("left"===A||"right"===A){D=T.top+(T.height-o)/2,c="left"===A?T.left-C:T.left+T.width;var Q=e(D,o,I,g);D+=Q,B=50*(1-2*Q/o)+"%",a=void 0}else{if("top"!==A&&"bottom"!==A)throw new Error('calcOverlayPosition(): No such placement of "'+A+'" found.');c=T.left+(T.width-C)/2,D="top"===A?T.top-o:T.top+T.height;var r=i(c,C,I,g);c+=r,a=50*(1-2*r/C)+"%",B=void 0}return{positionLeft:c,positionTop:D,arrowOffsetLeft:a,arrowOffsetTop:B}}Object.defineProperty(M,"__esModule",{value:!0}),M["default"]=T;var E=t(142),N=I(E),o=t(284),n=I(o),C=t(143),c=I(C),D=t(52),a=I(D);A.exports=M["default"]},function(A,M){"use strict";function 
t(A,M){M&&(A?M.setAttribute("aria-hidden","true"):M.removeAttribute("aria-hidden"))}function I(A,M){T(A,M,function(A){return t(!0,A)})}function g(A,M){T(A,M,function(A){return t(!1,A)})}Object.defineProperty(M,"__esModule",{value:!0}),M.ariaHidden=t,M.hideSiblings=I,M.showSiblings=g;var e=["template","script","style"],i=function(A){var M=A.nodeType,t=A.tagName;return 1===M&&e.indexOf(t.toLowerCase())===-1},T=function(A,M,t){M=[].concat(M),[].forEach.call(A.children,function(A){M.indexOf(A)===-1&&i(A)&&t(A)})}},function(A,M,t){"use strict";var I=function(){};A.exports=I},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A,M){return function(t,I,g){if(null!=t[I]){var e='"'+I+'" property of "'+g+'" has been deprecated.\n'+M;E[e]||(T["default"](!1,e),E[e]=!0)}return A(t,I,g)}}function e(){E={}}M.__esModule=!0,M["default"]=g;var i=t(126),T=I(i),E={};g._resetWarned=e,A.exports=M["default"]},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function e(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function i(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}M.__esModule=!0,M["default"]=void 0;var T=t(1),E=t(184),N=I(E),o=t(185),n=(I(o),function(A){function M(t,I){g(this,M);var i=e(this,A.call(this,t,I));return i.store=t.store,i}return i(M,A),M.prototype.getChildContext=function(){return{store:this.store}},M.prototype.render=function(){var A=this.props.children;return 
T.Children.only(A)},M}(T.Component));M["default"]=n,n.propTypes={store:N["default"].isRequired,children:T.PropTypes.element.isRequired},n.childContextTypes={store:N["default"].isRequired}},function(A,M){"use strict";function t(A,M){if(A===M)return!0;var t=Object.keys(A),I=Object.keys(M);if(t.length!==I.length)return!1;for(var g=Object.prototype.hasOwnProperty,e=0;e=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){var t=A.history,I=A.routes,e=A.location,E=g(A,["history","routes","location"]);t||e?void 0:(0,N["default"])(!1),t=t?t:(0,n["default"])(E);var o=(0,c["default"])(t,(0,D.createRoutes)(I)),C=void 0;e?e=t.createLocation(e):C=t.listen(function(A){e=A});var B=(0,a.createRouterObject)(t,o);t=(0,a.createRoutingHistory)(t,o),o.match(e,function(A,I,g){M(A,I&&B.createLocation(I,T.REPLACE),g&&i({},g,{history:t,router:B,matchContext:{history:t,transitionManager:o,router:B}})),C&&C()})}M.__esModule=!0;var i=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return function(){var M=arguments.length<=0||void 0===arguments[0]?{}:arguments[0],t=M.routes,I=g(M,["routes"]),e=(0,E["default"])(A)(I),T=(0,o["default"])(e,t);return i({},e,T)}}M.__esModule=!0;var i=Object.assign||function(A){for(var M=1;M=A&&N&&(T=!0,t()))}}var i=0,T=!1,E=!1,N=!1,o=void 0;e()}M.__esModule=!0;var I=Array.prototype.slice;M.loopAsync=t},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{"default":A}}function g(){function A(A){try{A=A||window.history.state||{}}catch(M){A={}}var t=n.getWindowPath(),I=A,g=I.key,i=void 0;g?i=C.readState(g):(i=null,g=s.createKey(),Q&&window.history.replaceState(e({},A,{key:g}),null));var T=N.parsePath(t);return s.createLocation(e({},T,{state:i}),void 0,g)}function M(M){function t(M){void 0!==M.state&&I(A(M.state))}var I=M.transitionTo;return n.addEventListener(window,"popstate",t),function(){n.removeEventListener(window,"popstate",t)}}function 
t(A){var M=A.basename,t=A.pathname,I=A.search,g=A.hash,e=A.state,i=A.action,T=A.key;if(i!==E.POP){C.saveState(T,e);var N=(M||"")+t+I+g,o={key:T};if(i===E.PUSH){if(r)return window.location.href=N,!1;window.history.pushState(o,null,N)}else{if(r)return window.location.replace(N),!1;window.history.replaceState(o,null,N)}}}function I(A){1===++x&&(j=M(s));var t=s.listenBefore(A);return function(){t(),0===--x&&j()}}function g(A){1===++x&&(j=M(s));var t=s.listen(A);return function(){t(),0===--x&&j()}}function i(A){1===++x&&(j=M(s)),s.registerTransitionHook(A)}function c(A){s.unregisterTransitionHook(A),0===--x&&j()}var a=arguments.length<=0||void 0===arguments[0]?{}:arguments[0];o.canUseDOM?void 0:T["default"](!1);var B=a.forceRefresh,Q=n.supportsHistory(),r=!Q||B,s=D["default"](e({},a,{getCurrentLocation:A,finishTransition:t,saveState:C.saveState})),x=0,j=void 0;return e({},s,{listenBefore:I,listen:g,registerTransitionHook:i,unregisterTransitionHook:c})}M.__esModule=!0;var e=Object.assign||function(A){for(var 
M=1;M=0&&M=0&&B8&&w<=11),Y=32,l=String.fromCharCode(Y),d=c.topLevelTypes,h={beforeInput:{phasedRegistrationNames:{bubbled:s({onBeforeInput:null}),captured:s({onBeforeInputCapture:null})},dependencies:[d.topCompositionEnd,d.topKeyPress,d.topTextInput,d.topPaste]},compositionEnd:{phasedRegistrationNames:{bubbled:s({onCompositionEnd:null}),captured:s({onCompositionEndCapture:null})},dependencies:[d.topBlur,d.topCompositionEnd,d.topKeyDown,d.topKeyPress,d.topKeyUp,d.topMouseDown]},compositionStart:{phasedRegistrationNames:{bubbled:s({onCompositionStart:null}),captured:s({onCompositionStartCapture:null})},dependencies:[d.topBlur,d.topCompositionStart,d.topKeyDown,d.topKeyPress,d.topKeyUp,d.topMouseDown]},compositionUpdate:{phasedRegistrationNames:{bubbled:s({onCompositionUpdate:null}),captured:s({onCompositionUpdateCapture:null})},dependencies:[d.topBlur,d.topCompositionUpdate,d.topKeyDown,d.topKeyPress,d.topKeyUp,d.topMouseDown]}},S=!1,z=null,U={eventTypes:h,extractEvents:function(A,M,t,I,g){return[N(A,M,t,I,g),C(A,M,t,I,g)]}};A.exports=U},function(A,M,t){"use strict";var I=t(201),g=t(10),e=t(17),i=(t(294),t(467)),T=t(299),E=t(303),N=(t(3),E(function(A){return T(A)})),o=!1,n="cssFloat";if(g.canUseDOM){var C=document.createElement("div").style;try{C.font=""}catch(c){o=!0}void 0===document.documentElement.style.cssFloat&&(n="styleFloat")}var D={createMarkupForStyles:function(A){var M="";for(var t in A)if(A.hasOwnProperty(t)){var I=A[t];null!=I&&(M+=N(t)+":",M+=i(t,I)+";")}return M||null},setValueForStyles:function(A,M){var t=A.style;for(var g in M)if(M.hasOwnProperty(g)){var e=i(g,M[g]);if("float"===g&&(g=n),e)t[g]=e;else{var T=o&&I.shorthandPropertyExpansions[g];if(T)for(var E in T)t[E]="";else t[g]=""}}}};e.measureMethods(D,"CSSPropertyOperations",{setValueForStyles:"setValueForStyles"}),A.exports=D},function(A,M,t){"use strict";function I(A){var M=A.nodeName&&A.nodeName.toLowerCase();return"select"===M||"input"===M&&"file"===A.type}function g(A){var 
M=w.getPooled(h.change,z,A,u(A));x.accumulateTwoPhaseDispatches(M),y.batchedUpdates(e,M)}function e(A){s.enqueueEvents(A),s.processEventQueue(!1)}function i(A,M){S=A,z=M,S.attachEvent("onchange",g)}function T(){S&&(S.detachEvent("onchange",g),S=null,z=null)}function E(A,M,t){if(A===d.topChange)return t}function N(A,M,t){A===d.topFocus?(T(),i(M,t)):A===d.topBlur&&T()}function o(A,M){S=A,z=M,U=A.value,p=Object.getOwnPropertyDescriptor(A.constructor.prototype,"value"),Object.defineProperty(S,"value",F),S.attachEvent("onpropertychange",C)}function n(){S&&(delete S.value,S.detachEvent("onpropertychange",C),S=null,z=null,U=null,p=null)}function C(A){if("value"===A.propertyName){var M=A.srcElement.value;M!==U&&(U=M,g(A))}}function c(A,M,t){if(A===d.topInput)return t}function D(A,M,t){A===d.topFocus?(n(),o(M,t)):A===d.topBlur&&n()}function a(A,M,t){if((A===d.topSelectionChange||A===d.topKeyUp||A===d.topKeyDown)&&S&&S.value!==U)return U=S.value,z}function B(A){return A.nodeName&&"input"===A.nodeName.toLowerCase()&&("checkbox"===A.type||"radio"===A.type)}function Q(A,M,t){if(A===d.topClick)return t}var r=t(24),s=t(53),x=t(54),j=t(10),y=t(18),w=t(37),u=t(116),L=t(119),Y=t(228),l=t(26),d=r.topLevelTypes,h={change:{phasedRegistrationNames:{bubbled:l({onChange:null}),captured:l({onChangeCapture:null})},dependencies:[d.topBlur,d.topChange,d.topClick,d.topFocus,d.topInput,d.topKeyDown,d.topKeyUp,d.topSelectionChange] -}},S=null,z=null,U=null,p=null,O=!1;j.canUseDOM&&(O=L("change")&&(!("documentMode"in document)||document.documentMode>8));var m=!1;j.canUseDOM&&(m=L("input")&&(!("documentMode"in document)||document.documentMode>9));var F={get:function(){return p.get.call(this)},set:function(A){U=""+A,p.set.call(this,A)}},f={eventTypes:h,extractEvents:function(A,M,t,g,e){var i,T;if(I(M)?O?i=E:T=N:Y(M)?m?i=c:(i=a,T=D):B(M)&&(i=Q),i){var o=i(A,M,t);if(o){var n=w.getPooled(h.change,o,g,e);return 
n.type="change",x.accumulateTwoPhaseDispatches(n),n}}T&&T(A,M,t)}};A.exports=f},function(A,M){"use strict";var t=0,I={createReactRootIndex:function(){return t++}};A.exports=I},function(A,M,t){"use strict";function I(A){return A.substring(1,A.indexOf(" "))}var g=t(10),e=t(296),i=t(20),T=t(150),E=t(2),N=/^(<[^ \/>]+)/,o="data-danger-index",n={dangerouslyRenderMarkup:function(A){g.canUseDOM?void 0:E(!1);for(var M,t={},n=0;n1?1-M:void 0;return this._fallbackText=g.slice(A,T),this._fallbackText}}),g.addPoolingTo(I),A.exports=I},function(A,M,t){"use strict";var I,g=t(47),e=t(10),i=g.injection.MUST_USE_ATTRIBUTE,T=g.injection.MUST_USE_PROPERTY,E=g.injection.HAS_BOOLEAN_VALUE,N=g.injection.HAS_SIDE_EFFECTS,o=g.injection.HAS_NUMERIC_VALUE,n=g.injection.HAS_POSITIVE_NUMERIC_VALUE,C=g.injection.HAS_OVERLOADED_BOOLEAN_VALUE;if(e.canUseDOM){var c=document.implementation;I=c&&c.hasFeature&&c.hasFeature("http://www.w3.org/TR/SVG11/feature#BasicStructure","1.1")}var D={isCustomAttribute:RegExp.prototype.test.bind(/^(data|aria)-[a-z_][a-z\d_.\-]*$/),Properties:{accept:null,acceptCharset:null,accessKey:null,action:null,allowFullScreen:i|E,allowTransparency:i,alt:null,async:E,autoComplete:null,autoPlay:E,capture:i|E,cellPadding:null,cellSpacing:null,charSet:i,challenge:i,checked:T|E,classID:i,className:I?i:T,cols:i|n,colSpan:null,content:null,contentEditable:null,contextMenu:i,controls:T|E,coords:null,crossOrigin:null,data:null,dateTime:i,"default":E,defer:E,dir:null,disabled:i|E,download:C,draggable:null,encType:null,form:i,formAction:i,formEncType:i,formMethod:i,formNoValidate:E,formTarget:i,frameBorder:i,headers:null,height:i,hidden:i|E,high:null,href:null,hrefLang:null,htmlFor:null,httpEquiv:null,icon:null,id:T,inputMode:i,integrity:null,is:i,keyParams:i,keyType:i,kind:null,label:null,lang:null,list:i,loop:T|E,low:null,manifest:i,marginHeight:null,marginWidth:null,max:null,maxLength:i,media:i,mediaGroup:null,method:null,min:null,minLength:i,multiple:T|E,muted:T|E,name:null,nonce:i
,noValidate:E,open:E,optimum:null,pattern:null,placeholder:null,poster:null,preload:null,radioGroup:null,readOnly:T|E,rel:null,required:E,reversed:E,role:i,rows:i|n,rowSpan:null,sandbox:null,scope:null,scoped:E,scrolling:null,seamless:i|E,selected:T|E,shape:null,size:i|n,sizes:i,span:n,spellCheck:null,src:null,srcDoc:T,srcLang:null,srcSet:i,start:o,step:null,style:null,summary:null,tabIndex:null,target:null,title:null,type:null,useMap:null,value:T|N,width:i,wmode:i,wrap:null,about:i,datatype:i,inlist:i,prefix:i,property:i,resource:i,"typeof":i,vocab:i,autoCapitalize:i,autoCorrect:i,autoSave:null,color:null,itemProp:i,itemScope:i|E,itemType:i,itemID:i,itemRef:i,results:null,security:i,unselectable:i},DOMAttributeNames:{acceptCharset:"accept-charset",className:"class",htmlFor:"for",httpEquiv:"http-equiv"},DOMPropertyNames:{autoComplete:"autocomplete",autoFocus:"autofocus",autoPlay:"autoplay",autoSave:"autosave",encType:"encoding",hrefLang:"hreflang",radioGroup:"radiogroup",spellCheck:"spellcheck",srcDoc:"srcdoc",srcSet:"srcset"}};A.exports=D},function(A,M,t){"use strict";var I=t(207),g=t(441),e=t(446),i=t(4),T=t(468),E={};i(E,e),i(E,{findDOMNode:T("findDOMNode","ReactDOM","react-dom",I,I.findDOMNode),render:T("render","ReactDOM","react-dom",I,I.render),unmountComponentAtNode:T("unmountComponentAtNode","ReactDOM","react-dom",I,I.unmountComponentAtNode),renderToString:T("renderToString","ReactDOMServer","react-dom/server",g,g.renderToString),renderToStaticMarkup:T("renderToStaticMarkup","ReactDOMServer","react-dom/server",g,g.renderToStaticMarkup)}),E.__SECRET_DOM_DO_NOT_USE_OR_YOU_WILL_BE_FIRED=I,E.__SECRET_DOM_SERVER_DO_NOT_USE_OR_YOU_WILL_BE_FIRED=g,A.exports=E},function(A,M,t){"use strict";var I=(t(55),t(113)),g=(t(3),"_getDOMNodeDidWarn"),e={getDOMNode:function(){return this.constructor[g]=!0,I(this)}};A.exports=e},function(A,M,t){"use strict";function I(A,M,t){var I=void 0===A[t];null!=M&&I&&(A[t]=e(M,null))}var 
g=t(36),e=t(118),i=t(121),T=t(122),E=(t(3),{instantiateChildren:function(A,M,t){if(null==A)return null;var g={};return T(A,I,g),g},updateChildren:function(A,M,t,I){if(!M&&!A)return null;var T;for(T in M)if(M.hasOwnProperty(T)){var E=A&&A[T],N=E&&E._currentElement,o=M[T];if(null!=E&&i(N,o))g.receiveComponent(E,o,t,I),M[T]=E;else{E&&g.unmountComponent(E,T);var n=e(o,null);M[T]=n}}for(T in A)!A.hasOwnProperty(T)||M&&M.hasOwnProperty(T)||g.unmountComponent(A[T]);return M},unmountChildren:function(A){for(var M in A)if(A.hasOwnProperty(M)){var t=A[M];g.unmountComponent(t)}}});A.exports=E},function(A,M,t){"use strict";function I(A){var M=A._currentElement._owner||null;if(M){var t=M.getName();if(t)return" Check the render method of ` + "`" + `"+t+"` + "`" + `."}return""}function g(A){}var e=t(109),i=t(25),T=t(13),E=t(55),N=t(17),o=t(70),n=(t(69),t(36)),C=t(111),c=t(4),D=t(50),a=t(2),B=t(121);t(3);g.prototype.render=function(){var A=E.get(this)._currentElement.type;return A(this.props,this.context,this.updater)};var Q=1,r={construct:function(A){this._currentElement=A,this._rootNodeID=null,this._instance=null,this._pendingElement=null,this._pendingStateQueue=null,this._pendingReplaceState=!1,this._pendingForceUpdate=!1,this._renderedComponent=null,this._context=null,this._mountOrder=0,this._topLevelWrapper=null,this._pendingCallbacks=null},mountComponent:function(A,M,t){this._context=t,this._mountOrder=Q++,this._rootNodeID=A;var I,e,i=this._processProps(this._currentElement.props),N=this._processContext(t),o=this._currentElement.type,c="prototype"in o;c&&(I=new o(i,N,C)),c&&null!==I&&I!==!1&&!T.isValidElement(I)||(e=I,I=new g(o)),I.props=i,I.context=N,I.refs=D,I.updater=C,this._instance=I,E.set(I,this);var B=I.state;void 0===B&&(I.state=B=null),"object"!=typeof B||Array.isArray(B)?a(!1):void 
0,this._pendingStateQueue=null,this._pendingReplaceState=!1,this._pendingForceUpdate=!1,I.componentWillMount&&(I.componentWillMount(),this._pendingStateQueue&&(I.state=this._processPendingState(I.props,I.context))),void 0===e&&(e=this._renderValidatedComponent()),this._renderedComponent=this._instantiateReactComponent(e);var r=n.mountComponent(this._renderedComponent,A,M,this._processChildContext(t));return I.componentDidMount&&M.getReactMountReady().enqueue(I.componentDidMount,I),r},unmountComponent:function(){var A=this._instance;A.componentWillUnmount&&A.componentWillUnmount(),n.unmountComponent(this._renderedComponent),this._renderedComponent=null,this._instance=null,this._pendingStateQueue=null,this._pendingReplaceState=!1,this._pendingForceUpdate=!1,this._pendingCallbacks=null,this._pendingElement=null,this._context=null,this._rootNodeID=null,this._topLevelWrapper=null,E.remove(A)},_maskContext:function(A){var M=null,t=this._currentElement.type,I=t.contextTypes;if(!I)return D;M={};for(var g in I)M[g]=A[g];return M},_processContext:function(A){var M=this._maskContext(A);return M},_processChildContext:function(A){var M=this._currentElement.type,t=this._instance,I=t.getChildContext&&t.getChildContext();if(I){"object"!=typeof M.childContextTypes?a(!1):void 0;for(var g in I)g in M.childContextTypes?void 0:a(!1);return c({},A,I)}return A},_processProps:function(A){return A},_checkPropTypes:function(A,M,t){var g=this.getName();for(var e in A)if(A.hasOwnProperty(e)){var i;try{"function"!=typeof A[e]?a(!1):void 0,i=A[e](M,e,g,t)}catch(T){i=T}if(i instanceof Error){I(this);t===o.prop}}},receiveComponent:function(A,M,t){var 
I=this._currentElement,g=this._context;this._pendingElement=null,this.updateComponent(M,I,A,g,t)},performUpdateIfNecessary:function(A){null!=this._pendingElement&&n.receiveComponent(this,this._pendingElement||this._currentElement,A,this._context),(null!==this._pendingStateQueue||this._pendingForceUpdate)&&this.updateComponent(A,this._currentElement,this._currentElement,this._context,this._context)},updateComponent:function(A,M,t,I,g){var e,i=this._instance,T=this._context===g?i.context:this._processContext(g);M===t?e=t.props:(e=this._processProps(t.props),i.componentWillReceiveProps&&i.componentWillReceiveProps(e,T));var E=this._processPendingState(e,T),N=this._pendingForceUpdate||!i.shouldComponentUpdate||i.shouldComponentUpdate(e,E,T);N?(this._pendingForceUpdate=!1,this._performComponentUpdate(t,e,E,T,A,g)):(this._currentElement=t,this._context=g,i.props=e,i.state=E,i.context=T)},_processPendingState:function(A,M){var t=this._instance,I=this._pendingStateQueue,g=this._pendingReplaceState;if(this._pendingReplaceState=!1,this._pendingStateQueue=null,!I)return t.state;if(g&&1===I.length)return I[0];for(var e=c({},g?I[0]:t.state),i=g?1:0;i=0||null!=M.is}function B(A){D(A),this._tag=A.toLowerCase(),this._renderedChildren=null,this._previousStyle=null,this._previousStyleCopy=null,this._rootNodeID=null,this._wrapperState=null,this._topLevelWrapper=null,this._nodeWithLegacyProperties=null}var 
Q=t(420),r=t(422),s=t(47),x=t(106),j=t(24),y=t(68),w=t(108),u=t(435),L=t(438),Y=t(439),l=t(209),d=t(442),h=t(12),S=t(447),z=t(17),U=t(111),p=t(4),O=t(73),m=t(74),F=t(2),f=(t(119),t(26)),k=t(75),R=t(120),J=(t(151),t(123),t(3),y.deleteListener),G=y.listenTo,H=y.registrationNameModules,b={string:!0,number:!0},X=f({children:null}),v=f({style:null}),W=f({__html:null}),V=1,P={topAbort:"abort",topCanPlay:"canplay",topCanPlayThrough:"canplaythrough",topDurationChange:"durationchange",topEmptied:"emptied",topEncrypted:"encrypted",topEnded:"ended",topError:"error",topLoadedData:"loadeddata",topLoadedMetadata:"loadedmetadata",topLoadStart:"loadstart",topPause:"pause",topPlay:"play",topPlaying:"playing",topProgress:"progress",topRateChange:"ratechange",topSeeked:"seeked",topSeeking:"seeking",topStalled:"stalled",topSuspend:"suspend",topTimeUpdate:"timeupdate",topVolumeChange:"volumechange",topWaiting:"waiting"},Z={area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0},K={listing:!0,pre:!0,textarea:!0},q=(p({menuitem:!0},Z),/^[a-zA-Z][a-zA-Z:_\.\-\d]*$/),_={},$={}.hasOwnProperty;B.displayName="ReactDOMComponent",B.Mixin={construct:function(A){this._currentElement=A},mountComponent:function(A,M,t){this._rootNodeID=A;var I=this._currentElement.props;switch(this._tag){case"iframe":case"img":case"form":case"video":case"audio":this._wrapperState={listeners:null},M.getReactMountReady().enqueue(n,this);break;case"button":I=u.getNativeProps(this,I,t);break;case"input":L.mountWrapper(this,I,t),I=L.getNativeProps(this,I,t);break;case"option":Y.mountWrapper(this,I,t),I=Y.getNativeProps(this,I,t);break;case"select":l.mountWrapper(this,I,t),I=l.getNativeProps(this,I,t),t=l.processChildContext(this,I,t);break;case"textarea":d.mountWrapper(this,I,t),I=d.getNativeProps(this,I,t)}E(this,I);var g;if(M.useCreateElement){var 
e=t[h.ownerDocumentContextKey],i=e.createElement(this._currentElement.type);x.setAttributeForID(i,this._rootNodeID),h.getID(i),this._updateDOMProperties({},I,M,i),this._createInitialChildren(M,I,t,i),g=i}else{var T=this._createOpenTagMarkupAndPutListeners(M,I),N=this._createContentMarkup(M,I,t);g=!N&&Z[this._tag]?T+"/>":T+">"+N+""}switch(this._tag){case"input":M.getReactMountReady().enqueue(C,this);case"button":case"select":case"textarea":I.autoFocus&&M.getReactMountReady().enqueue(Q.focusDOMComponent,this)}return g},_createOpenTagMarkupAndPutListeners:function(A,M){var t="<"+this._currentElement.type;for(var I in M)if(M.hasOwnProperty(I)){var g=M[I];if(null!=g)if(H.hasOwnProperty(I))g&&N(this._rootNodeID,I,g,A);else{I===v&&(g&&(g=this._previousStyleCopy=p({},M.style)),g=r.createMarkupForStyles(g));var e=null;null!=this._tag&&a(this._tag,M)?I!==X&&(e=x.createMarkupForCustomAttribute(I,g)):e=x.createMarkupForProperty(I,g),e&&(t+=" "+e)}}if(A.renderToStaticMarkup)return t;var i=x.createMarkupForID(this._rootNodeID);return t+" "+i},_createContentMarkup:function(A,M,t){var I="",g=M.dangerouslySetInnerHTML;if(null!=g)null!=g.__html&&(I=g.__html);else{var e=b[typeof M.children]?M.children:null,i=null!=e?null:M.children;if(null!=e)I=m(e);else if(null!=i){var T=this.mountChildren(i,A,t);I=T.join("")}}return K[this._tag]&&"\n"===I.charAt(0)?"\n"+I:I},_createInitialChildren:function(A,M,t,I){var g=M.dangerouslySetInnerHTML;if(null!=g)null!=g.__html&&k(I,g.__html);else{var e=b[typeof M.children]?M.children:null,i=null!=e?null:M.children;if(null!=e)R(I,e);else if(null!=i)for(var T=this.mountChildren(i,A,t),E=0;EM.end?(t=M.end,I=M.start):(t=M.start,I=M.end),g.moveToElementText(A),g.moveStart("character",t),g.setEndPoint("EndToStart",g),g.moveEnd("character",I-t),g.select()}function T(A,M){if(window.getSelection){var t=window.getSelection(),I=A[o()].length,g=Math.min(M.start,I),e="undefined"==typeof M.end?g:Math.min(M.end,I);if(!t.extend&&g>e){var i=e;e=g,g=i}var 
T=N(A,g),E=N(A,e);if(T&&E){var n=document.createRange();n.setStart(T.node,T.offset),t.removeAllRanges(),g>e?(t.addRange(n),t.extend(E.node,E.offset)):(n.setEnd(E.node,E.offset),t.addRange(n))}}}var E=t(10),N=t(471),o=t(227),n=E.canUseDOM&&"selection"in document&&!("getSelection"in window),C={getOffsets:n?g:e,setOffsets:n?i:T};A.exports=C},function(A,M,t){"use strict";var I=t(212),g=t(452),e=t(112);I.inject();var i={renderToString:g.renderToString,renderToStaticMarkup:g.renderToStaticMarkup,version:e};A.exports=i},function(A,M,t){"use strict";function I(){this._rootNodeID&&o.updateWrapper(this)}function g(A){var M=this._currentElement.props,t=e.executeOnChange(M,A);return T.asap(I,this),t}var e=t(107),i=t(110),T=t(18),E=t(4),N=t(2),o=(t(3),{getNativeProps:function(A,M,t){null!=M.dangerouslySetInnerHTML?N(!1):void 0;var I=E({},M,{defaultValue:void 0,value:void 0,children:A._wrapperState.initialValue,onChange:A._wrapperState.onChange});return I},mountWrapper:function(A,M){var t=M.defaultValue,I=M.children;null!=I&&(null!=t?N(!1):void 0,Array.isArray(I)&&(I.length<=1?void 0:N(!1),I=I[0]),t=""+I),null==t&&(t="");var i=e.getValue(M);A._wrapperState={initialValue:""+(null!=i?i:t),onChange:g.bind(A)}},updateWrapper:function(A){var M=A._currentElement.props,t=e.getValue(M);null!=t&&i.updatePropertyByID(A._rootNodeID,"value",""+t)}});A.exports=o},function(A,M,t){"use strict";function I(A){g.enqueueEvents(A),g.processEventQueue(!1)}var g=t(53),e={handleTopLevel:function(A,M,t,e,i){var T=g.extractEvents(A,M,t,e,i);I(T)}};A.exports=e},function(A,M,t){"use strict";function I(A){var M=C.getID(A),t=n.getReactRootIDFromNodeID(M),I=C.findReactContainerForID(t),g=C.getFirstReactDOM(I);return g}function g(A,M){this.topLevelType=A,this.nativeEvent=M,this.ancestors=[]}function e(A){i(A)}function i(A){for(var M=C.getFirstReactDOM(a(A.nativeEvent))||window,t=M;t;)A.ancestors.push(t),t=I(t);for(var g=0;g=M)return{node:g,offset:M-e};e=i}g=t(I(g))}}A.exports=g},function(A,M,t){"use 
strict";function I(A){return g.isValidElement(A)?void 0:e(!1),A}var g=t(13),e=t(2);A.exports=I},function(A,M,t){"use strict";function I(A){return'"'+g(A)+'"'}var g=t(74);A.exports=I},function(A,M,t){"use strict";var I=t(12);A.exports=I.renderSubtreeIntoContainer},function(A,M){A.exports=function(A,M,t){for(var I=0,g=A.length,e=3==arguments.length?t:A[I++];I=400){var T="cannot "+M.method+" "+M.url+" ("+i.status+")";A=new e(T),A.status=i.status,A.body=i.body,A.res=i,I(A)}else g?I(new e(g)):t(i)})})},g.prototype.then=function(){var A=this.promise();return A.then.apply(A,arguments)}},function(A,M,t){function I(){}function g(A){var M={}.toString.call(A);switch(M){case"[object File]":case"[object Blob]":case"[object FormData]":return!0;default:return!1}}function e(A){if(!s(A))return A;var M=[];for(var t in A)null!=A[t]&&i(M,t,A[t]);return M.join("&")}function i(A,M,t){return Array.isArray(t)?t.forEach(function(t){i(A,M,t)}):void A.push(encodeURIComponent(M)+"="+encodeURIComponent(t))}function T(A){for(var M,t,I={},g=A.split("&"),e=0,i=g.length;e=200&&M.status<300)return t.callback(A,M);var g=new Error(M.statusText||"Unsuccessful HTTP response");g.original=A,g.response=M,g.status=M.status,t.callback(g,M)})}function D(A,M){var t=x("DELETE",A);return M&&t.end(M),t}var a,B=t(270),Q=t(475),r=t(488),s=t(234);a="undefined"!=typeof window?window:"undefined"!=typeof self?self:this;var x=A.exports=t(489).bind(null,c);x.getXHR=function(){if(!(!a.XMLHttpRequest||a.location&&"file:"==a.location.protocol&&a.ActiveXObject))return new XMLHttpRequest;try{return new ActiveXObject("Microsoft.XMLHTTP")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP.6.0")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP.3.0")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP")}catch(A){}return!1};var j="".trim?function(A){return A.trim()}:function(A){return 
A.replace(/(^\s*|\s*$)/g,"")};x.serializeObject=e,x.parseString=T,x.types={html:"text/html",json:"application/json",xml:"application/xml",urlencoded:"application/x-www-form-urlencoded",form:"application/x-www-form-urlencoded","form-data":"application/x-www-form-urlencoded"},x.serialize={"application/x-www-form-urlencoded":e,"application/json":JSON.stringify},x.parse={"application/x-www-form-urlencoded":T,"application/json":JSON.parse},C.prototype.get=function(A){return this.header[A.toLowerCase()]},C.prototype.setHeaderProperties=function(A){var M=this.header["content-type"]||"";this.type=o(M);var t=n(M);for(var I in t)this[I]=t[I]},C.prototype.parseBody=function(A){var M=x.parse[this.type];return!M&&N(this.type)&&(M=x.parse["application/json"]),M&&A&&(A.length||A instanceof Object)?M(A):null},C.prototype.setStatusProperties=function(A){1223===A&&(A=204);var M=A/100|0;this.status=this.statusCode=A,this.statusType=M,this.info=1==M,this.ok=2==M,this.clientError=4==M,this.serverError=5==M,this.error=(4==M||5==M)&&this.toError(),this.accepted=202==A,this.noContent=204==A,this.badRequest=400==A,this.unauthorized=401==A,this.notAcceptable=406==A,this.notFound=404==A,this.forbidden=403==A},C.prototype.toError=function(){var A=this.req,M=A.method,t=A.url,I="cannot "+M+" "+t+" ("+this.status+")",g=new Error(I);return g.status=this.status,g.method=M,g.url=t,g},x.Response=C,B(c.prototype);for(var y in r)c.prototype[y]=r[y];c.prototype.abort=function(){if(!this.aborted)return this.aborted=!0,this.xhr&&this.xhr.abort(),this.clearTimeout(),this.emit("abort"),this},c.prototype.type=function(A){return this.set("Content-Type",x.types[A]||A), -this},c.prototype.responseType=function(A){return this._responseType=A,this},c.prototype.accept=function(A){return this.set("Accept",x.types[A]||A),this},c.prototype.auth=function(A,M,t){switch(t||(t={type:"basic"}),t.type){case"basic":var I=btoa(A+":"+M);this.set("Authorization","Basic 
"+I);break;case"auto":this.username=A,this.password=M}return this},c.prototype.query=function(A){return"string"!=typeof A&&(A=e(A)),A&&this._query.push(A),this},c.prototype.attach=function(A,M,t){return this._getFormData().append(A,M,t||M.name),this},c.prototype._getFormData=function(){return this._formData||(this._formData=new a.FormData),this._formData},c.prototype.send=function(A){var M=s(A),t=this._header["content-type"];if(M&&s(this._data))for(var I in A)this._data[I]=A[I];else"string"==typeof A?(t||this.type("form"),t=this._header["content-type"],"application/x-www-form-urlencoded"==t?this._data=this._data?this._data+"&"+A:A:this._data=(this._data||"")+A):this._data=A;return!M||g(A)?this:(t||this.type("json"),this)},C.prototype.parse=function(A){return a.console&&console.warn("Client-side parse() method has been renamed to serialize(). This method is not compatible with superagent v2.0"),this.serialize(A),this},C.prototype.serialize=function(A){return this._parser=A,this},c.prototype.callback=function(A,M){var t=this._callback;this.clearTimeout(),t(A,M)},c.prototype.crossDomainError=function(){var A=new Error("Request has been terminated\nPossible causes: the network is offline, Origin is not allowed by Access-Control-Allow-Origin, the page is being unloaded, etc.");A.crossDomain=!0,A.status=this.status,A.method=this.method,A.url=this.url,this.callback(A)},c.prototype.timeoutError=function(){var A=this._timeout,M=new Error("timeout of "+A+"ms exceeded");M.timeout=A,this.callback(M)},c.prototype.withCredentials=function(){return this._withCredentials=!0,this},c.prototype.end=function(A){var M=this,t=this.xhr=x.getXHR(),e=this._query.join("&"),i=this._timeout,T=this._formData||this._data;this._callback=A||I,t.onreadystatechange=function(){if(4==t.readyState){var A;try{A=t.status}catch(I){A=0}if(0==A){if(M.timedout)return M.timeoutError();if(M.aborted)return;return M.crossDomainError()}M.emit("end")}};var 
E=function(A){A.total>0&&(A.percent=A.loaded/A.total*100),A.direction="download",M.emit("progress",A)};this.hasListeners("progress")&&(t.onprogress=E);try{t.upload&&this.hasListeners("progress")&&(t.upload.onprogress=E)}catch(o){}if(i&&!this._timer&&(this._timer=setTimeout(function(){M.timedout=!0,M.abort()},i)),e&&(e=x.serializeObject(e),this.url+=~this.url.indexOf("?")?"&"+e:"?"+e),this.username&&this.password?t.open(this.method,this.url,!0,this.username,this.password):t.open(this.method,this.url,!0),this._withCredentials&&(t.withCredentials=!0),"GET"!=this.method&&"HEAD"!=this.method&&"string"!=typeof T&&!g(T)){var n=this._header["content-type"],C=this._parser||x.serialize[n?n.split(";")[0]:""];!C&&N(n)&&(C=x.serialize["application/json"]),C&&(T=C(T))}for(var c in this.header)null!=this.header[c]&&t.setRequestHeader(c,this.header[c]);return this._responseType&&(t.responseType=this._responseType),this.emit("request",this),t.send("undefined"!=typeof T?T:null),this},x.Request=c,x.get=function(A,M,t){var I=x("GET",A);return"function"==typeof M&&(t=M,M=null),M&&I.query(M),t&&I.end(t),I},x.head=function(A,M,t){var I=x("HEAD",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},x.del=D,x["delete"]=D,x.patch=function(A,M,t){var I=x("PATCH",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},x.post=function(A,M,t){var I=x("POST",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},x.put=function(A,M,t){var I=x("PUT",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I}},function(A,M,t){var I=t(234);M.clearTimeout=function(){return this._timeout=0,clearTimeout(this._timer),this},M.parse=function(A){return this._parser=A,this},M.timeout=function(A){return this._timeout=A,this},M.then=function(A,M){return this.end(function(t,I){t?M(t):A(I)})},M.use=function(A){return A(this),this},M.get=function(A){return this._header[A.toLowerCase()]},M.getHeader=M.get,M.set=function(A,M){if(I(A)){for(var t in 
A)this.set(t,A[t]);return this}return this._header[A.toLowerCase()]=M,this.header[A]=M,this},M.unset=function(A){return delete this._header[A.toLowerCase()],delete this.header[A],this},M.field=function(A,M){return this._getFormData().append(A,M),this}},function(A,M){function t(A,M,t){return"function"==typeof t?new A("GET",M).end(t):2==arguments.length?new A("GET",M):new A(M,t)}A.exports=t},function(A,M,t){A.exports=t(491)},function(A,M,t){(function(A,I){"use strict";function g(A){return A&&A.__esModule?A:{"default":A}}Object.defineProperty(M,"__esModule",{value:!0});var e,i=t(492),T=g(i);e="undefined"!=typeof self?self:"undefined"!=typeof window?window:"undefined"!=typeof A?A:I;var E=(0,T["default"])(e);M["default"]=E}).call(M,function(){return this}(),t(127)(A))},function(A,M){"use strict";function t(A){var M,t=A.Symbol;return"function"==typeof t?t.observable?M=t.observable:(M=t("observable"),t.observable=M):M="@@observable",M}Object.defineProperty(M,"__esModule",{value:!0}),M["default"]=t},function(A,M,t){function I(A){return g(A).replace(/\s(\w)/g,function(A,M){return M.toUpperCase()})}var g=t(495);A.exports=I},function(A,M){function t(A){return e.test(A)?A.toLowerCase():i.test(A)?(I(A)||A).toLowerCase():T.test(A)?g(A).toLowerCase():A.toLowerCase()}function I(A){return A.replace(E,function(A,M){return M?" "+M:""})}function g(A){return A.replace(N,function(A,M,t){return M+" "+t.toLowerCase().split("").join(" ")})}A.exports=t;var e=/\s/,i=/[\W_]/,T=/([a-z][A-Z]|[A-Z][a-z])/,E=/[\W_]+(.|$)/g,N=/(.)([A-Z]+)/g},function(A,M,t){function I(A){return g(A).replace(/[\W_]+(.|$)/g,function(A,M){return M?" 
"+M:""}).trim()}var g=t(494);A.exports=I},function(A,M){A.exports=function(){var A=document.getSelection();if(!A.rangeCount)return function(){};for(var M=document.activeElement,t=[],I=0;I=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function i(A,M){function t(I,g){function i(A,t){var I=c.getLinkName(A),e=this.props[g[A]];I&&E(this.props,I)&&!e&&(e=this.props[I].requestChange);for(var i=arguments.length,T=Array(i>2?i-2:0),N=2;N=15||0===s[0]&&s[1]>=13?A:A.type}function T(A,M){var t=N(M);return t&&!E(A,M)&&E(A,t)?A[t].value:A[M]}function E(A,M){return void 0!==A[M]}function N(A){return"value"===A?"valueLink":"checked"===A?"checkedLink":null}function o(A){return"default"+A.charAt(0).toUpperCase()+A.substr(1)}function n(A,M,t){return function(){for(var I=arguments.length,g=Array(I),e=0;eo||N===o&&"application/"===M[T].substr(0,12))continue}M[T]=I}}})}var E=t(272),N=t(275).extname,o=/^\s*([^;\s]*)(?:;|\s|$)/,n=/^text\//i;M.charset=I,M.charsets={lookup:I},M.contentType=g,M.extension=e,M.extensions=Object.create(null),M.lookup=i,M.types=Object.create(null),T(M.extensions,M.types)},function(A,M){function t(A){throw new Error("Cannot find module '"+A+"'.")}t.keys=function(){return[]},t.resolve=t,A.exports=t,t.id=274},function(A,M,t){(function(A){function t(A,M){for(var t=0,I=A.length-1;I>=0;I--){var g=A[I];"."===g?A.splice(I,1):".."===g?(A.splice(I,1),t++):t&&(A.splice(I,1),t--)}if(M)for(;t--;t)A.unshift("..");return A}function I(A,M){if(A.filter)return A.filter(M);for(var t=[],I=0;I=-1&&!g;e--){var i=e>=0?arguments[e]:A.cwd();if("string"!=typeof i)throw new TypeError("Arguments to path.resolve must be strings");i&&(M=i+"/"+M,g="/"===i.charAt(0))}return M=t(I(M.split("/"),function(A){return!!A}),!g).join("/"),(g?"/":"")+M||"."},M.normalize=function(A){var g=M.isAbsolute(A),e="/"===i(A,-1);return 
A=t(I(A.split("/"),function(A){return!!A}),!g).join("/"),A||g||(A="."),A&&e&&(A+="/"),(g?"/":"")+A},M.isAbsolute=function(A){return"/"===A.charAt(0)},M.join=function(){var A=Array.prototype.slice.call(arguments,0);return M.normalize(I(A,function(A,M){if("string"!=typeof A)throw new TypeError("Arguments to path.join must be strings");return A}).join("/"))},M.relative=function(A,t){function I(A){for(var M=0;M=0&&""===A[t];t--);return M>t?[]:A.slice(M,t-M+1)}A=M.resolve(A).substr(1),t=M.resolve(t).substr(1);for(var g=I(A.split("/")),e=I(t.split("/")),i=Math.min(g.length,e.length),T=i,E=0;E0&&E>T&&(E=T);for(var N=0;N=0?(o=D.substr(0,a),n=D.substr(a+1)):(o=D,n=""),C=decodeURIComponent(o),c=decodeURIComponent(n),t(e,C)?Array.isArray(e[C])?e[C].push(c):e[C]=[e[C],c]:e[C]=c}return e}},function(A,M){"use strict";var t=function(A){switch(typeof A){case"string":return A;case"boolean":return A?"true":"false";case"number":return isFinite(A)?A:"";default:return""}};A.exports=function(A,M,I,g){return M=M||"&",I=I||"=",null===A&&(A=void 0),"object"==typeof A?Object.keys(A).map(function(g){var e=encodeURIComponent(t(g))+I;return Array.isArray(A[g])?A[g].map(function(A){return e+encodeURIComponent(t(A))}).join(M):e+encodeURIComponent(t(A[g]))}).join(M):g?encodeURIComponent(t(g))+I+encodeURIComponent(t(A)):""}},function(A,M,t){"use strict";M.decode=M.parse=t(276),M.encode=M.stringify=t(277)},function(A,M,t){"use strict";var I=t(7).default,g=t(6).default;M.__esModule=!0;var e=t(1),i=g(e),T=t(5),E=g(T),N=t(11),o=g(N),n=t(98),C=g(n),c=t(136),D=g(c),a=i.default.createClass({displayName:"ButtonGroup",propTypes:{vertical:i.default.PropTypes.bool,justified:i.default.PropTypes.bool,block:C.default(i.default.PropTypes.bool,function(A){if(A.block&&!A.vertical)return new Error("The block property requires the vertical property to be set to have any effect")})},getDefaultProps:function(){return{block:!1,justified:!1,vertical:!1}},render:function(){var A=o.default.getClassSet(this.props);return 
A[o.default.prefix(this.props)]=!this.props.vertical,A[o.default.prefix(this.props,"vertical")]=this.props.vertical,A[o.default.prefix(this.props,"justified")]=this.props.justified,A[o.default.prefix(D.default.defaultProps,"block")]=this.props.block,i.default.createElement("div",I({},this.props,{className:E.default(this.props.className,A)}),this.props.children)}});M.default=N.bsClass("btn-group",a),A.exports=M.default},function(A,M,t){"use strict";var I=t(15).default,g=t(14).default,e=t(41).default,i=t(7).default,T=t(6).default;M.__esModule=!0;var E=t(152),N=T(E),o=t(1),n=T(o),C=t(16),c=T(C),D=t(5),a=T(D),B=t(11),Q=T(B),r=t(171),s=T(r),x=t(48),j=T(x),y=t(40),u=T(y),w=function(A){function M(t){g(this,M),A.call(this,t),this.focusNext=this.focusNext.bind(this),this.focusPrevious=this.focusPrevious.bind(this),this.getFocusableMenuItems=this.getFocusableMenuItems.bind(this),this.getItemsAndActiveIndex=this.getItemsAndActiveIndex.bind(this),this.handleKeyDown=this.handleKeyDown.bind(this)}return I(M,A),M.prototype.handleKeyDown=function(A){switch(A.keyCode){case N.default.codes.down:this.focusNext(),A.preventDefault();break;case N.default.codes.up:this.focusPrevious(),A.preventDefault();break;case N.default.codes.esc:case N.default.codes.tab:this.props.onClose(A)}},M.prototype.focusNext=function(){var A=this.getItemsAndActiveIndex(),M=A.items,t=A.activeItemIndex;if(0!==M.length)return t===M.length-1?void M[0].focus():void M[t+1].focus()},M.prototype.focusPrevious=function(){var A=this.getItemsAndActiveIndex(),M=A.items,t=A.activeItemIndex;return 0===t?void M[M.length-1].focus():void M[t-1].focus()},M.prototype.getItemsAndActiveIndex=function(){var A=this.getFocusableMenuItems(),M=document.activeElement,t=A.indexOf(M);return{items:A,activeItemIndex:t}},M.prototype.getFocusableMenuItems=function(){var A=c.default.findDOMNode(this);return void 0===A?[]:[].slice.call(A.querySelectorAll('[tabIndex="-1"]'),0)},M.prototype.render=function(){var 
A,M=this,t=this.props,I=t.children,g=t.onSelect,T=t.pullRight,E=t.className,N=t.labelledBy,o=t.open,C=t.onClose,c=e(t,["children","onSelect","pullRight","className","labelledBy","open","onClose"]),D=j.default.map(I,function(A){var t=A.props||{};return n.default.cloneElement(A,{onKeyDown:u.default(t.onKeyDown,M.handleKeyDown),onSelect:u.default(t.onSelect,g)},t.children)}),B=(A={},A[Q.default.prefix(this.props,"menu")]=!0,A[Q.default.prefix(this.props,"menu-right")]=T,A),r=n.default.createElement("ul",i({className:a.default(E,B),role:"menu","aria-labelledby":N},c),D);return o&&(r=n.default.createElement(s.default,{noWrap:!0,onRootClose:C},r)),r},M}(n.default.Component);w.defaultProps={bsRole:"menu",bsClass:"dropdown",pullRight:!1},w.propTypes={open:n.default.PropTypes.bool,pullRight:n.default.PropTypes.bool,onClose:n.default.PropTypes.func,labelledBy:n.default.PropTypes.oneOfType([n.default.PropTypes.string,n.default.PropTypes.number]),onSelect:n.default.PropTypes.func},M.default=w,A.exports=M.default},function(A,M,t){"use strict";var I=t(15).default,g=t(14).default,e=t(7).default,i=t(6).default;M.__esModule=!0;var T=t(1),E=i(T),N=t(5),o=i(N),n=t(136),C=i(n),c=t(138),D=i(c),a=E.default.createElement("span",null," ",E.default.createElement("span",{className:"caret"})),B=function(A){function M(){g(this,M),A.apply(this,arguments)}return I(M,A),M.prototype.render=function(){var A=this.props.noCaret?null:a,M={"dropdown-toggle":!0},t=this.props.useAnchor?D.default:C.default;return 
E.default.createElement(t,e({},this.props,{className:o.default(M,this.props.className),type:"button","aria-haspopup":!0,"aria-expanded":this.props.open}),this.props.children||this.props.title,A)},M}(E.default.Component);M.default=B,B.defaultProps={open:!1,useAnchor:!1,bsRole:"toggle"},B.propTypes={bsRole:E.default.PropTypes.string,noCaret:E.default.PropTypes.bool,open:E.default.PropTypes.bool,title:E.default.PropTypes.string,useAnchor:E.default.PropTypes.bool},B.isToggle=!0,B.titleProp="title",B.onClickProp="onClick",A.exports=M.default},function(A,M,t){"use strict";var I=t(7).default,g=t(6).default;M.__esModule=!0;var e=t(1),i=g(e),T=t(48),E=g(T),N=/\%\((.+?)\)s/,o=i.default.createClass({displayName:"Interpolate",propTypes:{component:i.default.PropTypes.node,format:i.default.PropTypes.string,unsafe:i.default.PropTypes.bool},getDefaultProps:function(){return{component:"span",unsafe:!1}},render:function(){var A=E.default.hasValidComponent(this.props.children)||"string"==typeof this.props.children?this.props.children:this.props.format,M=this.props.component,t=this.props.unsafe===!0,g=I({},this.props);if(delete g.children,delete g.format,delete g.component,delete g.unsafe,t){var e=A.split(N).reduce(function(A,M,t){var I=void 0;if(t%2===0?I=M:(I=g[M],delete g[M]),i.default.isValidElement(I))throw new Error("cannot interpolate a React component into unsafe text");return A+=I},"");return g.dangerouslySetInnerHTML={__html:e},i.default.createElement(M,g)}var T=A.split(N).reduce(function(A,M,t){var I=void 0;if(t%2===0){if(0===M.length)return A;I=M}else I=g[M],delete g[M];return A.push(I),A},[]);return i.default.createElement(M,g,T)}});M.default=o,A.exports=M.default},function(A,M,t){"use strict";var I=t(15).default,g=t(14).default,e=t(41).default,i=t(7).default,T=t(6).default;M.__esModule=!0;var E=t(5),N=T(E),o=t(1),n=T(o),C=t(11),c=T(C),D=t(98),a=T(D),B=t(138),Q=T(B),r=t(40),s=T(r),x=function(A){function 
M(t){g(this,M),A.call(this,t),this.handleClick=this.handleClick.bind(this)}return I(M,A),M.prototype.handleClick=function(A){this.props.href&&!this.props.disabled||A.preventDefault(),this.props.disabled||this.props.onSelect&&this.props.onSelect(A,this.props.eventKey)},M.prototype.render=function(){if(this.props.divider)return n.default.createElement("li",{role:"separator",className:N.default("divider",this.props.className),style:this.props.style});if(this.props.header){var A=c.default.prefix(this.props,"header");return n.default.createElement("li",{role:"heading",className:N.default(A,this.props.className),style:this.props.style},this.props.children)}var M=this.props,t=M.className,I=M.style,g=M.onClick,T=e(M,["className","style","onClick"]),E={disabled:this.props.disabled,active:this.props.active};return n.default.createElement("li",{role:"presentation",className:N.default(t,E),style:I},n.default.createElement(Q.default,i({},T,{role:"menuitem",tabIndex:"-1",onClick:s.default(g,this.handleClick)})))},M}(n.default.Component);x.propTypes={active:n.default.PropTypes.bool,disabled:n.default.PropTypes.bool,divider:a.default(n.default.PropTypes.bool,function(A){if(A.divider&&A.children)return new Error("Children will not be rendered for dividers")}),eventKey:n.default.PropTypes.any,header:n.default.PropTypes.bool,href:n.default.PropTypes.string,target:n.default.PropTypes.string,title:n.default.PropTypes.string,onClick:n.default.PropTypes.func,onKeyDown:n.default.PropTypes.func,onSelect:n.default.PropTypes.func,id:n.default.PropTypes.oneOfType([n.default.PropTypes.string,n.default.PropTypes.number])},x.defaultProps={divider:!1,disabled:!1,header:!1},M.default=C.bsClass("dropdown",x),A.exports=M.default},function(A,M,t){"use strict";var I=t(7).default,g=t(6).default;M.__esModule=!0;var e=t(1),i=g(e),T=t(5),E=g(T),N=t(11),o=g(N),n=t(39),C=i.default.createClass({displayName:"ModalDialog",propTypes:{dialogClassName:i.default.PropTypes.string},render:function(){var 
A=I({display:"block"},this.props.style),M=o.default.prefix(this.props),t=o.default.getClassSet(this.props);return delete t[M],t[o.default.prefix(this.props,"dialog")]=!0,i.default.createElement("div",I({},this.props,{title:null,tabIndex:"-1",role:"dialog",style:A,className:E.default(this.props.className,M)}),i.default.createElement("div",{className:E.default(this.props.dialogClassName,t)},i.default.createElement("div",{className:o.default.prefix(this.props,"content"),role:"document"},this.props.children)))}});M.default=N.bsSizes([n.Sizes.LARGE,n.Sizes.SMALL],N.bsClass("modal",C)),A.exports=M.default},function(A,M,t){"use strict";var I=t(15).default,g=t(14).default,e=t(7).default,i=t(6).default;M.__esModule=!0;var T=t(1),E=i(T),N=t(5),o=i(N),n=t(11),C=i(n),c=function(A){function M(){g(this,M),A.apply(this,arguments)}return I(M,A),M.prototype.render=function(){return E.default.createElement("div",e({},this.props,{className:o.default(this.props.className,C.default.prefix(this.props,"footer"))}),this.props.children)},M}(E.default.Component);c.propTypes={bsClass:E.default.PropTypes.string},c.defaultProps={bsClass:"modal"},M.default=n.bsClass("modal",c),A.exports=M.default},function(A,M,t){"use strict";var I=t(15).default,g=t(14).default,e=t(7).default,i=t(6).default;M.__esModule=!0;var T=t(1),E=i(T),N=t(5),o=i(N),n=t(11),C=i(n),c=function(A){function M(){g(this,M),A.apply(this,arguments)}return I(M,A),M.prototype.render=function(){return E.default.createElement("h4",e({},this.props,{className:o.default(this.props.className,C.default.prefix(this.props,"title"))}),this.props.children)},M}(E.default.Component);M.default=n.bsClass("modal",c),A.exports=M.default},function(A,M,t){"use strict";var I=t(15).default,g=t(14).default,e=t(7).default,i=t(41).default,T=t(6).default;M.__esModule=!0;var E=t(1),N=T(E),o=t(364),n=T(o),C=t(62),c=T(C),D=t(137),a=T(D),B=t(5),Q=T(B),r=function(A){function M(){g(this,M),A.apply(this,arguments)}return I(M,A),M.prototype.render=function(){var 
A=this.props,M=A.children,t=A.animation,I=i(A,["children","animation"]);return t===!0&&(t=a.default),t===!1&&(t=null),t||(M=E.cloneElement(M,{className:Q.default("in",M.props.className)})),N.default.createElement(n.default,e({},I,{transition:t}),M)},M}(N.default.Component);r.propTypes=e({},n.default.propTypes,{show:N.default.PropTypes.bool,rootClose:N.default.PropTypes.bool,onHide:N.default.PropTypes.func,animation:N.default.PropTypes.oneOfType([N.default.PropTypes.bool,c.default]),onEnter:N.default.PropTypes.func,onEntering:N.default.PropTypes.func,onEntered:N.default.PropTypes.func,onExit:N.default.PropTypes.func,onExiting:N.default.PropTypes.func,onExited:N.default.PropTypes.func}),r.defaultProps={animation:a.default,rootClose:!1,show:!1},M.default=r,A.exports=M.default},function(A,M,t){"use strict";function I(A,M,t){if(A[M]){var I=function(){var I=void 0,g=void 0;return o.default.Children.forEach(A[M],function(A){A.type!==x&&(g=A.type.displayName?A.type.displayName:A.type,I=new Error("Children of "+t+" can contain only ProgressBar components. 
Found "+g))}),{v:I}}();if("object"==typeof I)return I.v}}var g=t(15).default,e=t(14).default,i=t(7).default,T=t(41).default,E=t(6).default;M.__esModule=!0;var N=t(1),o=E(N),n=t(282),C=E(n),c=t(11),D=E(c),a=t(39),B=t(5),Q=E(B),r=t(48),s=E(r),x=function(A){function M(){e(this,M),A.apply(this,arguments)}return g(M,A),M.prototype.getPercentage=function(A,M,t){var I=1e3;return Math.round((A-M)/(t-M)*100*I)/I},M.prototype.render=function(){if(this.props.isChild)return this.renderProgressBar();var A=void 0;return A=this.props.children?s.default.map(this.props.children,this.renderChildBar):this.renderProgressBar(),o.default.createElement("div",i({},this.props,{className:Q.default(this.props.className,"progress"),min:null,max:null,label:null,"aria-valuetext":null}),A)},M.prototype.renderChildBar=function(A,M){return N.cloneElement(A,{isChild:!0,key:A.key?A.key:M})},M.prototype.renderProgressBar=function(){var A,M=this.props,t=M.className,I=M.label,g=M.now,e=M.min,E=M.max,N=M.style,n=T(M,["className","label","now","min","max","style"]),C=this.getPercentage(g,e,E);"string"==typeof I&&(I=this.renderLabel(C)),this.props.srOnly&&(I=o.default.createElement("span",{className:"sr-only"},I));var c=Q.default(t,D.default.getClassSet(this.props),(A={active:this.props.active},A[D.default.prefix(this.props,"striped")]=this.props.active||this.props.striped,A));return o.default.createElement("div",i({},n,{className:c,role:"progressbar",style:i({width:C+"%"},N),"aria-valuenow":this.props.now,"aria-valuemin":this.props.min,"aria-valuemax":this.props.max}),I)},M.prototype.renderLabel=function(A){var M=this.props.interpolateClass||C.default;return 
o.default.createElement(M,{now:this.props.now,min:this.props.min,max:this.props.max,percent:A,bsStyle:this.props.bsStyle},this.props.label)},M}(o.default.Component);x.propTypes=i({},x.propTypes,{min:N.PropTypes.number,now:N.PropTypes.number,max:N.PropTypes.number,label:N.PropTypes.node,srOnly:N.PropTypes.bool,striped:N.PropTypes.bool,active:N.PropTypes.bool,children:I,className:o.default.PropTypes.string,interpolateClass:N.PropTypes.node,isChild:N.PropTypes.bool}),x.defaultProps=i({},x.defaultProps,{min:0,max:100,active:!1,isChild:!1,srOnly:!1,striped:!1}),M.default=c.bsStyles(a.State.values(),c.bsClass("progress-bar",x)),A.exports=M.default},function(A,M,t){"use strict";var I=t(6).default;M.__esModule=!0;var g=t(99),e=t(290),i=I(e);M.default={requiredRoles:function(){for(var A=arguments.length,M=Array(A),t=0;t1)||(e=A,!1)}),e)return new Error("(children) "+I+" - Duplicate children detected of bsRole: "+e+". Only one child each allowed with the following bsRoles: "+M.join(", "))})}},A.exports=M.default},function(A,M,t){"use strict";function I(A){var M=[];return void 0===A?M:(i.default.forEach(A,function(A){M.push(A)}),M)}var g=t(6).default;M.__esModule=!0,M.default=I;var e=t(48),i=g(e);A.exports=M.default},function(A,M,t){A.exports={default:t(295),__esModule:!0}},function(A,M,t){t(305),A.exports=t(49).Object.assign},function(A,M,t){var I=t(85);A.exports=function(A,M){return I.create(A,M)}},function(A,M,t){t(306),A.exports=t(49).Object.keys},function(A,M,t){t(307),A.exports=t(49).Object.setPrototypeOf},function(A,M){A.exports=function(A){if("function"!=typeof A)throw TypeError(A+" is not a function!");return A}},function(A,M,t){var I=t(143);A.exports=function(A){if(!I(A))throw TypeError(A+" is not an object!");return A}},function(A,M){var t={}.toString;A.exports=function(A){return t.call(A).slice(8,-1)}},function(A,M){A.exports=function(A){if(void 0==A)throw TypeError("Can't call method on "+A);return A}},function(A,M){var t=A.exports="undefined"!=typeof 
window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=t)},function(A,M,t){var I=t(298);A.exports=Object("z").propertyIsEnumerable(0)?Object:function(A){return"String"==I(A)?A.split(""):Object(A)}},function(A,M,t){var I=t(85),g=t(144),e=t(301);A.exports=t(142)(function(){var A=Object.assign,M={},t={},I=Symbol(),g="abcdefghijklmnopqrst";return M[I]=7,g.split("").forEach(function(A){t[A]=A}),7!=A({},M)[I]||Object.keys(A({},t)).join("")!=g})?function(A,M){for(var t=g(A),i=arguments,T=i.length,E=1,N=I.getKeys,o=I.getSymbols,n=I.isEnum;T>E;)for(var C,c=e(i[E++]),D=o?N(c).concat(o(c)):N(c),a=D.length,B=0;a>B;)n.call(c,C=D[B++])&&(t[C]=c[C]);return t}:Object.assign},function(A,M,t){var I=t(84),g=t(49),e=t(142);A.exports=function(A,M){var t=(g.Object||{})[A]||Object[A],i={};i[A]=M(t),I(I.S+I.F*e(function(){t(1)}),"Object",i)}},function(A,M,t){var I=t(85).getDesc,g=t(143),e=t(297),i=function(A,M){if(e(A),!g(M)&&null!==M)throw TypeError(M+": can't set as prototype!")};A.exports={set:Object.setPrototypeOf||("__proto__"in{}?function(A,M,g){try{g=t(141)(Function.call,I(Object.prototype,"__proto__").set,2),g(A,[]),M=!(A instanceof Array)}catch(A){M=!0}return function(A,t){return i(A,t),M?A.__proto__=t:g(A,t),A}}({},!1):void 0),check:i}},function(A,M,t){var I=t(84);I(I.S+I.F,"Object",{assign:t(302)})},function(A,M,t){var I=t(144);t(303)("keys",function(A){return function(M){return A(I(M))}})},function(A,M,t){var I=t(84);I(I.S,"Object",{setPrototypeOf:t(304).set})},function(A,M,t){"use strict";var I=t(146);A.exports=function(A,M){A.classList?A.classList.add(M):I(A)||(A.className=A.className+" "+M)}},function(A,M,t){"use strict";A.exports={addClass:t(308),removeClass:t(310),hasClass:t(146)}},function(A,M){"use strict";A.exports=function(A,M){A.classList?A.classList.remove(M):A.className=A.className.replace(new RegExp("(^|\\s)"+M+"(?:\\s|$)","g"),"$1").replace(/\s+/g," 
").replace(/^\s*|\s*$/g,"")}},function(A,M,t){"use strict";var I=t(50),g=t(315);A.exports=function(A,M){return function(t){var e=t.currentTarget,i=t.target,T=g(e,A);T.some(function(A){return I(A,i)})&&M.call(this,t)}}},function(A,M,t){"use strict";var I=t(86),g=t(147),e=t(311);A.exports={on:I,off:g,filter:e}},function(A,M,t){"use strict";function I(A){return A.nodeName&&A.nodeName.toLowerCase()}function g(A){for(var M=(0,T.default)(A),t=A&&A.offsetParent;t&&"html"!==I(A)&&"static"===(0,N.default)(t,"position");)t=t.offsetParent;return t||M.documentElement}var e=t(60);M.__esModule=!0,M.default=g;var i=t(42),T=e.interopRequireDefault(i),E=t(87),N=e.interopRequireDefault(E);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A.nodeName&&A.nodeName.toLowerCase()}function g(A,M){var t,g={top:0,left:0};return"fixed"===(0,a.default)(A,"position")?t=A.getBoundingClientRect():(M=M||(0,N.default)(A),t=(0,T.default)(A),"html"!==I(M)&&(g=(0,T.default)(M)),g.top+=parseInt((0,a.default)(M,"borderTopWidth"),10)-(0,n.default)(M)||0,g.left+=parseInt((0,a.default)(M,"borderLeftWidth"),10)-(0,c.default)(M)||0),e._extends({},t,{top:t.top-g.top-(parseInt((0,a.default)(A,"marginTop"),10)||0),left:t.left-g.left-(parseInt((0,a.default)(A,"marginLeft"),10)||0)})}var e=t(60);M.__esModule=!0,M.default=g;var i=t(148),T=e.interopRequireDefault(i),E=t(313),N=e.interopRequireDefault(E),o=t(149),n=e.interopRequireDefault(o),C=t(316),c=e.interopRequireDefault(C),D=t(87),a=e.interopRequireDefault(D);A.exports=M.default},function(A,M){"use strict";var t=/^[\w-]*$/,I=Function.prototype.bind.call(Function.prototype.call,[].slice);A.exports=function(A,M){var g,e="#"===M[0],i="."===M[0],T=e||i?M.slice(1):M,E=t.test(T);return E?e?(A=A.getElementById?A:document,(g=A.getElementById(T))?[g]:[]):I(A.getElementsByClassName&&i?A.getElementsByClassName(T):A.getElementsByTagName(M)):I(A.querySelectorAll(M))}},function(A,M,t){"use strict";var I=t(59);A.exports=function(A,M){var t=I(A);return 
void 0===M?t?"pageXOffset"in t?t.pageXOffset:t.document.documentElement.scrollLeft:A.scrollLeft:void(t?t.scrollTo(M,"pageYOffset"in t?t.pageYOffset:t.document.documentElement.scrollTop):A.scrollLeft=M)}},function(A,M,t){"use strict";var I=t(60),g=t(150),e=I.interopRequireDefault(g),i=/^(top|right|bottom|left)$/,T=/^([+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|))(?!px)[a-z%]+$/i;A.exports=function(A){if(!A)throw new TypeError("No Element passed to ` + "`" + `getComputedStyle()` + "`" + `");var M=A.ownerDocument;return"defaultView"in M?M.defaultView.opener?A.ownerDocument.defaultView.getComputedStyle(A,null):window.getComputedStyle(A,null):{getPropertyValue:function(M){var t=A.style;M=(0,e.default)(M),"float"==M&&(M="styleFloat");var I=A.currentStyle[M]||null;if(null==I&&t&&t[M]&&(I=t[M]),T.test(I)&&!i.test(M)){var g=t.left,E=A.runtimeStyle,N=E&&E.left;N&&(E.left=A.currentStyle.left),t.left="fontSize"===M?"1em":I,I=t.pixelLeft+"px",t.left=g,N&&(E.left=N)}return I}}}},function(A,M){"use strict";A.exports=function(A,M){return"removeProperty"in A.style?A.style.removeProperty(M):A.style.removeAttribute(M)}},function(A,M,t){"use strict";function I(){var A,M="",t={O:"otransitionend",Moz:"transitionend",Webkit:"webkitTransitionEnd",ms:"MSTransitionEnd"},I=document.createElement("div");for(var g in t)if(N.call(t,g)&&void 0!==I.style[g+"TransitionProperty"]){M="-"+g.toLowerCase()+"-",A=t[g];break}return A||void 0===I.style.transitionProperty||(A="transitionend"),{end:A,prefix:M}}var g,e,i,T,E=t(32),N=Object.prototype.hasOwnProperty,o="transform",n={};E&&(n=I(),o=n.prefix+o,i=n.prefix+"transition-property",e=n.prefix+"transition-duration",T=n.prefix+"transition-delay",g=n.prefix+"transition-timing-function"),A.exports={transform:o,end:n.end,property:i,timing:g,delay:T,duration:e}},function(A,M){"use strict";var t=/-(.)/g;A.exports=function(A){return A.replace(t,function(A,M){return M.toUpperCase()})}},function(A,M){"use strict";var t=/([A-Z])/g;A.exports=function(A){return 
A.replace(t,"-$1").toLowerCase()}},function(A,M,t){"use strict";var I=t(321),g=/^ms-/;A.exports=function(A){return I(A).replace(g,"-ms-")}},function(A,M){function t(A){var M=A?A.length:0;return M?A[M-1]:void 0}A.exports=t},function(A,M,t){var I=t(332),g=t(350),e=g(I);A.exports=e},function(A,M,t){(function(M){function I(A){var M=A?A.length:0;for(this.data={hash:T(null),set:new i};M--;)this.push(A[M])}var g=t(346),e=t(61),i=e(M,"Set"),T=e(Object,"create");I.prototype.push=g,A.exports=I}).call(M,function(){return this}())},function(A,M){function t(A,M){for(var t=-1,I=A.length;++t=T?i(M):null,C=M.length;n&&(N=e,o=!1,M=n);A:for(;++Eg?0:g+M),t=void 0===t||t>g?g:+t||0,t<0&&(t+=g),g=M>t?0:t-M>>>0,M>>>=0;for(var e=Array(g);++I-1?t[N]:void 0}return e(t,I,A)}}var g=t(330),e=t(333),i=t(334),T=t(26);A.exports=I},function(A,M,t){function I(A,M,t,I,e,i,T){var E=-1,N=A.length,o=M.length;if(N!=o&&!(e&&o>N))return!1;for(;++E=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=Object.assign||function(A){for(var M=1;M1?t-1:0),g=1;g=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof 
M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}function E(){}Object.defineProperty(M,"__esModule",{value:!0}),M.EXITING=M.ENTERED=M.ENTERING=M.EXITED=M.UNMOUNTED=void 0;var N=Object.assign||function(A){for(var M=1;MT?T-N:0}function i(A,M,t,I){var e=g(t),i=e.width,T=A-I,E=A+I+M;return T<0?-T:E>i?i-E:0}function T(A,M,t,I,g){var T="BODY"===I.tagName?(0,N.default)(t):(0,n.default)(t,I),E=(0,N.default)(M),o=E.height,C=E.width,c=void 0,D=void 0,a=void 0,B=void 0;if("left"===A||"right"===A){D=T.top+(T.height-o)/2,c="left"===A?T.left-C:T.left+T.width;var Q=e(D,o,I,g);D+=Q,B=50*(1-2*Q/o)+"%",a=void 0}else{if("top"!==A&&"bottom"!==A)throw new Error('calcOverlayPosition(): No such placement of "'+A+'" found.');c=T.left+(T.width-C)/2,D="top"===A?T.top-o:T.top+T.height;var r=i(c,C,I,g);c+=r,a=50*(1-2*r/C)+"%",B=void 0}return{positionLeft:c,positionTop:D,arrowOffsetLeft:a,arrowOffsetTop:B}}Object.defineProperty(M,"__esModule",{value:!0}),M.default=T;var 
E=t(148),N=I(E),o=t(314),n=I(o),C=t(149),c=I(C),D=t(51),a=I(D);A.exports=M.default},function(A,M){"use strict";function t(){for(var A=arguments.length,M=Array(A),t=0;t=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function i(A,M){function t(I,g){function i(A,t){var I=c.getLinkName(A),e=this.props[g[A]];I&&E(this.props,I)&&!e&&(e=this.props[I].requestChange);for(var i=arguments.length,T=Array(i>2?i-2:0),N=2;N=15||0===s[0]&&s[1]>=13?A:A.type}function T(A,M){var t=N(M);return t&&!E(A,M)&&E(A,t)?A[t].value:A[M]}function E(A,M){return void 0!==A[M]}function N(A){return"value"===A?"valueLink":"checked"===A?"checkedLink":null}function o(A){return"default"+A.charAt(0).toUpperCase()+A.substr(1)}function n(A,M,t){return function(){for(var I=arguments.length,g=Array(I),e=0;e=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}Object.defineProperty(M,"__esModule",{value:!0}),M.CopyToClipboard=void 0;var e=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){var M=A.style,t=g(A,["style"]),I=o({},M,{height:6,right:2,bottom:2,left:2,borderRadius:3});return C.default.createElement("div",o({style:I},t))}function i(A){var M=A.style,t=g(A,["style"]),I=o({},M,{width:6,right:2,bottom:2,top:2,borderRadius:3});return C.default.createElement("div",o({style:I},t))}function T(A){var M=A.style,t=g(A,["style"]),I=o({},M,{ +cursor:"pointer",borderRadius:"inherit",backgroundColor:"rgba(0,0,0,.2)"});return C.default.createElement("div",o({style:I},t))}function E(A){var M=A.style,t=g(A,["style"]),I=o({},M,{cursor:"pointer",borderRadius:"inherit",backgroundColor:"rgba(0,0,0,.2)"});return C.default.createElement("div",o({style:I},t))}function N(A){return C.default.createElement("div",A)}var o=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}var e=Object.assign||function(A){for(var M=1;M=0;e--){var i=I[e]+g;if(i in M)return 
i}return!1}},function(A,M,t){function I(A){return g(A).replace(/\s(\w)/g,function(A,M){return M.toUpperCase()})}var g=t(388);A.exports=I},function(A,M,t){function I(A){return g(A).replace(/[\W_]+(.|$)/g,function(A,M){return M?" "+M:""}).trim()}var g=t(389);A.exports=I},function(A,M){function t(A){return e.test(A)?A.toLowerCase():i.test(A)?(I(A)||A).toLowerCase():T.test(A)?g(A).toLowerCase():A.toLowerCase()}function I(A){return A.replace(E,function(A,M){return M?" "+M:""})}function g(A){return A.replace(N,function(A,M,t){return M+" "+t.toLowerCase().split("").join(" ")})}A.exports=t;var e=/\s/,i=/[\W_]/,T=/([a-z][A-Z]|[A-Z][a-z])/,E=/[\W_]+(.|$)/g,N=/(.)([A-Z]+)/g},function(A,M,t){(function(M){for(var I=t(391),g="undefined"==typeof window?M:window,e=["moz","webkit"],i="AnimationFrame",T=g["request"+i],E=g["cancel"+i]||g["cancelRequest"+i],N=0;!T&&N=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=Object.assign||function(A){for(var M=1;M0||(this.setState({isDragActive:!1,isDragReject:!1}),this.props.onDragLeave&&this.props.onDragLeave.call(this,A))}},{key:"onDrop",value:function(A){A.preventDefault(),this.enterCounter=0,this.setState({isDragActive:!1,isDragReject:!1});for(var 
M=A.dataTransfer?A.dataTransfer.files:A.target.files,t=this.props.multiple?M.length:Math.min(M.length,1),I=[],g=[],e=0;e0?this.props.onDropRejected&&this.props.onDropRejected.call(this,g,A):I.length>0&&this.props.onDropAccepted&&this.props.onDropAccepted.call(this,I,A),this.isFileDialogActive=!1}},{key:"onClick",value:function(){this.props.disableClick||this.open()}},{key:"onFileDialogCancel",value:function A(){var A=this.props.onFileDialogCancel,M=this.fileInputEl,t=this.isFileDialogActive;A&&t&&setTimeout(function(){var I=M.files;I.length||(t=!1,A())},300)}},{key:"fileAccepted",value:function(A){return(0,n.default)(A,this.props.accept)}},{key:"fileMatchSize",value:function(A){return A.size<=this.props.maxSize&&A.size>=this.props.minSize}},{key:"allFilesAccepted",value:function(A){return A.every(this.fileAccepted)}},{key:"open",value:function(){this.isFileDialogActive=!0,this.fileInputEl.value=null,this.fileInputEl.click()}},{key:"render",value:function(){var A=this,M=this.props,t=M.accept,I=M.activeClassName,e=M.inputProps,i=M.multiple,T=M.name,N=M.rejectClassName,o=g(M,["accept","activeClassName","inputProps","multiple","name","rejectClassName"]),n=o.activeStyle,C=o.className,a=o.rejectStyle,B=o.style,Q=g(o,["activeStyle","className","rejectStyle","style"]),r=this.state,s=r.isDragActive,x=r.isDragReject;C=C||"",s&&I&&(C+=" "+I),x&&N&&(C+=" "+N),C||B||n||a||(B={width:200,height:200,borderWidth:2,borderColor:"#666",borderStyle:"dashed",borderRadius:5},n={borderStyle:"solid",backgroundColor:"#eee"},a={borderStyle:"solid",backgroundColor:"#ffdddd"});var j=void 0;j=n&&s?E({},B,n):a&&x?E({},B,a):E({},B);var y={accept:t,type:"file",style:{display:"none"},multiple:D&&i,ref:function(M){return A.fileInputEl=M},onChange:this.onDrop};T&&T.length&&(y.name=T);var u=["acceptedFiles","disablePreview","disableClick","onDropAccepted","onDropRejected","onFileDialogCancel","maxSize","minSize"],w=E({},Q);return u.forEach(function(A){return delete 
w[A]}),c.default.createElement("div",E({className:C,style:j},w,{onClick:this.onClick,onDragStart:this.onDragStart,onDragEnter:this.onDragEnter,onDragOver:this.onDragOver,onDragLeave:this.onDragLeave,onDrop:this.onDrop}),this.props.children,c.default.createElement("input",E({},e,y)))}}]),M}(c.default.Component);a.defaultProps={disablePreview:!1,disableClick:!1,multiple:!0,maxSize:1/0,minSize:0},a.propTypes={onDrop:c.default.PropTypes.func,onDropAccepted:c.default.PropTypes.func,onDropRejected:c.default.PropTypes.func,onDragStart:c.default.PropTypes.func,onDragEnter:c.default.PropTypes.func,onDragLeave:c.default.PropTypes.func,children:c.default.PropTypes.node,style:c.default.PropTypes.object,activeStyle:c.default.PropTypes.object,rejectStyle:c.default.PropTypes.object,className:c.default.PropTypes.string,activeClassName:c.default.PropTypes.string,rejectClassName:c.default.PropTypes.string,disablePreview:c.default.PropTypes.bool,disableClick:c.default.PropTypes.bool,onFileDialogCancel:c.default.PropTypes.func,inputProps:c.default.PropTypes.object,multiple:c.default.PropTypes.bool,accept:c.default.PropTypes.string,name:c.default.PropTypes.string,maxSize:c.default.PropTypes.number,minSize:c.default.PropTypes.number},M.default=a,A.exports=M.default},function(A,M){A.exports=function(A){function M(I){if(t[I])return t[I].exports;var g=t[I]={exports:{},id:I,loaded:!1};return A[I].call(g.exports,g,g.exports,M),g.loaded=!0,g.exports}var t={};return M.m=A,M.c=t,M.p="",M(0)}([function(A,M,t){"use strict";M.__esModule=!0,t(8),t(9),M.default=function(A,M){if(A&&M){var t=function(){var t=M.split(","),I=A.name||"",g=A.type||"",e=g.replace(/\/.*$/,"");return{v:t.some(function(A){var M=A.trim();return"."===M.charAt(0)?I.toLowerCase().endsWith(M.toLowerCase()):/\/\*$/.test(M)?e===M.replace(/\/.*$/,""):g===M})}}();if("object"==typeof t)return t.v}return!0},A.exports=M.default},function(A,M){var t=A.exports={version:"1.2.2"};"number"==typeof __e&&(__e=t)},function(A,M){var 
t=A.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=t)},function(A,M,t){var I=t(2),g=t(1),e=t(4),i=t(19),T="prototype",E=function(A,M){return function(){return A.apply(M,arguments)}},N=function(A,M,t){var o,n,C,c,D=A&N.G,a=A&N.P,B=D?I:A&N.S?I[M]||(I[M]={}):(I[M]||{})[T],Q=D?g:g[M]||(g[M]={});D&&(t=M);for(o in t)n=!(A&N.F)&&B&&o in B,C=(n?B:t)[o],c=A&N.B&&n?E(C,I):a&&"function"==typeof C?E(Function.call,C):C,B&&!n&&i(B,o,C),Q[o]!=C&&e(Q,o,c),a&&((Q[T]||(Q[T]={}))[o]=C)};I.core=g,N.F=1,N.G=2,N.S=4,N.P=8,N.B=16,N.W=32,A.exports=N},function(A,M,t){var I=t(5),g=t(18);A.exports=t(22)?function(A,M,t){return I.setDesc(A,M,g(1,t))}:function(A,M,t){return A[M]=t,A}},function(A,M){var t=Object;A.exports={create:t.create,getProto:t.getPrototypeOf,isEnum:{}.propertyIsEnumerable,getDesc:t.getOwnPropertyDescriptor,setDesc:t.defineProperty,setDescs:t.defineProperties,getKeys:t.keys,getNames:t.getOwnPropertyNames,getSymbols:t.getOwnPropertySymbols,each:[].forEach}},function(A,M){var t=0,I=Math.random();A.exports=function(A){return"Symbol(".concat(void 0===A?"":A,")_",(++t+I).toString(36))}},function(A,M,t){var I=t(20)("wks"),g=t(2).Symbol;A.exports=function(A){return I[A]||(I[A]=g&&g[A]||(g||t(6))("Symbol."+A))}},function(A,M,t){t(26),A.exports=t(1).Array.some},function(A,M,t){t(25),A.exports=t(1).String.endsWith},function(A,M){A.exports=function(A){if("function"!=typeof A)throw TypeError(A+" is not a function!");return A}},function(A,M){var t={}.toString;A.exports=function(A){return t.call(A).slice(8,-1)}},function(A,M,t){var I=t(10);A.exports=function(A,M,t){if(I(A),void 0===M)return A;switch(t){case 1:return function(t){return A.call(M,t)};case 2:return function(t,I){return A.call(M,t,I)};case 3:return function(t,I,g){return A.call(M,t,I,g)}}return function(){return A.apply(M,arguments)}}},function(A,M){A.exports=function(A){if(void 0==A)throw TypeError("Can't call 
method on "+A);return A}},function(A,M,t){A.exports=function(A){var M=/./;try{"/./"[A](M)}catch(I){try{return M[t(7)("match")]=!1,!"/./"[A](M)}catch(A){}}return!0}},function(A,M){A.exports=function(A){try{return!!A()}catch(A){return!0}}},function(A,M){A.exports=function(A){return"object"==typeof A?null!==A:"function"==typeof A}},function(A,M,t){var I=t(16),g=t(11),e=t(7)("match");A.exports=function(A){var M;return I(A)&&(void 0!==(M=A[e])?!!M:"RegExp"==g(A))}},function(A,M){A.exports=function(A,M){return{enumerable:!(1&A),configurable:!(2&A),writable:!(4&A),value:M}}},function(A,M,t){var I=t(2),g=t(4),e=t(6)("src"),i="toString",T=Function[i],E=(""+T).split(i);t(1).inspectSource=function(A){return T.call(A)},(A.exports=function(A,M,t,i){"function"==typeof t&&(g(t,e,A[M]?""+A[M]:E.join(String(M))),"name"in t||(t.name=M)),A===I?A[M]=t:(i||delete A[M],g(A,M,t))})(Function.prototype,i,function(){return"function"==typeof this&&this[e]||T.call(this)})},function(A,M,t){var I=t(2),g="__core-js_shared__",e=I[g]||(I[g]={});A.exports=function(A){return e[A]||(e[A]={})}},function(A,M,t){var I=t(17),g=t(13);A.exports=function(A,M,t){if(I(M))throw TypeError("String#"+t+" doesn't accept regex!");return String(g(A))}},function(A,M,t){A.exports=!t(15)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(A,M){var t=Math.ceil,I=Math.floor;A.exports=function(A){return isNaN(A=+A)?0:(A>0?I:t)(A)}},function(A,M,t){var I=t(23),g=Math.min;A.exports=function(A){return A>0?g(I(A),9007199254740991):0}},function(A,M,t){"use strict";var I=t(3),g=t(24),e=t(21),i="endsWith",T=""[i];I(I.P+I.F*t(14)(i),"String",{endsWith:function(A){var M=e(this,A,i),t=arguments,I=t.length>1?t[1]:void 0,E=g(M.length),N=void 0===I?E:Math.min(g(I),E),o=String(A);return T?T.call(M,o,N):M.slice(N-o.length,N)===o}})},function(A,M,t){var I=t(5),g=t(3),e=t(1).Array||Array,i={},T=function(A,M){I.each.call(A.split(","),function(A){void 0==M&&A in e?i[A]=e[A]:A 
in[]&&(i[A]=t(12)(Function.call,[][A],M))})};T("pop,reverse,shift,keys,values,entries",1),T("indexOf,every,some,forEach,map,filter,find,findIndex,includes",3),T("join,slice,concat,push,splice,unshift,sort,lastIndexOf,reduce,reduceRight,copyWithin,fill"),g(g.S,"Array",i)}])},function(M,t){M.exports=A}])})},function(A,M,t){"use strict";function I(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function g(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}var e=function(){function A(A,M){for(var t=0;t=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){var t=A.history,I=A.routes,e=A.location,E=g(A,["history","routes","location"]);t||e?void 0:(0,N.default)(!1),t=t?t:(0,n.default)(E);var o=(0,c.default)(t,(0,D.createRoutes)(I)),C=void 0;e?e=t.createLocation(e):C=t.listen(function(A){e=A});var B=(0,a.createRouterObject)(t,o);t=(0,a.createRoutingHistory)(t,o),o.match(e,function(A,I,g){M(A,I&&B.createLocation(I,T.REPLACE),g&&i({},g,{history:t,router:B,matchContext:{history:t,transitionManager:o,router:B}})),C&&C()})}M.__esModule=!0;var i=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return function(){var M=arguments.length<=0||void 0===arguments[0]?{}:arguments[0],t=M.routes,I=g(M,["routes"]),e=(0,E.default)(A)(I),T=(0,o.default)(e,t);return i({},e,T)}}M.__esModule=!0;var i=Object.assign||function(A){for(var M=1;M=A&&N&&(T=!0,t()))}}var i=0,T=!1,E=!1,N=!1,o=void 0;e()}M.__esModule=!0;var I=Array.prototype.slice;M.loopAsync=t},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(){function A(A){try{A=A||window.history.state||{}}catch(M){A={}}var 
M=n.getWindowPath(),t=A,I=t.key,g=void 0;I?g=C.readState(I):(g=null,I=s.createKey(),Q&&window.history.replaceState(e({},A,{key:I}),null));var i=N.parsePath(M);return s.createLocation(e({},i,{state:g}),void 0,I)}function M(M){function t(M){void 0!==M.state&&I(A(M.state))}var I=M.transitionTo;return n.addEventListener(window,"popstate",t),function(){n.removeEventListener(window,"popstate",t)}}function t(A){var M=A.basename,t=A.pathname,I=A.search,g=A.hash,e=A.state,i=A.action,T=A.key;if(i!==E.POP){C.saveState(T,e);var N=(M||"")+t+I+g,o={key:T};if(i===E.PUSH){if(r)return window.location.href=N,!1;window.history.pushState(o,null,N)}else{if(r)return window.location.replace(N),!1;window.history.replaceState(o,null,N)}}}function I(A){1===++x&&(j=M(s));var t=s.listenBefore(A);return function(){t(),0===--x&&j()}}function g(A){1===++x&&(j=M(s));var t=s.listen(A);return function(){t(),0===--x&&j()}}function i(A){1===++x&&(j=M(s)),s.registerTransitionHook(A)}function c(A){s.unregisterTransitionHook(A),0===--x&&j()}var a=arguments.length<=0||void 0===arguments[0]?{}:arguments[0];o.canUseDOM?void 0:T.default(!1);var B=a.forceRefresh,Q=n.supportsHistory(),r=!Q||B,s=D.default(e({},a,{getCurrentLocation:A,finishTransition:t,saveState:C.saveState})),x=0,j=void 0;return e({},s,{listenBefore:I,listen:g,registerTransitionHook:i,unregisterTransitionHook:c})}M.__esModule=!0;var e=Object.assign||function(A){for(var M=1;M=0&&M=0&&B0&&"number"!=typeof A[0]))}function e(A,M,t){var e,o;if(I(A)||I(M))return!1;if(A.prototype!==M.prototype)return!1;if(E(A))return!!E(M)&&(A=i.call(A),M=i.call(M),N(A,M,t));if(g(A)){if(!g(M))return!1;if(A.length!==M.length)return!1;for(e=0;e=0;e--)if(n[e]!=C[e])return!1;for(e=n.length-1;e>=0;e--)if(o=n[e],!N(A[o],M[o],t))return!1;return typeof A==typeof M}var i=Array.prototype.slice,T=t(421),E=t(420),N=A.exports=function(A,M,t){return t||(t={}),A===M||(A instanceof Date&&M instanceof Date?A.getTime()===M.getTime():!A||!M||"object"!=typeof A&&"object"!=typeof 
M?t.strict?A===M:A==M:e(A,M,t))}},function(A,M){function t(A){return"[object Arguments]"==Object.prototype.toString.call(A)}function I(A){return A&&"object"==typeof A&&"number"==typeof A.length&&Object.prototype.hasOwnProperty.call(A,"callee")&&!Object.prototype.propertyIsEnumerable.call(A,"callee")||!1}var g="[object Arguments]"==function(){return Object.prototype.toString.call(arguments)}();M=A.exports=g?t:I,M.supported=t,M.unsupported=I},function(A,M){function t(A){var M=[];for(var t in A)M.push(t);return M}M=A.exports="function"==typeof Object.keys?Object.keys:t,M.shim=t},function(A,M,t){"use strict";var I=t(423);M.extract=function(A){return A.split("?")[1]||""},M.parse=function(A){return"string"!=typeof A?{}:(A=A.trim().replace(/^(\?|#|&)/,""),A?A.split("&").reduce(function(A,M){var t=M.replace(/\+/g," ").split("="),I=t.shift(),g=t.length>0?t.join("="):void 0;return I=decodeURIComponent(I),g=void 0===g?null:decodeURIComponent(g),A.hasOwnProperty(I)?Array.isArray(A[I])?A[I].push(g):A[I]=[A[I],g]:A[I]=g,A},{}):{})},M.stringify=function(A){return A?Object.keys(A).sort().map(function(M){var t=A[M];return void 0===t?"":null===t?M:Array.isArray(t)?t.slice().sort().map(function(A){return I(M)+"="+I(A)}).join("&"):I(M)+"="+I(t)}).filter(function(A){return A.length>0}).join("&"):""}},function(A,M){"use strict";A.exports=function(A){return encodeURIComponent(A).replace(/[!'()*]/g,function(A){return"%"+A.charCodeAt(0).toString(16).toUpperCase()})}},function(A,M){"use strict";var t={childContextTypes:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,mixins:!0,propTypes:!0,type:!0},I={name:!0,length:!0,prototype:!0,caller:!0,arguments:!0,arity:!0},g="function"==typeof Object.getOwnPropertySymbols;A.exports=function(A,M,e){if("string"!=typeof M){var i=Object.getOwnPropertyNames(M);g&&(i=i.concat(Object.getOwnPropertySymbols(M)));for(var 
T=0;T8&&u<=11),l=32,Y=String.fromCharCode(l),d=c.topLevelTypes,h={beforeInput:{phasedRegistrationNames:{bubbled:s({onBeforeInput:null}),captured:s({onBeforeInputCapture:null})},dependencies:[d.topCompositionEnd,d.topKeyPress,d.topTextInput,d.topPaste]},compositionEnd:{phasedRegistrationNames:{bubbled:s({onCompositionEnd:null}),captured:s({onCompositionEndCapture:null})},dependencies:[d.topBlur,d.topCompositionEnd,d.topKeyDown,d.topKeyPress,d.topKeyUp,d.topMouseDown]},compositionStart:{phasedRegistrationNames:{bubbled:s({onCompositionStart:null}),captured:s({onCompositionStartCapture:null})},dependencies:[d.topBlur,d.topCompositionStart,d.topKeyDown,d.topKeyPress,d.topKeyUp,d.topMouseDown]},compositionUpdate:{phasedRegistrationNames:{bubbled:s({onCompositionUpdate:null}),captured:s({onCompositionUpdateCapture:null})},dependencies:[d.topBlur,d.topCompositionUpdate,d.topKeyDown,d.topKeyPress,d.topKeyUp,d.topMouseDown]}},S=!1,z=null,U={eventTypes:h,extractEvents:function(A,M,t,I,g){return[N(A,M,t,I,g),C(A,M,t,I,g)]}};A.exports=U},function(A,M,t){"use strict";var I=t(195),g=t(10),e=t(17),i=(t(482),t(473)),T=t(487),E=t(491),N=(t(4),E(function(A){return T(A)})),o=!1,n="cssFloat";if(g.canUseDOM){var C=document.createElement("div").style;try{C.font=""}catch(A){o=!0}void 0===document.documentElement.style.cssFloat&&(n="styleFloat")}var c={createMarkupForStyles:function(A){var M="";for(var t in A)if(A.hasOwnProperty(t)){var I=A[t];null!=I&&(M+=N(t)+":",M+=i(t,I)+";")}return M||null},setValueForStyles:function(A,M){var t=A.style;for(var g in M)if(M.hasOwnProperty(g)){var e=i(g,M[g]);if("float"===g&&(g=n),e)t[g]=e;else{var T=o&&I.shorthandPropertyExpansions[g];if(T)for(var E in T)t[E]="";else t[g]=""}}}};e.measureMethods(c,"CSSPropertyOperations",{setValueForStyles:"setValueForStyles"}),A.exports=c},function(A,M,t){"use strict";function I(A){var M=A.nodeName&&A.nodeName.toLowerCase();return"select"===M||"input"===M&&"file"===A.type}function g(A){var 
M=u.getPooled(h.change,z,A,w(A));x.accumulateTwoPhaseDispatches(M),y.batchedUpdates(e,M)}function e(A){s.enqueueEvents(A),s.processEventQueue(!1)}function i(A,M){S=A,z=M,S.attachEvent("onchange",g)}function T(){S&&(S.detachEvent("onchange",g),S=null,z=null)}function E(A,M,t){if(A===d.topChange)return t}function N(A,M,t){A===d.topFocus?(T(),i(M,t)):A===d.topBlur&&T()}function o(A,M){S=A,z=M,U=A.value,p=Object.getOwnPropertyDescriptor(A.constructor.prototype,"value"),Object.defineProperty(S,"value",F),S.attachEvent("onpropertychange",C)}function n(){S&&(delete S.value,S.detachEvent("onpropertychange",C),S=null,z=null,U=null,p=null)}function C(A){if("value"===A.propertyName){var M=A.srcElement.value;M!==U&&(U=M,g(A))}}function c(A,M,t){if(A===d.topInput)return t}function D(A,M,t){A===d.topFocus?(n(),o(M,t)):A===d.topBlur&&n()}function a(A,M,t){if((A===d.topSelectionChange||A===d.topKeyUp||A===d.topKeyDown)&&S&&S.value!==U)return U=S.value,z}function B(A){return A.nodeName&&"input"===A.nodeName.toLowerCase()&&("checkbox"===A.type||"radio"===A.type)}function Q(A,M,t){if(A===d.topClick)return t}var r=t(24),s=t(52),x=t(53),j=t(10),y=t(18),u=t(37),w=t(119),L=t(122),l=t(222),Y=t(30),d=r.topLevelTypes,h={change:{phasedRegistrationNames:{bubbled:Y({onChange:null}),captured:Y({onChangeCapture:null})},dependencies:[d.topBlur,d.topChange,d.topClick,d.topFocus,d.topInput,d.topKeyDown,d.topKeyUp,d.topSelectionChange]}},S=null,z=null,U=null,p=null,O=!1;j.canUseDOM&&(O=L("change")&&(!("documentMode"in document)||document.documentMode>8));var m=!1;j.canUseDOM&&(m=L("input")&&(!("documentMode"in document)||document.documentMode>9));var F={get:function(){return p.get.call(this)},set:function(A){U=""+A,p.set.call(this,A)}},f={eventTypes:h,extractEvents:function(A,M,t,g,e){var i,T;if(I(M)?O?i=E:T=N:l(M)?m?i=c:(i=a,T=D):B(M)&&(i=Q),i){var o=i(A,M,t);if(o){var n=u.getPooled(h.change,o,g,e);return 
n.type="change",x.accumulateTwoPhaseDispatches(n),n}}T&&T(A,M,t)}};A.exports=f},function(A,M){"use strict";var t=0,I={createReactRootIndex:function(){return t++}};A.exports=I},function(A,M,t){"use strict";function I(A){return A.substring(1,A.indexOf(" "))}var g=t(10),e=t(484),i=t(23),T=t(227),E=t(2),N=/^(<[^ \/>]+)/,o="data-danger-index",n={dangerouslyRenderMarkup:function(A){g.canUseDOM?void 0:E(!1);for(var M,t={},n=0;n1?1-M:void 0;return this._fallbackText=g.slice(A,T),this._fallbackText}}),g.addPoolingTo(I),A.exports=I},function(A,M,t){"use strict";var I,g=t(46),e=t(10),i=g.injection.MUST_USE_ATTRIBUTE,T=g.injection.MUST_USE_PROPERTY,E=g.injection.HAS_BOOLEAN_VALUE,N=g.injection.HAS_SIDE_EFFECTS,o=g.injection.HAS_NUMERIC_VALUE,n=g.injection.HAS_POSITIVE_NUMERIC_VALUE,C=g.injection.HAS_OVERLOADED_BOOLEAN_VALUE;if(e.canUseDOM){var c=document.implementation;I=c&&c.hasFeature&&c.hasFeature("http://www.w3.org/TR/SVG11/feature#BasicStructure","1.1")}var D={isCustomAttribute:RegExp.prototype.test.bind(/^(data|aria)-[a-z_][a-z\d_.\-]*$/),Properties:{accept:null,acceptCharset:null,accessKey:null,action:null,allowFullScreen:i|E,allowTransparency:i,alt:null,async:E,autoComplete:null,autoPlay:E,capture:i|E,cellPadding:null,cellSpacing:null,charSet:i,challenge:i,checked:T|E,classID:i,className:I?i:T,cols:i|n,colSpan:null,content:null,contentEditable:null,contextMenu:i,controls:T|E,coords:null,crossOrigin:null,data:null,dateTime:i,default:E,defer:E,dir:null,disabled:i|E,download:C,draggable:null,encType:null,form:i,formAction:i,formEncType:i,formMethod:i,formNoValidate:E,formTarget:i,frameBorder:i,headers:null,height:i,hidden:i|E,high:null,href:null,hrefLang:null,htmlFor:null,httpEquiv:null,icon:null,id:T,inputMode:i,integrity:null,is:i,keyParams:i,keyType:i,kind:null,label:null,lang:null,list:i,loop:T|E,low:null,manifest:i,marginHeight:null,marginWidth:null,max:null,maxLength:i,media:i,mediaGroup:null,method:null,min:null,minLength:i,multiple:T|E,muted:T|E,name:null,nonce:i,n
oValidate:E,open:E,optimum:null,pattern:null,placeholder:null,poster:null,preload:null,radioGroup:null,readOnly:T|E,rel:null,required:E,reversed:E,role:i,rows:i|n,rowSpan:null,sandbox:null,scope:null,scoped:E,scrolling:null,seamless:i|E,selected:T|E,shape:null,size:i|n,sizes:i,span:n,spellCheck:null,src:null,srcDoc:T,srcLang:null,srcSet:i,start:o,step:null,style:null,summary:null,tabIndex:null,target:null,title:null,type:null,useMap:null,value:T|N,width:i,wmode:i,wrap:null,about:i,datatype:i,inlist:i,prefix:i,property:i,resource:i,typeof:i,vocab:i,autoCapitalize:i,autoCorrect:i,autoSave:null,color:null,itemProp:i,itemScope:i|E,itemType:i,itemID:i,itemRef:i,results:null,security:i,unselectable:i},DOMAttributeNames:{acceptCharset:"accept-charset",className:"class",htmlFor:"for",httpEquiv:"http-equiv"},DOMPropertyNames:{autoComplete:"autocomplete",autoFocus:"autofocus",autoPlay:"autoplay",autoSave:"autosave",encType:"encoding",hrefLang:"hreflang",radioGroup:"radiogroup",spellCheck:"spellcheck",srcDoc:"srcdoc",srcSet:"srcset"}};A.exports=D},function(A,M,t){"use strict";var I=t(201),g=t(447),e=t(452),i=t(3),T=t(474),E={};i(E,e),i(E,{findDOMNode:T("findDOMNode","ReactDOM","react-dom",I,I.findDOMNode),render:T("render","ReactDOM","react-dom",I,I.render),unmountComponentAtNode:T("unmountComponentAtNode","ReactDOM","react-dom",I,I.unmountComponentAtNode),renderToString:T("renderToString","ReactDOMServer","react-dom/server",g,g.renderToString),renderToStaticMarkup:T("renderToStaticMarkup","ReactDOMServer","react-dom/server",g,g.renderToStaticMarkup)}),E.__SECRET_DOM_DO_NOT_USE_OR_YOU_WILL_BE_FIRED=I,E.__SECRET_DOM_SERVER_DO_NOT_USE_OR_YOU_WILL_BE_FIRED=g,A.exports=E},function(A,M,t){"use strict";var I=(t(54),t(116)),g=(t(4),"_getDOMNodeDidWarn"),e={getDOMNode:function(){return this.constructor[g]=!0,I(this)}};A.exports=e},function(A,M,t){"use strict";function I(A,M,t){var I=void 0===A[t];null!=M&&I&&(A[t]=e(M,null))}var 
g=t(36),e=t(121),i=t(124),T=t(125),E=(t(4),{instantiateChildren:function(A,M,t){if(null==A)return null;var g={};return T(A,I,g),g},updateChildren:function(A,M,t,I){if(!M&&!A)return null;var T;for(T in M)if(M.hasOwnProperty(T)){var E=A&&A[T],N=E&&E._currentElement,o=M[T];if(null!=E&&i(N,o))g.receiveComponent(E,o,t,I),M[T]=E;else{E&&g.unmountComponent(E,T);var n=e(o,null);M[T]=n}}for(T in A)!A.hasOwnProperty(T)||M&&M.hasOwnProperty(T)||g.unmountComponent(A[T]);return M},unmountChildren:function(A){for(var M in A)if(A.hasOwnProperty(M)){var t=A[M];g.unmountComponent(t)}}});A.exports=E},function(A,M,t){"use strict";function I(A){var M=A._currentElement._owner||null;if(M){var t=M.getName();if(t)return" Check the render method of ` + "`" + `"+t+"` + "`" + `."}return""}function g(A){}var e=t(112),i=t(25),T=t(13),E=t(54),N=t(17),o=t(69),n=(t(68),t(36)),C=t(114),c=t(3),D=t(56),a=t(2),B=t(124);t(4);g.prototype.render=function(){var A=E.get(this)._currentElement.type; +return A(this.props,this.context,this.updater)};var Q=1,r={construct:function(A){this._currentElement=A,this._rootNodeID=null,this._instance=null,this._pendingElement=null,this._pendingStateQueue=null,this._pendingReplaceState=!1,this._pendingForceUpdate=!1,this._renderedComponent=null,this._context=null,this._mountOrder=0,this._topLevelWrapper=null,this._pendingCallbacks=null},mountComponent:function(A,M,t){this._context=t,this._mountOrder=Q++,this._rootNodeID=A;var I,e,i=this._processProps(this._currentElement.props),N=this._processContext(t),o=this._currentElement.type,c="prototype"in o;c&&(I=new o(i,N,C)),c&&null!==I&&I!==!1&&!T.isValidElement(I)||(e=I,I=new g(o)),I.props=i,I.context=N,I.refs=D,I.updater=C,this._instance=I,E.set(I,this);var B=I.state;void 0===B&&(I.state=B=null),"object"!=typeof B||Array.isArray(B)?a(!1):void 
0,this._pendingStateQueue=null,this._pendingReplaceState=!1,this._pendingForceUpdate=!1,I.componentWillMount&&(I.componentWillMount(),this._pendingStateQueue&&(I.state=this._processPendingState(I.props,I.context))),void 0===e&&(e=this._renderValidatedComponent()),this._renderedComponent=this._instantiateReactComponent(e);var r=n.mountComponent(this._renderedComponent,A,M,this._processChildContext(t));return I.componentDidMount&&M.getReactMountReady().enqueue(I.componentDidMount,I),r},unmountComponent:function(){var A=this._instance;A.componentWillUnmount&&A.componentWillUnmount(),n.unmountComponent(this._renderedComponent),this._renderedComponent=null,this._instance=null,this._pendingStateQueue=null,this._pendingReplaceState=!1,this._pendingForceUpdate=!1,this._pendingCallbacks=null,this._pendingElement=null,this._context=null,this._rootNodeID=null,this._topLevelWrapper=null,E.remove(A)},_maskContext:function(A){var M=null,t=this._currentElement.type,I=t.contextTypes;if(!I)return D;M={};for(var g in I)M[g]=A[g];return M},_processContext:function(A){var M=this._maskContext(A);return M},_processChildContext:function(A){var M=this._currentElement.type,t=this._instance,I=t.getChildContext&&t.getChildContext();if(I){"object"!=typeof M.childContextTypes?a(!1):void 0;for(var g in I)g in M.childContextTypes?void 0:a(!1);return c({},A,I)}return A},_processProps:function(A){return A},_checkPropTypes:function(A,M,t){var g=this.getName();for(var e in A)if(A.hasOwnProperty(e)){var i;try{"function"!=typeof A[e]?a(!1):void 0,i=A[e](M,e,g,t)}catch(A){i=A}if(i instanceof Error){I(this);t===o.prop}}},receiveComponent:function(A,M,t){var 
I=this._currentElement,g=this._context;this._pendingElement=null,this.updateComponent(M,I,A,g,t)},performUpdateIfNecessary:function(A){null!=this._pendingElement&&n.receiveComponent(this,this._pendingElement||this._currentElement,A,this._context),(null!==this._pendingStateQueue||this._pendingForceUpdate)&&this.updateComponent(A,this._currentElement,this._currentElement,this._context,this._context)},updateComponent:function(A,M,t,I,g){var e,i=this._instance,T=this._context===g?i.context:this._processContext(g);M===t?e=t.props:(e=this._processProps(t.props),i.componentWillReceiveProps&&i.componentWillReceiveProps(e,T));var E=this._processPendingState(e,T),N=this._pendingForceUpdate||!i.shouldComponentUpdate||i.shouldComponentUpdate(e,E,T);N?(this._pendingForceUpdate=!1,this._performComponentUpdate(t,e,E,T,A,g)):(this._currentElement=t,this._context=g,i.props=e,i.state=E,i.context=T)},_processPendingState:function(A,M){var t=this._instance,I=this._pendingStateQueue,g=this._pendingReplaceState;if(this._pendingReplaceState=!1,this._pendingStateQueue=null,!I)return t.state;if(g&&1===I.length)return I[0];for(var e=c({},g?I[0]:t.state),i=g?1:0;i=0||null!=M.is}function B(A){D(A),this._tag=A.toLowerCase(),this._renderedChildren=null,this._previousStyle=null,this._previousStyleCopy=null,this._rootNodeID=null,this._wrapperState=null,this._topLevelWrapper=null,this._nodeWithLegacyProperties=null}var 
Q=t(426),r=t(428),s=t(46),x=t(109),j=t(24),y=t(67),u=t(111),w=t(441),L=t(444),l=t(445),Y=t(203),d=t(448),h=t(12),S=t(453),z=t(17),U=t(114),p=t(3),O=t(72),m=t(73),F=t(2),f=(t(122),t(30)),k=t(74),R=t(123),J=(t(228),t(126),t(4),y.deleteListener),G=y.listenTo,H=y.registrationNameModules,b={string:!0,number:!0},X=f({children:null}),v=f({style:null}),W=f({__html:null}),V=1,P={topAbort:"abort",topCanPlay:"canplay",topCanPlayThrough:"canplaythrough",topDurationChange:"durationchange",topEmptied:"emptied",topEncrypted:"encrypted",topEnded:"ended",topError:"error",topLoadedData:"loadeddata",topLoadedMetadata:"loadedmetadata",topLoadStart:"loadstart",topPause:"pause",topPlay:"play",topPlaying:"playing",topProgress:"progress",topRateChange:"ratechange",topSeeked:"seeked",topSeeking:"seeking",topStalled:"stalled",topSuspend:"suspend",topTimeUpdate:"timeupdate",topVolumeChange:"volumechange",topWaiting:"waiting"},Z={area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0},K={listing:!0,pre:!0,textarea:!0},q=(p({menuitem:!0},Z),/^[a-zA-Z][a-zA-Z:_\.\-\d]*$/),_={},$={}.hasOwnProperty;B.displayName="ReactDOMComponent",B.Mixin={construct:function(A){this._currentElement=A},mountComponent:function(A,M,t){this._rootNodeID=A;var I=this._currentElement.props;switch(this._tag){case"iframe":case"img":case"form":case"video":case"audio":this._wrapperState={listeners:null},M.getReactMountReady().enqueue(n,this);break;case"button":I=w.getNativeProps(this,I,t);break;case"input":L.mountWrapper(this,I,t),I=L.getNativeProps(this,I,t);break;case"option":l.mountWrapper(this,I,t),I=l.getNativeProps(this,I,t);break;case"select":Y.mountWrapper(this,I,t),I=Y.getNativeProps(this,I,t),t=Y.processChildContext(this,I,t);break;case"textarea":d.mountWrapper(this,I,t),I=d.getNativeProps(this,I,t)}E(this,I);var g;if(M.useCreateElement){var 
e=t[h.ownerDocumentContextKey],i=e.createElement(this._currentElement.type);x.setAttributeForID(i,this._rootNodeID),h.getID(i),this._updateDOMProperties({},I,M,i),this._createInitialChildren(M,I,t,i),g=i}else{var T=this._createOpenTagMarkupAndPutListeners(M,I),N=this._createContentMarkup(M,I,t);g=!N&&Z[this._tag]?T+"/>":T+">"+N+""}switch(this._tag){case"input":M.getReactMountReady().enqueue(C,this);case"button":case"select":case"textarea":I.autoFocus&&M.getReactMountReady().enqueue(Q.focusDOMComponent,this)}return g},_createOpenTagMarkupAndPutListeners:function(A,M){var t="<"+this._currentElement.type;for(var I in M)if(M.hasOwnProperty(I)){var g=M[I];if(null!=g)if(H.hasOwnProperty(I))g&&N(this._rootNodeID,I,g,A);else{I===v&&(g&&(g=this._previousStyleCopy=p({},M.style)),g=r.createMarkupForStyles(g));var e=null;null!=this._tag&&a(this._tag,M)?I!==X&&(e=x.createMarkupForCustomAttribute(I,g)):e=x.createMarkupForProperty(I,g),e&&(t+=" "+e)}}if(A.renderToStaticMarkup)return t;var i=x.createMarkupForID(this._rootNodeID);return t+" "+i},_createContentMarkup:function(A,M,t){var I="",g=M.dangerouslySetInnerHTML;if(null!=g)null!=g.__html&&(I=g.__html);else{var e=b[typeof M.children]?M.children:null,i=null!=e?null:M.children;if(null!=e)I=m(e);else if(null!=i){var T=this.mountChildren(i,A,t);I=T.join("")}}return K[this._tag]&&"\n"===I.charAt(0)?"\n"+I:I},_createInitialChildren:function(A,M,t,I){var g=M.dangerouslySetInnerHTML;if(null!=g)null!=g.__html&&k(I,g.__html);else{var e=b[typeof M.children]?M.children:null,i=null!=e?null:M.children;if(null!=e)R(I,e);else if(null!=i)for(var T=this.mountChildren(i,A,t),E=0;EM.end?(t=M.end,I=M.start):(t=M.start,I=M.end),g.moveToElementText(A),g.moveStart("character",t),g.setEndPoint("EndToStart",g),g.moveEnd("character",I-t),g.select()}function T(A,M){if(window.getSelection){var t=window.getSelection(),I=A[o()].length,g=Math.min(M.start,I),e="undefined"==typeof M.end?g:Math.min(M.end,I);if(!t.extend&&g>e){var i=e;e=g,g=i}var 
T=N(A,g),E=N(A,e);if(T&&E){var n=document.createRange();n.setStart(T.node,T.offset),t.removeAllRanges(),g>e?(t.addRange(n),t.extend(E.node,E.offset)):(n.setEnd(E.node,E.offset),t.addRange(n))}}}var E=t(10),N=t(477),o=t(221),n=E.canUseDOM&&"selection"in document&&!("getSelection"in window),C={getOffsets:n?g:e,setOffsets:n?i:T};A.exports=C},function(A,M,t){"use strict";var I=t(206),g=t(458),e=t(115);I.inject();var i={renderToString:g.renderToString,renderToStaticMarkup:g.renderToStaticMarkup,version:e};A.exports=i},function(A,M,t){"use strict";function I(){this._rootNodeID&&o.updateWrapper(this)}function g(A){var M=this._currentElement.props,t=e.executeOnChange(M,A);return T.asap(I,this),t}var e=t(110),i=t(113),T=t(18),E=t(3),N=t(2),o=(t(4),{getNativeProps:function(A,M,t){null!=M.dangerouslySetInnerHTML?N(!1):void 0;var I=E({},M,{defaultValue:void 0,value:void 0,children:A._wrapperState.initialValue,onChange:A._wrapperState.onChange});return I},mountWrapper:function(A,M){var t=M.defaultValue,I=M.children;null!=I&&(null!=t?N(!1):void 0,Array.isArray(I)&&(I.length<=1?void 0:N(!1),I=I[0]),t=""+I),null==t&&(t="");var i=e.getValue(M);A._wrapperState={initialValue:""+(null!=i?i:t),onChange:g.bind(A)}},updateWrapper:function(A){var M=A._currentElement.props,t=e.getValue(M);null!=t&&i.updatePropertyByID(A._rootNodeID,"value",""+t)}});A.exports=o},function(A,M,t){"use strict";function I(A){g.enqueueEvents(A),g.processEventQueue(!1)}var g=t(52),e={handleTopLevel:function(A,M,t,e,i){var T=g.extractEvents(A,M,t,e,i);I(T)}};A.exports=e},function(A,M,t){"use strict";function I(A){var M=C.getID(A),t=n.getReactRootIDFromNodeID(M),I=C.findReactContainerForID(t),g=C.getFirstReactDOM(I);return g}function g(A,M){this.topLevelType=A,this.nativeEvent=M,this.ancestors=[]}function e(A){i(A)}function i(A){for(var M=C.getFirstReactDOM(a(A.nativeEvent))||window,t=M;t;)A.ancestors.push(t),t=I(t);for(var g=0;g=M)return{node:g,offset:M-e};e=i}g=t(I(g))}}A.exports=g},function(A,M,t){"use 
strict";function I(A){return g.isValidElement(A)?void 0:e(!1),A}var g=t(13),e=t(2);A.exports=I},function(A,M,t){"use strict";function I(A){return'"'+g(A)+'"'}var g=t(73);A.exports=I},function(A,M,t){"use strict";var I=t(12);A.exports=I.renderSubtreeIntoContainer},function(A,M){"use strict";function t(A){return A.replace(I,function(A,M){return M.toUpperCase()})}var I=/-(.)/g;A.exports=t},function(A,M,t){"use strict";function I(A){return g(A.replace(e,"ms-"))}var g=t(481),e=/^-ms-/;A.exports=I},function(A,M,t){"use strict";function I(A){return!!A&&("object"==typeof A||"function"==typeof A)&&"length"in A&&!("setInterval"in A)&&"number"!=typeof A.nodeType&&(Array.isArray(A)||"callee"in A||"item"in A)}function g(A){return I(A)?Array.isArray(A)?A.slice():e(A):[A]}var e=t(492);A.exports=g},function(A,M,t){"use strict";function I(A){var M=A.match(o);return M&&M[1].toLowerCase()}function g(A,M){var t=N;N?void 0:E(!1);var g=I(A),e=g&&T(g);if(e){t.innerHTML=e[1]+A+e[2];for(var o=e[0];o--;)t=t.lastChild}else t.innerHTML=A;var n=t.getElementsByTagName("script");n.length&&(M?void 0:E(!1),i(n).forEach(M));for(var C=i(t.childNodes);t.lastChild;)t.removeChild(t.lastChild);return C}var e=t(10),i=t(483),T=t(227),E=t(2),N=e.canUseDOM?document.createElement("div"):null,o=/^\s*<(\w+)/;A.exports=g},function(A,M){"use strict";function t(A){return A===window?{x:window.pageXOffset||document.documentElement.scrollLeft,y:window.pageYOffset||document.documentElement.scrollTop}:{x:A.scrollLeft,y:A.scrollTop}}A.exports=t},function(A,M){"use strict";function t(A){return A.replace(I,"-$1").toLowerCase()}var I=/([A-Z])/g;A.exports=t},function(A,M,t){"use strict";function I(A){return g(A).replace(e,"-ms-")}var g=t(486),e=/^ms-/;A.exports=I},function(A,M){"use strict";function t(A){return!(!A||!("function"==typeof Node?A instanceof Node:"object"==typeof A&&"number"==typeof A.nodeType&&"string"==typeof A.nodeName))}A.exports=t},function(A,M,t){"use strict";function I(A){return g(A)&&3==A.nodeType}var 
g=t(488);A.exports=I},function(A,M){"use strict";function t(A,M,t){if(!A)return null;var g={};for(var e in A)I.call(A,e)&&(g[e]=M.call(t,A[e],e,A));return g}var I=Object.prototype.hasOwnProperty;A.exports=t},function(A,M){"use strict";function t(A){var M={};return function(t){return M.hasOwnProperty(t)||(M[t]=A.call(this,t)),M[t]}}A.exports=t},function(A,M,t){"use strict";function I(A){var M=A.length;if(Array.isArray(A)||"object"!=typeof A&&"function"!=typeof A?g(!1):void 0,"number"!=typeof M?g(!1):void 0,0===M||M-1 in A?void 0:g(!1),A.hasOwnProperty)try{return Array.prototype.slice.call(A)}catch(A){}for(var t=Array(M),I=0;I=400){var T="cannot "+M.method+" "+M.url+" ("+i.status+")";A=new e(T),A.status=i.status,A.body=i.body,A.res=i,I(A)}else g?I(new e(g)):t(i)})})},g.prototype.then=function(){var A=this.promise();return A.then.apply(A,arguments)}},function(A,M,t){function I(){}function g(A){var M={}.toString.call(A);switch(M){case"[object File]":case"[object Blob]":case"[object FormData]":return!0;default:return!1}}function e(A){if(!s(A))return A;var M=[];for(var t in A)null!=A[t]&&i(M,t,A[t]);return M.join("&")}function i(A,M,t){return Array.isArray(t)?t.forEach(function(t){i(A,M,t)}):void A.push(encodeURIComponent(M)+"="+encodeURIComponent(t))}function T(A){for(var M,t,I={},g=A.split("&"),e=0,i=g.length;e=200&&M.status<300)return t.callback(A,M);var I=new Error(M.statusText||"Unsuccessful HTTP response");I.original=A,I.response=M,I.status=M.status,t.callback(I,M)})}function D(A,M){var t=x("DELETE",A);return M&&t.end(M),t}var a,B=t(509),Q=t(510),r=t(507),s=t(234);a="undefined"!=typeof window?window:"undefined"!=typeof self?self:this;var x=A.exports=t(508).bind(null,c);x.getXHR=function(){if(!(!a.XMLHttpRequest||a.location&&"file:"==a.location.protocol&&a.ActiveXObject))return new XMLHttpRequest;try{return new ActiveXObject("Microsoft.XMLHTTP")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP.6.0")}catch(A){}try{return new 
ActiveXObject("Msxml2.XMLHTTP.3.0")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP")}catch(A){}return!1};var j="".trim?function(A){return A.trim()}:function(A){return A.replace(/(^\s*|\s*$)/g,"")};x.serializeObject=e,x.parseString=T,x.types={html:"text/html",json:"application/json",xml:"application/xml",urlencoded:"application/x-www-form-urlencoded",form:"application/x-www-form-urlencoded","form-data":"application/x-www-form-urlencoded"},x.serialize={"application/x-www-form-urlencoded":e,"application/json":JSON.stringify},x.parse={"application/x-www-form-urlencoded":T,"application/json":JSON.parse},C.prototype.get=function(A){return this.header[A.toLowerCase()]},C.prototype.setHeaderProperties=function(A){var M=this.header["content-type"]||"";this.type=o(M);var t=n(M);for(var I in t)this[I]=t[I]},C.prototype.parseBody=function(A){var M=x.parse[this.type];return!M&&N(this.type)&&(M=x.parse["application/json"]),M&&A&&(A.length||A instanceof Object)?M(A):null},C.prototype.setStatusProperties=function(A){1223===A&&(A=204);var M=A/100|0;this.status=this.statusCode=A,this.statusType=M,this.info=1==M,this.ok=2==M,this.clientError=4==M,this.serverError=5==M,this.error=(4==M||5==M)&&this.toError(),this.accepted=202==A,this.noContent=204==A,this.badRequest=400==A,this.unauthorized=401==A,this.notAcceptable=406==A,this.notFound=404==A,this.forbidden=403==A},C.prototype.toError=function(){var A=this.req,M=A.method,t=A.url,I="cannot "+M+" "+t+" ("+this.status+")",g=new Error(I);return g.status=this.status,g.method=M,g.url=t,g},x.Response=C,B(c.prototype);for(var y in r)c.prototype[y]=r[y];c.prototype.abort=function(){if(!this.aborted)return this.aborted=!0,this.xhr&&this.xhr.abort(),this.clearTimeout(),this.emit("abort"),this},c.prototype.type=function(A){return this.set("Content-Type",x.types[A]||A),this},c.prototype.responseType=function(A){return this._responseType=A,this},c.prototype.accept=function(A){return 
this.set("Accept",x.types[A]||A),this},c.prototype.auth=function(A,M,t){switch(t||(t={type:"basic"}),t.type){case"basic":var I=btoa(A+":"+M);this.set("Authorization","Basic "+I);break;case"auto":this.username=A,this.password=M}return this},c.prototype.query=function(A){return"string"!=typeof A&&(A=e(A)),A&&this._query.push(A),this},c.prototype.attach=function(A,M,t){return this._getFormData().append(A,M,t||M.name),this},c.prototype._getFormData=function(){return this._formData||(this._formData=new a.FormData),this._formData},c.prototype.send=function(A){var M=s(A),t=this._header["content-type"];if(M&&s(this._data))for(var I in A)this._data[I]=A[I];else"string"==typeof A?(t||this.type("form"),t=this._header["content-type"],"application/x-www-form-urlencoded"==t?this._data=this._data?this._data+"&"+A:A:this._data=(this._data||"")+A):this._data=A;return!M||g(A)?this:(t||this.type("json"),this)},C.prototype.parse=function(A){return a.console&&console.warn("Client-side parse() method has been renamed to serialize(). 
This method is not compatible with superagent v2.0"),this.serialize(A),this},C.prototype.serialize=function(A){return this._parser=A,this},c.prototype.callback=function(A,M){var t=this._callback;this.clearTimeout(),t(A,M)},c.prototype.crossDomainError=function(){var A=new Error("Request has been terminated\nPossible causes: the network is offline, Origin is not allowed by Access-Control-Allow-Origin, the page is being unloaded, etc.");A.crossDomain=!0,A.status=this.status,A.method=this.method,A.url=this.url,this.callback(A)},c.prototype.timeoutError=function(){var A=this._timeout,M=new Error("timeout of "+A+"ms exceeded");M.timeout=A,this.callback(M)},c.prototype.withCredentials=function(){return this._withCredentials=!0,this},c.prototype.end=function(A){var M=this,t=this.xhr=x.getXHR(),e=this._query.join("&"),i=this._timeout,T=this._formData||this._data;this._callback=A||I,t.onreadystatechange=function(){if(4==t.readyState){var A;try{A=t.status}catch(M){A=0}if(0==A){if(M.timedout)return M.timeoutError();if(M.aborted)return;return M.crossDomainError()}M.emit("end")}};var E=function(A){A.total>0&&(A.percent=A.loaded/A.total*100),A.direction="download",M.emit("progress",A)};this.hasListeners("progress")&&(t.onprogress=E);try{t.upload&&this.hasListeners("progress")&&(t.upload.onprogress=E)}catch(A){}if(i&&!this._timer&&(this._timer=setTimeout(function(){M.timedout=!0,M.abort()},i)),e&&(e=x.serializeObject(e),this.url+=~this.url.indexOf("?")?"&"+e:"?"+e),this.username&&this.password?t.open(this.method,this.url,!0,this.username,this.password):t.open(this.method,this.url,!0),this._withCredentials&&(t.withCredentials=!0),"GET"!=this.method&&"HEAD"!=this.method&&"string"!=typeof T&&!g(T)){var o=this._header["content-type"],n=this._parser||x.serialize[o?o.split(";")[0]:""];!n&&N(o)&&(n=x.serialize["application/json"]),n&&(T=n(T))}for(var C in this.header)null!=this.header[C]&&t.setRequestHeader(C,this.header[C]);return 
this._responseType&&(t.responseType=this._responseType),this.emit("request",this),t.send("undefined"!=typeof T?T:null),this},x.Request=c,x.get=function(A,M,t){var I=x("GET",A);return"function"==typeof M&&(t=M,M=null),M&&I.query(M),t&&I.end(t),I},x.head=function(A,M,t){var I=x("HEAD",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},x.del=D,x.delete=D,x.patch=function(A,M,t){var I=x("PATCH",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},x.post=function(A,M,t){var I=x("POST",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},x.put=function(A,M,t){var I=x("PUT",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I}},function(A,M,t){var I=t(234);M.clearTimeout=function(){return this._timeout=0,clearTimeout(this._timer),this},M.parse=function(A){return this._parser=A,this},M.timeout=function(A){return this._timeout=A,this},M.then=function(A,M){return this.end(function(t,I){t?M(t):A(I)})},M.use=function(A){return A(this),this},M.get=function(A){return this._header[A.toLowerCase()]},M.getHeader=M.get,M.set=function(A,M){if(I(A)){for(var t in A)this.set(t,A[t]);return this}return this._header[A.toLowerCase()]=M,this.header[A]=M,this},M.unset=function(A){return delete this._header[A.toLowerCase()],delete this.header[A],this},M.field=function(A,M){return this._getFormData().append(A,M),this}},function(A,M){function t(A,M,t){return"function"==typeof t?new A("GET",M).end(t):2==arguments.length?new A("GET",M):new A(M,t)}A.exports=t},function(A,M,t){function I(A){if(A)return g(A)}function g(A){for(var M in I.prototype)A[M]=I.prototype[M];return A}A.exports=I,I.prototype.on=I.prototype.addEventListener=function(A,M){return this._callbacks=this._callbacks||{},(this._callbacks["$"+A]=this._callbacks["$"+A]||[]).push(M),this},I.prototype.once=function(A,M){function t(){this.off(A,t),M.apply(this,arguments)}return 
t.fn=M,this.on(A,t),this},I.prototype.off=I.prototype.removeListener=I.prototype.removeAllListeners=I.prototype.removeEventListener=function(A,M){if(this._callbacks=this._callbacks||{},0==arguments.length)return this._callbacks={},this;var t=this._callbacks["$"+A];if(!t)return this;if(1==arguments.length)return delete this._callbacks["$"+A],this;for(var I,g=0;g1&&(I=t[0]+"@",A=t[1]),A=A.replace(z,".");var g=A.split("."),e=T(g,M).join(".");return I+e}function N(A){for(var M,t,I=[],g=0,e=A.length;g=55296&&M<=56319&&g65535&&(A-=65536,M+=m(A>>>10&1023|55296),A=56320|1023&A),M+=m(A)}).join("")}function n(A){return A-48<10?A-22:A-65<26?A-65:A-97<26?A-97:j}function C(A,M){return A+22+75*(A<26)-((0!=M)<<5)}function c(A,M,t){var I=0;for(A=t?O(A/L):A>>1,A+=O(A/M);A>p*w>>1;I+=j)A=O(A/p);return O(I+(p+1)*A/(A+u))}function D(A){var M,t,I,g,e,T,E,N,C,D,a=[],B=A.length,Q=0,r=l,s=Y;for(t=A.lastIndexOf(d),t<0&&(t=0),I=0;I=128&&i("not-basic"),a.push(A.charCodeAt(I));for(g=t>0?t+1:0;g=B&&i("invalid-input"),N=n(A.charCodeAt(g++)),(N>=j||N>O((x-Q)/T))&&i("overflow"),Q+=N*T,C=E<=s?y:E>=s+w?w:E-s,!(NO(x/D)&&i("overflow"),T*=D;M=a.length+1,s=c(Q-e,M,0==e),O(Q/M)>x-r&&i("overflow"),r+=O(Q/M),Q%=M,a.splice(Q++,0,r)}return o(a)}function a(A){var M,t,I,g,e,T,E,o,n,D,a,B,Q,r,s,u=[];for(A=N(A),B=A.length,M=l,t=0,e=Y,T=0;T=M&&aO((x-t)/Q)&&i("overflow"),t+=(E-M)*Q,M=E,T=0;Tx&&i("overflow"),a==M){for(o=t,n=j;D=n<=e?y:n>=e+w?w:n-e,!(o= 0x80 (not a basic code point)","invalid-input":"Invalid input"},p=j-y,O=Math.floor,m=String.fromCharCode;s={version:"1.3.2",ucs2:{decode:N,encode:o},decode:D,encode:a,toASCII:Q,toUnicode:B},I=function(){return s}.call(M,t,M,A),!(void 0!==I&&(A.exports=I))}(this)}).call(M,t(127)(A),function(){return this}())}]);`) +},function(A,M,t){var I;(function(A,g){!function(e){function i(A){throw RangeError(U[A])}function T(A,M){for(var t=A.length,I=[];t--;)I[t]=M(A[t]);return I}function E(A,M){var t=A.split("@"),I="";t.length>1&&(I=t[0]+"@",A=t[1]),A=A.replace(z,".");var 
g=A.split("."),e=T(g,M).join(".");return I+e}function N(A){for(var M,t,I=[],g=0,e=A.length;g=55296&&M<=56319&&g65535&&(A-=65536,M+=m(A>>>10&1023|55296),A=56320|1023&A),M+=m(A)}).join("")}function n(A){return A-48<10?A-22:A-65<26?A-65:A-97<26?A-97:j}function C(A,M){return A+22+75*(A<26)-((0!=M)<<5)}function c(A,M,t){var I=0;for(A=t?O(A/L):A>>1,A+=O(A/M);A>p*u>>1;I+=j)A=O(A/p);return O(I+(p+1)*A/(A+w))}function D(A){var M,t,I,g,e,T,E,N,C,D,a=[],B=A.length,Q=0,r=Y,s=l;for(t=A.lastIndexOf(d),t<0&&(t=0),I=0;I=128&&i("not-basic"),a.push(A.charCodeAt(I));for(g=t>0?t+1:0;g=B&&i("invalid-input"),N=n(A.charCodeAt(g++)),(N>=j||N>O((x-Q)/T))&&i("overflow"),Q+=N*T,C=E<=s?y:E>=s+u?u:E-s,!(NO(x/D)&&i("overflow"),T*=D;M=a.length+1,s=c(Q-e,M,0==e),O(Q/M)>x-r&&i("overflow"),r+=O(Q/M),Q%=M,a.splice(Q++,0,r)}return o(a)}function a(A){var M,t,I,g,e,T,E,o,n,D,a,B,Q,r,s,w=[];for(A=N(A),B=A.length,M=Y,t=0,e=l,T=0;T=M&&aO((x-t)/Q)&&i("overflow"),t+=(E-M)*Q,M=E,T=0;Tx&&i("overflow"),a==M){for(o=t,n=j;D=n<=e?y:n>=e+u?u:n-e,!(o= 0x80 (not a basic code point)","invalid-input":"Invalid input"},p=j-y,O=Math.floor,m=String.fromCharCode;s={version:"1.3.2",ucs2:{decode:N,encode:o},decode:D,encode:a,toASCII:Q,toUnicode:B},I=function(){return s}.call(M,t,M,A),!(void 0!==I&&(A.exports=I))}(this)}).call(M,t(237)(A),function(){return this}())}]);`) -func productionIndex_bundle20161124t003902zJsBytes() ([]byte, error) { - return _productionIndex_bundle20161124t003902zJs, nil +func productionIndex_bundle20170123t041649zJsBytes() ([]byte, error) { + return _productionIndex_bundle20170123t041649zJs, nil } -func productionIndex_bundle20161124t003902zJs() (*asset, error) { - bytes, err := productionIndex_bundle20161124t003902zJsBytes() +func productionIndex_bundle20170123t041649zJs() (*asset, error) { + bytes, err := productionIndex_bundle20170123t041649zJsBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "production/index_bundle-2016-11-24T00-39-02Z.js", size: 2265900, mode: 
os.FileMode(436), modTime: time.Unix(1479947956, 0)} + info := bindataFileInfo{name: "production/index_bundle-2017-01-23T04-16-49Z.js", size: 2271198, mode: os.FileMode(436), modTime: time.Unix(1485145028, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -369,7 +368,7 @@ func productionLoaderCss() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "production/loader.css", size: 1738, mode: os.FileMode(436), modTime: time.Unix(1479947956, 0)} + info := bindataFileInfo{name: "production/loader.css", size: 1738, mode: os.FileMode(436), modTime: time.Unix(1485145028, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -443,7 +442,7 @@ func productionLogoSvg() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "production/logo.svg", size: 3079, mode: os.FileMode(436), modTime: time.Unix(1479947956, 0)} + info := bindataFileInfo{name: "production/logo.svg", size: 3079, mode: os.FileMode(436), modTime: time.Unix(1485145028, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -460,7 +459,7 @@ func productionSafariPng() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "production/safari.png", size: 4971, mode: os.FileMode(436), modTime: time.Unix(1479947956, 0)} + info := bindataFileInfo{name: "production/safari.png", size: 4971, mode: os.FileMode(436), modTime: time.Unix(1485145028, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -521,7 +520,7 @@ var _bindata = map[string]func() (*asset, error){ "production/favicon.ico": productionFaviconIco, "production/firefox.png": productionFirefoxPng, "production/index.html": productionIndexHTML, - "production/index_bundle-2016-11-24T00-39-02Z.js": productionIndex_bundle20161124t003902zJs, + "production/index_bundle-2017-01-23T04-16-49Z.js": productionIndex_bundle20170123t041649zJs, "production/loader.css": productionLoaderCss, "production/logo.svg": productionLogoSvg, "production/safari.png": productionSafariPng, @@ -573,7 +572,7 @@ var 
_bintree = &bintree{nil, map[string]*bintree{ "favicon.ico": {productionFaviconIco, map[string]*bintree{}}, "firefox.png": {productionFirefoxPng, map[string]*bintree{}}, "index.html": {productionIndexHTML, map[string]*bintree{}}, - "index_bundle-2016-11-24T00-39-02Z.js": {productionIndex_bundle20161124t003902zJs, map[string]*bintree{}}, + "index_bundle-2017-01-23T04-16-49Z.js": {productionIndex_bundle20170123t041649zJs, map[string]*bintree{}}, "loader.css": {productionLoaderCss, map[string]*bintree{}}, "logo.svg": {productionLogoSvg, map[string]*bintree{}}, "safari.png": {productionSafariPng, map[string]*bintree{}}, @@ -634,6 +633,6 @@ func assetFS() *assetfs.AssetFS { panic("unreachable") } -var UIReleaseTag = "RELEASE.2016-11-24T00-39-02Z" -var UICommitID = "d05763641d2e2317182021378f4bb4b3e530b9a2" -var UIVersion = "2016-11-24T00:39:02Z" +var UIReleaseTag = "RELEASE.2017-01-23T04-16-49Z" +var UICommitID = "fd95c844548088ea0fe15ceadf4f119a71e736c8" +var UIVersion = "2017-01-23T04:16:49Z" diff --git a/vendor/vendor.json b/vendor/vendor.json index 0ec019075..2411c44d9 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -200,10 +200,10 @@ "revisionTime": "2016-08-18T00:31:20Z" }, { - "checksumSHA1": "Npl9R1pdTKB5LDiGh/hls8nMNQc=", + "checksumSHA1": "lkkQ8bAbNRvg9AceSmuAfh3udFg=", "path": "github.com/minio/miniobrowser", - "revision": "d9214007ee7c5404358031c3e5af7df69300f874", - "revisionTime": "2016-11-24T00:39:49Z" + "revision": "10e951aa618d52796584f9dd233353a52d104c8d", + "revisionTime": "2017-01-23T04:37:46Z" }, { "checksumSHA1": "GOSe2XEQI4AYwrMoLZu8vtmzkJM=", From 586058f0790ed7eddbc526393670f2008ed062db Mon Sep 17 00:00:00 2001 From: Krishnan Parthasarathi Date: Mon, 23 Jan 2017 14:02:55 +0530 Subject: [PATCH 090/100] Implement mgmt REST APIs to heal storage format. (#3604) * Implement heal format REST API handler * Implement admin peer rpc handler to re-initialize storage * Implement HealFormat API in pkg/madmin * Update pkg/madmin API.md to incl. 
HealFormat * Added unit tests for ReInitDisks rpc handler and HealFormatHandler --- cmd/admin-handlers.go | 69 ++++++++ cmd/admin-handlers_test.go | 272 +++++++++++++++-------------- cmd/admin-router.go | 2 + cmd/admin-rpc-client.go | 35 ++++ cmd/admin-rpc-server.go | 43 +++++ cmd/admin-rpc-server_test.go | 80 +++++++++ cmd/globals.go | 7 + cmd/server-main.go | 9 + cmd/test-utils_test.go | 12 ++ cmd/xl-v1.go | 8 + pkg/madmin/API.md | 29 ++- pkg/madmin/examples/heal-format.go | 49 ++++++ pkg/madmin/heal-commands.go | 29 +++ 13 files changed, 511 insertions(+), 133 deletions(-) create mode 100644 pkg/madmin/examples/heal-format.go diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 794f89689..ffd64c2ec 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -19,6 +19,7 @@ package cmd import ( "encoding/json" "encoding/xml" + "fmt" "io/ioutil" "net/http" "net/url" @@ -374,6 +375,7 @@ func (adminAPI adminAPIHandlers) ListBucketsHealHandler(w http.ResponseWriter, r } // HealBucketHandler - POST /?heal&bucket=mybucket +// - x-minio-operation = bucket // - bucket is mandatory query parameter // Heal a given bucket, if present. func (adminAPI adminAPIHandlers) HealBucketHandler(w http.ResponseWriter, r *http.Request) { @@ -425,6 +427,7 @@ func isDryRun(qval url.Values) bool { } // HealObjectHandler - POST /?heal&bucket=mybucket&object=myobject +// - x-minio-operation = object // - bucket and object are both mandatory query parameters // Heal a given object, if present. func (adminAPI adminAPIHandlers) HealObjectHandler(w http.ResponseWriter, r *http.Request) { @@ -473,3 +476,69 @@ func (adminAPI adminAPIHandlers) HealObjectHandler(w http.ResponseWriter, r *htt // Return 200 on success. writeSuccessResponseHeadersOnly(w) } + +// HealFormatHandler - POST /?heal +// - x-minio-operation = format +// - bucket and object are both mandatory query parameters +// Heal a given object, if present. 
+func (adminAPI adminAPIHandlers) HealFormatHandler(w http.ResponseWriter, r *http.Request) { + // Get current object layer instance. + objectAPI := newObjectLayerFn() + if objectAPI == nil { + writeErrorResponse(w, ErrServerNotInitialized, r.URL) + return + } + + // Validate request signature. + adminAPIErr := checkRequestAuthType(r, "", "", "") + if adminAPIErr != ErrNone { + writeErrorResponse(w, adminAPIErr, r.URL) + return + } + + // Check if this setup is an erasure code backend, since + // heal-format is only applicable to single node XL and + // distributed XL setup. + if !globalIsXL { + writeErrorResponse(w, ErrNotImplemented, r.URL) + return + } + + // Create a new set of storage instances to heal format.json. + bootstrapDisks, err := initStorageDisks(globalEndpoints) + if err != nil { + fmt.Println(traceError(err)) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + // Heal format.json on available storage. + err = healFormatXL(bootstrapDisks) + if err != nil { + fmt.Println(traceError(err)) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + // Instantiate new object layer with newly formatted storage. + newObjectAPI, err := newXLObjects(bootstrapDisks) + if err != nil { + fmt.Println(traceError(err)) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + // Set object layer with newly formatted storage to globalObjectAPI. + globalObjLayerMutex.Lock() + globalObjectAPI = newObjectAPI + globalObjLayerMutex.Unlock() + + // Shutdown storage belonging to old object layer instance. + objectAPI.Shutdown() + + // Inform peers to reinitialize storage with newly formatted storage. + reInitPeerDisks(globalAdminPeers) + + // Return 200 on success. 
+ writeSuccessResponseHeadersOnly(w) +} diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 138478213..ee6376f86 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -29,6 +29,79 @@ import ( router "github.com/gorilla/mux" ) +// adminXLTestBed - encapsulates subsystems that need to be setup for +// admin-handler unit tests. +type adminXLTestBed struct { + configPath string + xlDirs []string + objLayer ObjectLayer + mux *router.Router +} + +// prepareAdminXLTestBed - helper function that setups a single-node +// XL backend for admin-handler tests. +func prepareAdminXLTestBed() (*adminXLTestBed, error) { + // reset global variables to start afresh. + resetTestGlobals() + // Initialize minio server config. + rootPath, err := newTestConfig(globalMinioDefaultRegion) + if err != nil { + return nil, err + } + // Initializing objectLayer for HealFormatHandler. + objLayer, xlDirs, xlErr := initTestXLObjLayer() + if xlErr != nil { + return nil, xlErr + } + + // Set globalEndpoints for a single node XL setup. + for _, xlDir := range xlDirs { + globalEndpoints = append(globalEndpoints, &url.URL{ + Path: xlDir, + }) + } + + // Set globalIsXL to indicate that the setup uses an erasure code backend. + globalIsXL = true + + // initialize NSLock. + isDistXL := false + initNSLock(isDistXL) + + // Setup admin mgmt REST API handlers. + adminRouter := router.NewRouter() + registerAdminRouter(adminRouter) + + return &adminXLTestBed{ + configPath: rootPath, + xlDirs: xlDirs, + objLayer: objLayer, + mux: adminRouter, + }, nil +} + +// TearDown - method that resets the test bed for subsequent unit +// tests to start afresh. +func (atb *adminXLTestBed) TearDown() { + removeAll(atb.configPath) + removeRoots(atb.xlDirs) + resetTestGlobals() +} + +// initTestObjLayer - Helper function to initialize an XL-based object +// layer and set globalObjectAPI. 
+func initTestXLObjLayer() (ObjectLayer, []string, error) { + objLayer, xlDirs, xlErr := prepareXL() + if xlErr != nil { + return nil, nil, xlErr + } + // Make objLayer available to all internal services via globalObjectAPI. + globalObjLayerMutex.Lock() + globalObjectAPI = objLayer + globalObjLayerMutex.Unlock() + return objLayer, xlDirs, nil +} + // cmdType - Represents different service subcomands like status, stop // and restart. type cmdType int @@ -115,17 +188,11 @@ func getServiceCmdRequest(cmd cmdType, cred credential, body []byte) (*http.Requ // testServicesCmdHandler - parametrizes service subcommand tests on // cmdType value. func testServicesCmdHandler(cmd cmdType, args map[string]interface{}, t *testing.T) { - // reset globals. - // this is to make sure that the tests are not affected by modified value. - resetTestGlobals() - // initialize NSLock. - initNSLock(false) - // Initialize configuration for access/secret credentials. - rootPath, err := newTestConfig(globalMinioDefaultRegion) + adminTestBed, err := prepareAdminXLTestBed() if err != nil { - t.Fatalf("Unable to initialize server config. %s", err) + t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") } - defer removeAll(rootPath) + defer adminTestBed.TearDown() // Initialize admin peers to make admin RPC calls. Note: In a // single node setup, this degenerates to a simple function @@ -139,29 +206,12 @@ func testServicesCmdHandler(cmd cmdType, args map[string]interface{}, t *testing globalMinioAddr = eps[0].Host initGlobalAdminPeers(eps) - if cmd == statusCmd { - // Initializing objectLayer and corresponding - // []StorageAPI since DiskInfo() method requires it. - objLayer, xlDirs, xlErr := prepareXL() - if xlErr != nil { - t.Fatalf("failed to initialize XL based object layer - %v.", xlErr) - } - defer removeRoots(xlDirs) - // Make objLayer available to all internal services via globalObjectAPI. 
- globalObjLayerMutex.Lock() - globalObjectAPI = objLayer - globalObjLayerMutex.Unlock() - } - // Setting up a go routine to simulate ServerMux's // handleServiceSignals for stop and restart commands. if cmd == restartCmd { go testServiceSignalReceiver(cmd, t) } credentials := serverConfig.GetCredential() - adminRouter := router.NewRouter() - registerAdminRouter(adminRouter) - var body []byte if cmd == setCreds { @@ -174,7 +224,7 @@ func testServicesCmdHandler(cmd cmdType, args map[string]interface{}, t *testing } rec := httptest.NewRecorder() - adminRouter.ServeHTTP(rec, req) + adminTestBed.mux.ServeHTTP(rec, req) if cmd == statusCmd { expectedInfo := newObjectLayerFn().StorageInfo() @@ -232,17 +282,11 @@ func mkLockQueryVal(bucket, prefix, relTimeStr string) url.Values { // Test for locks list management REST API. func TestListLocksHandler(t *testing.T) { - // reset globals. - // this is to make sure that the tests are not affected by modified globals. - resetTestGlobals() - // initialize NSLock. - initNSLock(false) - - rootPath, err := newTestConfig(globalMinioDefaultRegion) + adminTestBed, err := prepareAdminXLTestBed() if err != nil { - t.Fatalf("Unable to initialize server config. %s", err) + t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") } - defer removeAll(rootPath) + defer adminTestBed.TearDown() // Initialize admin peers to make admin RPC calls. eps, err := parseStorageEndpoints([]string{"http://localhost"}) @@ -254,10 +298,6 @@ func TestListLocksHandler(t *testing.T) { globalMinioAddr = eps[0].Host initGlobalAdminPeers(eps) - // Setup admin mgmt REST API handlers. 
- adminRouter := router.NewRouter() - registerAdminRouter(adminRouter) - testCases := []struct { bucket string prefix string @@ -308,7 +348,7 @@ func TestListLocksHandler(t *testing.T) { t.Fatalf("Test %d - Failed to sign list locks request - %v", i+1, err) } rec := httptest.NewRecorder() - adminRouter.ServeHTTP(rec, req) + adminTestBed.mux.ServeHTTP(rec, req) if test.expectedStatus != rec.Code { t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.expectedStatus, rec.Code) } @@ -317,17 +357,11 @@ func TestListLocksHandler(t *testing.T) { // Test for locks clear management REST API. func TestClearLocksHandler(t *testing.T) { - // reset globals. - // this is to make sure that the tests are not affected by modified globals. - resetTestGlobals() - // initialize NSLock. - initNSLock(false) - - rootPath, err := newTestConfig(globalMinioDefaultRegion) + adminTestBed, err := prepareAdminXLTestBed() if err != nil { - t.Fatalf("Unable to initialize server config. %s", err) + t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") } - defer removeAll(rootPath) + defer adminTestBed.TearDown() // Initialize admin peers to make admin RPC calls. eps, err := parseStorageEndpoints([]string{"http://localhost"}) @@ -336,10 +370,6 @@ func TestClearLocksHandler(t *testing.T) { } initGlobalAdminPeers(eps) - // Setup admin mgmt REST API handlers. 
- adminRouter := router.NewRouter() - registerAdminRouter(adminRouter) - testCases := []struct { bucket string prefix string @@ -390,7 +420,7 @@ func TestClearLocksHandler(t *testing.T) { t.Fatalf("Test %d - Failed to sign clear locks request - %v", i+1, err) } rec := httptest.NewRecorder() - adminRouter.ServeHTTP(rec, req) + adminTestBed.mux.ServeHTTP(rec, req) if test.expectedStatus != rec.Code { t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.expectedStatus, rec.Code) } @@ -556,36 +586,19 @@ func TestValidateHealQueryParams(t *testing.T) { // TestListObjectsHeal - Test for ListObjectsHealHandler. func TestListObjectsHealHandler(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + adminTestBed, err := prepareAdminXLTestBed() if err != nil { - t.Fatalf("Unable to initialize server config. %s", err) + t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") } - defer removeAll(rootPath) + defer adminTestBed.TearDown() - // Initializing objectLayer and corresponding []StorageAPI - // since ListObjectsHeal() method requires it. - objLayer, xlDirs, xlErr := prepareXL() - if xlErr != nil { - t.Fatalf("failed to initialize XL based object layer - %v.", xlErr) - } - defer removeRoots(xlDirs) - - err = objLayer.MakeBucket("mybucket") + err = adminTestBed.objLayer.MakeBucket("mybucket") if err != nil { t.Fatalf("Failed to make bucket - %v", err) } // Delete bucket after running all test cases. - defer objLayer.DeleteBucket("mybucket") - - // Make objLayer available to all internal services via globalObjectAPI. - globalObjLayerMutex.Lock() - globalObjectAPI = objLayer - globalObjLayerMutex.Unlock() - - // Setup admin mgmt REST API handlers. 
- adminRouter := router.NewRouter() - registerAdminRouter(adminRouter) + defer adminTestBed.objLayer.DeleteBucket("mybucket") testCases := []struct { bucket string @@ -695,7 +708,7 @@ func TestListObjectsHealHandler(t *testing.T) { t.Fatalf("Test %d - Failed to sign list objects needing heal request - %v", i+1, err) } rec := httptest.NewRecorder() - adminRouter.ServeHTTP(rec, req) + adminTestBed.mux.ServeHTTP(rec, req) if test.statusCode != rec.Code { t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.statusCode, rec.Code) } @@ -704,36 +717,19 @@ func TestListObjectsHealHandler(t *testing.T) { // TestHealBucketHandler - Test for HealBucketHandler. func TestHealBucketHandler(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + adminTestBed, err := prepareAdminXLTestBed() if err != nil { - t.Fatalf("Unable to initialize server config. %s", err) + t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") } - defer removeAll(rootPath) + defer adminTestBed.TearDown() - // Initializing objectLayer and corresponding []StorageAPI - // since MakeBucket() and DeleteBucket() methods requires it. - objLayer, xlDirs, xlErr := prepareXL() - if xlErr != nil { - t.Fatalf("failed to initialize XL based object layer - %v.", xlErr) - } - defer removeRoots(xlDirs) - - err = objLayer.MakeBucket("mybucket") + err = adminTestBed.objLayer.MakeBucket("mybucket") if err != nil { t.Fatalf("Failed to make bucket - %v", err) } // Delete bucket after running all test cases. - defer objLayer.DeleteBucket("mybucket") - - // Make objLayer available to all internal services via globalObjectAPI. - globalObjLayerMutex.Lock() - globalObjectAPI = objLayer - globalObjLayerMutex.Unlock() - - // Setup admin mgmt REST API handlers. 
- adminRouter := router.NewRouter() - registerAdminRouter(adminRouter) + defer adminTestBed.objLayer.DeleteBucket("mybucket") testCases := []struct { bucket string @@ -771,7 +767,8 @@ func TestHealBucketHandler(t *testing.T) { req, err := newTestRequest("POST", "/?"+queryVal.Encode(), 0, nil) if err != nil { - t.Fatalf("Test %d - Failed to construct heal bucket request - %v", i+1, err) + t.Fatalf("Test %d - Failed to construct heal bucket request - %v", + i+1, err) } req.Header.Set(minioAdminOpHeader, "bucket") @@ -779,12 +776,14 @@ func TestHealBucketHandler(t *testing.T) { cred := serverConfig.GetCredential() err = signRequestV4(req, cred.AccessKey, cred.SecretKey) if err != nil { - t.Fatalf("Test %d - Failed to sign heal bucket request - %v", i+1, err) + t.Fatalf("Test %d - Failed to sign heal bucket request - %v", + i+1, err) } rec := httptest.NewRecorder() - adminRouter.ServeHTTP(rec, req) + adminTestBed.mux.ServeHTTP(rec, req) if test.statusCode != rec.Code { - t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.statusCode, rec.Code) + t.Errorf("Test %d - Expected HTTP status code %d but received %d", + i+1, test.statusCode, rec.Code) } } @@ -792,29 +791,22 @@ func TestHealBucketHandler(t *testing.T) { // TestHealObjectHandler - Test for HealObjectHandler. func TestHealObjectHandler(t *testing.T) { - rootPath, err := newTestConfig("us-east-1") + adminTestBed, err := prepareAdminXLTestBed() if err != nil { - t.Fatalf("Unable to initialize server config. %s", err) + t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") } - defer removeAll(rootPath) - - // Initializing objectLayer and corresponding []StorageAPI - // since MakeBucket(), PutObject() and DeleteBucket() method requires it. 
- objLayer, xlDirs, xlErr := prepareXL() - if xlErr != nil { - t.Fatalf("failed to initialize XL based object layer - %v.", xlErr) - } - defer removeRoots(xlDirs) + defer adminTestBed.TearDown() // Create an object myobject under bucket mybucket. bucketName := "mybucket" objName := "myobject" - err = objLayer.MakeBucket(bucketName) + err = adminTestBed.objLayer.MakeBucket(bucketName) if err != nil { t.Fatalf("Failed to make bucket %s - %v", bucketName, err) } - _, err = objLayer.PutObject(bucketName, objName, int64(len("hello")), bytes.NewReader([]byte("hello")), nil, "") + _, err = adminTestBed.objLayer.PutObject(bucketName, objName, + int64(len("hello")), bytes.NewReader([]byte("hello")), nil, "") if err != nil { t.Fatalf("Failed to create %s - %v", objName, err) } @@ -823,16 +815,7 @@ func TestHealObjectHandler(t *testing.T) { defer func(objLayer ObjectLayer, bucketName, objName string) { objLayer.DeleteObject(bucketName, objName) objLayer.DeleteBucket(bucketName) - }(objLayer, bucketName, objName) - - // Make objLayer available to all internal services via globalObjectAPI. - globalObjLayerMutex.Lock() - globalObjectAPI = objLayer - globalObjLayerMutex.Unlock() - - // Setup admin mgmt REST API handlers. - adminRouter := router.NewRouter() - registerAdminRouter(adminRouter) + }(adminTestBed.objLayer, bucketName, objName) testCases := []struct { bucket string @@ -899,9 +882,42 @@ func TestHealObjectHandler(t *testing.T) { t.Fatalf("Test %d - Failed to sign heal object request - %v", i+1, err) } rec := httptest.NewRecorder() - adminRouter.ServeHTTP(rec, req) + adminTestBed.mux.ServeHTTP(rec, req) if test.statusCode != rec.Code { t.Errorf("Test %d - Expected HTTP status code %d but received %d", i+1, test.statusCode, rec.Code) } } } + +// TestHealFormatHandler - test for HealFormatHandler. 
+func TestHealFormatHandler(t *testing.T) { + adminTestBed, err := prepareAdminXLTestBed() + if err != nil { + t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") + } + defer adminTestBed.TearDown() + + // Prepare query params for heal-format mgmt REST API. + queryVal := url.Values{} + queryVal.Set("heal", "") + req, err := newTestRequest("POST", "/?"+queryVal.Encode(), 0, nil) + if err != nil { + t.Fatalf("Failed to construct heal object request - %v", err) + } + + // Set x-minio-operation header to format. + req.Header.Set(minioAdminOpHeader, "format") + + // Sign the request using signature v4. + cred := serverConfig.GetCredential() + err = signRequestV4(req, cred.AccessKey, cred.SecretKey) + if err != nil { + t.Fatalf("Failed to sign heal object request - %v", err) + } + + rec := httptest.NewRecorder() + adminTestBed.mux.ServeHTTP(rec, req) + if rec.Code != http.StatusOK { + t.Errorf("Expected to succeed but failed with %d", rec.Code) + } +} diff --git a/cmd/admin-router.go b/cmd/admin-router.go index a21b1ffb6..3136670f6 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -57,4 +57,6 @@ func registerAdminRouter(mux *router.Router) { adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "bucket").HandlerFunc(adminAPI.HealBucketHandler) // Heal Objects. adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "object").HandlerFunc(adminAPI.HealObjectHandler) + // Heal Format. 
+ adminRouter.Methods("POST").Queries("heal", "").Headers(minioAdminOpHeader, "format").HandlerFunc(adminAPI.HealFormatHandler) } diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go index c117eac9c..5f22e6177 100644 --- a/cmd/admin-rpc-client.go +++ b/cmd/admin-rpc-client.go @@ -38,6 +38,7 @@ type remoteAdminClient struct { type adminCmdRunner interface { Restart() error ListLocks(bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error) + ReInitDisks() error } // Restart - Sends a message over channel to the go-routine @@ -73,6 +74,20 @@ func (rc remoteAdminClient) ListLocks(bucket, prefix string, relTime time.Durati return reply.volLocks, nil } +// ReInitDisks - There is nothing to do here, heal format REST API +// handler has already formatted and reinitialized the local disks. +func (lc localAdminClient) ReInitDisks() error { + return nil +} + +// ReInitDisks - Signals peers via RPC to reinitialize their disks and +// object layer. +func (rc remoteAdminClient) ReInitDisks() error { + args := AuthRPCArgs{} + reply := AuthRPCReply{} + return rc.Call("Admin.ReInitDisks", &args, &reply) +} + // adminPeer - represents an entity that implements Restart methods. type adminPeer struct { addr string @@ -159,6 +174,8 @@ func sendServiceCmd(cps adminPeers, cmd serviceSignal) { errs[0] = invokeServiceCmd(cps[0], cmd) } +// listPeerLocksInfo - fetch list of locks held on the given bucket, +// matching prefix older than relTime from all peer servers. func listPeerLocksInfo(peers adminPeers, bucket, prefix string, relTime time.Duration) ([]VolumeLockInfo, error) { // Used to aggregate volume lock information from all nodes. allLocks := make([][]VolumeLockInfo, len(peers)) @@ -206,3 +223,21 @@ func listPeerLocksInfo(peers adminPeers, bucket, prefix string, relTime time.Dur } return groupedLockInfos, nil } + +// reInitPeerDisks - reinitialize disks and object layer on peer servers to use the new format. 
+func reInitPeerDisks(peers adminPeers) error { + errs := make([]error, len(peers)) + + // Send ReInitDisks RPC call to all nodes. + // for local adminPeer this is a no-op. + wg := sync.WaitGroup{} + for i, peer := range peers { + wg.Add(1) + go func(idx int, peer adminPeer) { + defer wg.Done() + errs[idx] = peer.cmdRunner.ReInitDisks() + }(i, peer) + } + wg.Wait() + return nil +} diff --git a/cmd/admin-rpc-server.go b/cmd/admin-rpc-server.go index 4bd05549a..470082fde 100644 --- a/cmd/admin-rpc-server.go +++ b/cmd/admin-rpc-server.go @@ -17,6 +17,7 @@ package cmd import ( + "errors" "net/rpc" "time" @@ -25,6 +26,8 @@ import ( const adminPath = "/admin" +var errUnsupportedBackend = errors.New("not supported for non erasure-code backend") + // adminCmd - exports RPC methods for service status, stop and // restart commands. type adminCmd struct { @@ -57,11 +60,51 @@ func (s *adminCmd) Restart(args *AuthRPCArgs, reply *AuthRPCReply) error { // ListLocks - lists locks held by requests handled by this server instance. func (s *adminCmd) ListLocks(query *ListLocksQuery, reply *ListLocksReply) error { + if err := query.IsAuthenticated(); err != nil { + return err + } volLocks := listLocksInfo(query.bucket, query.prefix, query.relTime) *reply = ListLocksReply{volLocks: volLocks} return nil } +// ReInitDisk - reinitialize storage disks and object layer to use the +// new format. +func (s *adminCmd) ReInitDisks(args *AuthRPCArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err + } + + if !globalIsXL { + return errUnsupportedBackend + } + + // Get the current object layer instance. + objLayer := newObjectLayerFn() + + // Initialize new disks to include the newly formatted disks. + bootstrapDisks, err := initStorageDisks(globalEndpoints) + if err != nil { + return err + } + + // Initialize new object layer with newly formatted disks. 
+ newObjectAPI, err := newXLObjects(bootstrapDisks) + if err != nil { + return err + } + + // Replace object layer with newly formatted storage. + globalObjLayerMutex.Lock() + globalObjectAPI = newObjectAPI + globalObjLayerMutex.Unlock() + + // Shutdown storage belonging to old object layer instance. + objLayer.Shutdown() + + return nil +} + // registerAdminRPCRouter - registers RPC methods for service status, // stop and restart commands. func registerAdminRPCRouter(mux *router.Router) error { diff --git a/cmd/admin-rpc-server_test.go b/cmd/admin-rpc-server_test.go index 5577c1b2c..32d1dd288 100644 --- a/cmd/admin-rpc-server_test.go +++ b/cmd/admin-rpc-server_test.go @@ -17,6 +17,7 @@ package cmd import ( + "net/url" "testing" "time" ) @@ -61,6 +62,85 @@ func testAdminCmd(cmd cmdType, t *testing.T) { } } +// TestAdminRestart - test for Admin.Restart RPC service. func TestAdminRestart(t *testing.T) { testAdminCmd(restartCmd, t) } + +// TestReInitDisks - test for Admin.ReInitDisks RPC service. +func TestReInitDisks(t *testing.T) { + // Reset global variables to start afresh. + resetTestGlobals() + + rootPath, err := newTestConfig("us-east-1") + if err != nil { + t.Fatalf("Unable to initialize server config. %s", err) + } + defer removeAll(rootPath) + + // Initializing objectLayer for HealFormatHandler. + _, xlDirs, xlErr := initTestXLObjLayer() + if xlErr != nil { + t.Fatalf("failed to initialize XL based object layer - %v.", xlErr) + } + defer removeRoots(xlDirs) + + // Set globalEndpoints for a single node XL setup. + for _, xlDir := range xlDirs { + globalEndpoints = append(globalEndpoints, &url.URL{Path: xlDir}) + } + + // Setup admin rpc server for an XL backend. 
+ globalIsXL = true + adminServer := adminCmd{} + creds := serverConfig.GetCredential() + args := LoginRPCArgs{ + Username: creds.AccessKey, + Password: creds.SecretKey, + Version: Version, + RequestTime: time.Now().UTC(), + } + reply := LoginRPCReply{} + err = adminServer.Login(&args, &reply) + if err != nil { + t.Fatalf("Failed to login to admin server - %v", err) + } + + authArgs := AuthRPCArgs{ + AuthToken: reply.AuthToken, + RequestTime: time.Now().UTC(), + } + authReply := AuthRPCReply{} + + err = adminServer.ReInitDisks(&authArgs, &authReply) + if err != nil { + t.Errorf("Expected to pass, but failed with %v", err) + } + + // Negative test case with admin rpc server setup for FS. + globalIsXL = false + fsAdminServer := adminCmd{} + fsArgs := LoginRPCArgs{ + Username: creds.AccessKey, + Password: creds.SecretKey, + Version: Version, + RequestTime: time.Now().UTC(), + } + fsReply := LoginRPCReply{} + err = fsAdminServer.Login(&fsArgs, &fsReply) + if err != nil { + t.Fatalf("Failed to login to fs admin server - %v", err) + } + + authArgs = AuthRPCArgs{ + AuthToken: fsReply.AuthToken, + RequestTime: time.Now().UTC(), + } + authReply = AuthRPCReply{} + // Attempt ReInitDisks service on a FS backend. + err = fsAdminServer.ReInitDisks(&authArgs, &authReply) + if err != errUnsupportedBackend { + t.Errorf("Expected to fail with %v, but received %v", + errUnsupportedBackend, err) + } +} diff --git a/cmd/globals.go b/cmd/globals.go index c2f2a7861..e1b943b27 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -18,6 +18,7 @@ package cmd import ( "crypto/x509" + "net/url" "os" "runtime" "strings" @@ -70,6 +71,9 @@ var ( // Indicates if the running minio server is distributed setup. globalIsDistXL = false + // Indicates if the running minio server is an erasure-code backend. + globalIsXL = false + // This flag is set to 'true' by default, it is set to `false` // when MINIO_BROWSER env is set to 'off'. 
globalIsBrowserEnabled = !strings.EqualFold(os.Getenv("MINIO_BROWSER"), "off") @@ -112,6 +116,9 @@ var ( // Secret key passed from the environment globalEnvSecretKey = os.Getenv("MINIO_SECRET_KEY") + // url.URL endpoints of disks that belong to the object storage. + globalEndpoints = []*url.URL{} + // Add new variable global values here. ) diff --git a/cmd/server-main.go b/cmd/server-main.go index 3bd31c4d2..431d25a14 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -418,6 +418,12 @@ func serverMain(c *cli.Context) { fatalIf(initDsyncNodes(endpoints), "Unable to initialize distributed locking clients") } + // Set globalIsXL if erasure code backend is about to be + // initialized for the given endpoints. + if len(endpoints) > 1 { + globalIsXL = true + } + // Initialize name space lock. initNSLock(globalIsDistXL) @@ -453,6 +459,9 @@ func serverMain(c *cli.Context) { fatalIf(apiServer.ListenAndServe(cert, key), "Failed to start minio server.") }() + // Set endpoints of []*url.URL type to globalEndpoints. + globalEndpoints = endpoints + newObject, err := newObjectLayer(srvConfig) fatalIf(err, "Initializing object layer failed") diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 21641b42c..bd9537ca6 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -488,6 +488,14 @@ func resetGlobalEventnotify() { globalEventNotifier = nil } +func resetGlobalEndpoints() { + globalEndpoints = []*url.URL{} +} + +func resetGlobalIsXL() { + globalIsXL = false +} + // Resets all the globals used modified in tests. // Resetting ensures that the changes made to globals by one test doesn't affect others. func resetTestGlobals() { @@ -501,6 +509,10 @@ func resetTestGlobals() { resetGlobalNSLock() // Reset global event notifier. resetGlobalEventnotify() + // Reset global endpoints. + resetGlobalEndpoints() + // Reset global isXL flag. + resetGlobalIsXL() } // Configure the server for the test run. 
diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go index d9ad936e7..a94cdd05a 100644 --- a/cmd/xl-v1.go +++ b/cmd/xl-v1.go @@ -163,6 +163,14 @@ func newXLObjects(storageDisks []StorageAPI) (ObjectLayer, error) { // Shutdown function for object storage interface. func (xl xlObjects) Shutdown() error { // Add any object layer shutdown activities here. + for _, disk := range xl.storageDisks { + // This closes storage rpc client connections if any. + // Otherwise this is a no-op. + if disk == nil { + continue + } + disk.Close() + } return nil } diff --git a/pkg/madmin/API.md b/pkg/madmin/API.md index 8b6d0393a..c54fd292e 100644 --- a/pkg/madmin/API.md +++ b/pkg/madmin/API.md @@ -38,8 +38,11 @@ func main() { | Service operations|LockInfo operations|Healing operations| |:---|:---|:---| -|[`ServiceStatus`](#ServiceStatus)| | | -|[`ServiceRestart`](#ServiceRestart)| | | +|[`ServiceStatus`](#ServiceStatus)| [`ListLocks`](#ListLocks)| [`ListObjectsHeal`](#ListObjectsHeal)| +|[`ServiceRestart`](#ServiceRestart)| [`ClearLocks`](#ClearLocks)| [`ListBucketsHeal`](#ListBucketsHeal)| +| | |[`HealBucket`](#HealBucket) | +| | |[`HealObject`](#HealObject)| +| | |[`HealFormat`](#HealFormat)| ## 1. Constructor @@ -185,14 +188,14 @@ __Example__ } ``` - -### ListBucketsList() error + +### ListBucketsHeal() error If successful returns information on the list of buckets that need healing. __Example__ ``` go - // List buckets that need healing + // List buckets that need healing healBucketsList, err := madmClnt.ListBucketsHeal() if err != nil { fmt.Println(err) @@ -244,3 +247,19 @@ __Example__ log.Println("successfully healed mybucket/myobject") ``` + + +### HealFormat() error +Heal storage format on available disks. This is used when disks were replaced or were found with missing format. This is supported only for erasure-coded backend. 
+ +__Example__ + +``` go + err := madmClnt.HealFormat() + if err != nil { + log.Fatalln(err) + } + + log.Println("successfully healed storage format on available disks.") + +``` diff --git a/pkg/madmin/examples/heal-format.go b/pkg/madmin/examples/heal-format.go new file mode 100644 index 000000000..4b915747d --- /dev/null +++ b/pkg/madmin/examples/heal-format.go @@ -0,0 +1,49 @@ +// +build ignore + +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "log" + + "github.com/minio/minio/pkg/madmin" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are + // dummy values, please replace them with original values. + + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are + // dummy values, please replace them with original values. + + // API requests are secure (HTTPS) if secure=true and insecure (HTTPS) otherwise. + // New returns an Minio Admin client object. + madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + // Heal storage format on available disks. 
+ err = madmClnt.HealFormat() + if err != nil { + log.Fatalln(err) + } + + log.Println("successfully healed storage format on available disks.") +} diff --git a/pkg/madmin/heal-commands.go b/pkg/madmin/heal-commands.go index e439b84d2..ba6d6fa12 100644 --- a/pkg/madmin/heal-commands.go +++ b/pkg/madmin/heal-commands.go @@ -403,3 +403,32 @@ func (adm *AdminClient) HealObject(bucket, object string, dryrun bool) error { return nil } + +// HealFormat - heal storage format on available disks. +func (adm *AdminClient) HealFormat() error { + queryVal := url.Values{} + queryVal.Set("heal", "") + + // Set x-minio-operation to format. + hdrs := make(http.Header) + hdrs.Set(minioAdminOpHeader, "format") + + reqData := requestData{ + queryValues: queryVal, + customHeaders: hdrs, + } + + // Execute POST on /?heal to heal storage format. + resp, err := adm.executeMethod("POST", reqData) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return errors.New("Got HTTP Status: " + resp.Status) + } + + return nil +} From e1f64141a275329be787766eea72c711aaa2a504 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Mon, 23 Jan 2017 17:54:32 +0100 Subject: [PATCH 091/100] presign-v2: Compute signature on encoded URL path (#3612) Encode the path of the passed presigned url before calculating the signature. This fixes presigning objects whose names contain characters that are found encoded in urls. 
--- cmd/signature-v2.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/signature-v2.go b/cmd/signature-v2.go index 2698891d3..c5540201e 100644 --- a/cmd/signature-v2.go +++ b/cmd/signature-v2.go @@ -92,9 +92,10 @@ func doesPresignV2SignatureMatch(r *http.Request) APIErrorCode { if encodedResource == "" { splits := strings.Split(r.URL.Path, "?") if len(splits) > 0 { - encodedResource = splits[0] + encodedResource = getURLEncodedName(splits[0]) } } + queries := strings.Split(encodedQuery, "&") var filteredQueries []string var gotSignature string From d1d89116f1eee523f93e311185ef3df9861c55a5 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Mon, 23 Jan 2017 17:56:06 +0100 Subject: [PATCH 092/100] admin: Add version to service Status API response (#3605) Add server's version field to service status API: "version":{ "version":"DEVELOPMENT.GOGET", "commitID":"DEVELOPMENT.GOGET" } --- cmd/admin-handlers.go | 26 +++++++++++++++++++++++++- cmd/admin-handlers_test.go | 7 +++++-- docs/admin-api/management-api.md | 2 +- pkg/madmin/API.md | 8 +++++--- pkg/madmin/service.go | 28 +++++++++++++++++++++------- 5 files changed, 57 insertions(+), 14 deletions(-) diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index ffd64c2ec..2639d7c3b 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -46,6 +46,18 @@ const ( mgmtDryRun mgmtQueryKey = "dry-run" ) +// ServerVersion - server version +type ServerVersion struct { + Version string `json:"version"` + CommitID string `json:"commitID"` +} + +// ServerStatus - contains the response of service status API +type ServerStatus struct { + StorageInfo StorageInfo `json:"storageInfo"` + ServerVersion ServerVersion `json:"serverVersion"` +} + // ServiceStatusHandler - GET /?service // HTTP header x-minio-operation: status // ---------- @@ -57,8 +69,20 @@ func (adminAPI adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r * writeErrorResponse(w, adminAPIErr, r.URL) return } + + // Fetch 
storage backend information storageInfo := newObjectLayerFn().StorageInfo() - jsonBytes, err := json.Marshal(storageInfo) + // Fetch server version + serverVersion := ServerVersion{Version: Version, CommitID: CommitID} + + // Create API response + serverStatus := ServerStatus{ + StorageInfo: storageInfo, + ServerVersion: serverVersion, + } + + // Marshal API response + jsonBytes, err := json.Marshal(serverStatus) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) errorIf(err, "Failed to marshal storage info into json.") diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index ee6376f86..250c144ca 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -227,8 +227,11 @@ func testServicesCmdHandler(cmd cmdType, args map[string]interface{}, t *testing adminTestBed.mux.ServeHTTP(rec, req) if cmd == statusCmd { - expectedInfo := newObjectLayerFn().StorageInfo() - receivedInfo := StorageInfo{} + expectedInfo := ServerStatus{ + StorageInfo: newObjectLayerFn().StorageInfo(), + ServerVersion: ServerVersion{Version: Version, CommitID: CommitID}, + } + receivedInfo := ServerStatus{} if jsonErr := json.Unmarshal(rec.Body.Bytes(), &receivedInfo); jsonErr != nil { t.Errorf("Failed to unmarshal StorageInfo - %v", jsonErr) } diff --git a/docs/admin-api/management-api.md b/docs/admin-api/management-api.md index dc0167c82..90f38b848 100644 --- a/docs/admin-api/management-api.md +++ b/docs/admin-api/management-api.md @@ -25,7 +25,7 @@ * Status - GET /?service - x-minio-operation: status - - Response: On success 200, return json formatted StorageInfo object. 
+ - Response: On success 200, return json formatted object which contains StorageInfo and ServerVersion structures * SetCredentials - GET /?service diff --git a/pkg/madmin/API.md b/pkg/madmin/API.md index c54fd292e..420f20984 100644 --- a/pkg/madmin/API.md +++ b/pkg/madmin/API.md @@ -74,9 +74,11 @@ Fetch service status, replies disk space used, backend type and total disks offl | Param | Type | Description | |---|---|---| -|`st.Total` | _int64_ | Total disk space. | -|`st.Free` | _int64_ | Free disk space. | -|`st.Backend`| _struct{}_ | Represents backend type embedded structure. | +|`st.ServerVersion.Version` | _string_ | Server version. | +|`st.ServerVersion.CommitID` | _string_ | Server commit id. | +|`st.StorageInfo.Total` | _int64_ | Total disk space. | +|`st.StorageInfo.Free` | _int64_ | Free disk space. | +|`st.StorageInfo.Backend`| _struct{}_ | Represents backend type embedded structure. | | Param | Type | Description | |---|---|---| diff --git a/pkg/madmin/service.go b/pkg/madmin/service.go index 442ab3ffb..d5c732cb1 100644 --- a/pkg/madmin/service.go +++ b/pkg/madmin/service.go @@ -41,8 +41,8 @@ const ( // Add your own backend. ) -// ServiceStatusMetadata - represents total capacity of underlying storage. -type ServiceStatusMetadata struct { +// StorageInfo - represents total capacity of underlying storage. +type StorageInfo struct { // Total disk space. Total int64 // Free available disk space. 
@@ -60,10 +60,23 @@ type ServiceStatusMetadata struct { } } +// ServerVersion - server version +type ServerVersion struct { + Version string `json:"version"` + CommitID string `json:"commitID"` +} + +// ServiceStatusMetadata - contains the response of service status API +type ServiceStatusMetadata struct { + StorageInfo StorageInfo `json:"storageInfo"` + ServerVersion ServerVersion `json:"serverVersion"` +} + // ServiceStatus - Connect to a minio server and call Service Status Management API // to fetch server's storage information represented by ServiceStatusMetadata structure func (adm *AdminClient) ServiceStatus() (ServiceStatusMetadata, error) { + // Prepare web service request reqData := requestData{} reqData.queryValues = make(url.Values) reqData.queryValues.Set("service", "") @@ -72,29 +85,30 @@ func (adm *AdminClient) ServiceStatus() (ServiceStatusMetadata, error) { // Execute GET on bucket to list objects. resp, err := adm.executeMethod("GET", reqData) - defer closeResponse(resp) if err != nil { return ServiceStatusMetadata{}, err } + // Check response http status code if resp.StatusCode != http.StatusOK { return ServiceStatusMetadata{}, errors.New("Got HTTP Status: " + resp.Status) } + // Unmarshal the server's json response + var serviceStatus ServiceStatusMetadata + respBytes, err := ioutil.ReadAll(resp.Body) if err != nil { return ServiceStatusMetadata{}, err } - var storageInfo ServiceStatusMetadata - - err = json.Unmarshal(respBytes, &storageInfo) + err = json.Unmarshal(respBytes, &serviceStatus) if err != nil { return ServiceStatusMetadata{}, err } - return storageInfo, nil + return serviceStatus, nil } // ServiceRestart - Call Service Restart API to restart a specified Minio server From fc6f804865937fccb211f4b46da0c103f2416108 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Mon, 23 Jan 2017 18:55:34 +0100 Subject: [PATCH 093/100] server-mux: Keep listening after Accept() err (#3613) Accept() can return errors like: `too many open files`, no need 
to totally quit listening in this case. --- cmd/server-mux.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/server-mux.go b/cmd/server-mux.go index e98741bb6..16d525d28 100644 --- a/cmd/server-mux.go +++ b/cmd/server-mux.go @@ -191,20 +191,20 @@ func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux { } // Start listening, wrap connections with tls when needed go func() { + // Extract tcp listener. + tcpListener, ok := l.Listener.(*net.TCPListener) + if !ok { + l.acceptResCh <- ListenerMuxAcceptRes{err: errInvalidArgument} + return + } + // Loop for accepting new connections for { - // Extract tcp listener. - tcpListener, ok := l.Listener.(*net.TCPListener) - if !ok { - l.acceptResCh <- ListenerMuxAcceptRes{err: errInvalidArgument} - return - } - // Use accept TCP method to receive the connection. conn, err := tcpListener.AcceptTCP() if err != nil { l.acceptResCh <- ListenerMuxAcceptRes{err: err} - return + continue } // Enable Read timeout From 8489f22fe28ad671117aa45135afb7988560f781 Mon Sep 17 00:00:00 2001 From: Krishna Srinivas Date: Mon, 23 Jan 2017 17:01:44 -0800 Subject: [PATCH 094/100] signature-v2: Use request.RequestURI for signature calculation. (#3616) * signature-v2: Use request.RequestURI for signature calculation. * Use splitStr instead of strings.Split --- cmd/signature-v2.go | 29 ++++++----------------------- cmd/signature-v2_test.go | 2 ++ cmd/test-utils_test.go | 15 ++++++++++----- 3 files changed, 18 insertions(+), 28 deletions(-) diff --git a/cmd/signature-v2.go b/cmd/signature-v2.go index c5540201e..107dbe8c5 100644 --- a/cmd/signature-v2.go +++ b/cmd/signature-v2.go @@ -85,16 +85,9 @@ func doesPresignV2SignatureMatch(r *http.Request) APIErrorCode { // Access credentials. cred := serverConfig.GetCredential() - // url.RawPath will be valid if path has any encoded characters, if not it will - // be empty - in which case we need to consider url.Path (bug in net/http?) 
- encodedResource := r.URL.RawPath - encodedQuery := r.URL.RawQuery - if encodedResource == "" { - splits := strings.Split(r.URL.Path, "?") - if len(splits) > 0 { - encodedResource = getURLEncodedName(splits[0]) - } - } + // r.RequestURI will have raw encoded URI as sent by the client. + splits := splitStr(r.RequestURI, "?", 2) + encodedResource, encodedQuery := splits[0], splits[1] queries := strings.Split(encodedQuery, "&") var filteredQueries []string @@ -213,19 +206,9 @@ func doesSignV2Match(r *http.Request) APIErrorCode { return apiError } - // Encode path: - // url.RawPath will be valid if path has any encoded characters, if not it will - // be empty - in which case we need to consider url.Path (bug in net/http?) - encodedResource := r.URL.RawPath - if encodedResource == "" { - splits := strings.Split(r.URL.Path, "?") - if len(splits) > 0 { - encodedResource = getURLEncodedName(splits[0]) - } - } - - // Encode query strings - encodedQuery := r.URL.Query().Encode() + // r.RequestURI will have raw encoded URI as sent by the client. + splits := splitStr(r.RequestURI, "?", 2) + encodedResource, encodedQuery := splits[0], splits[1] expectedAuth := signatureV2(r.Method, encodedResource, encodedQuery, r.Header) if v2Auth != expectedAuth { diff --git a/cmd/signature-v2_test.go b/cmd/signature-v2_test.go index 91f4176a8..56cc38957 100644 --- a/cmd/signature-v2_test.go +++ b/cmd/signature-v2_test.go @@ -101,6 +101,8 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) { if e != nil { t.Errorf("(%d) failed to create http.Request, got %v", i, e) } + // Should be set since we are simulating a http server. + req.RequestURI = req.URL.RequestURI() // Do the same for the headers. 
for key, value := range testCase.headers { diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index bd9537ca6..a9c544576 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -1739,20 +1739,25 @@ func prepareXLStorageDisks(t *testing.T) ([]StorageAPI, []string) { // initializes the specified API endpoints for the tests. // initialies the root and returns its path. // return credentials. -func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (bucketName string, apiRouter http.Handler, err error) { +func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handler, error) { // get random bucket name. - bucketName = getRandomBucketName() + bucketName := getRandomBucketName() // Create bucket. - err = obj.MakeBucket(bucketName) + err := obj.MakeBucket(bucketName) if err != nil { // failed to create newbucket, return err. return "", nil, err } // Register the API end points with XL object layer. // Registering only the GetObject handler. - apiRouter = initTestAPIEndPoints(obj, endpoints) - return bucketName, apiRouter, nil + apiRouter := initTestAPIEndPoints(obj, endpoints) + var f http.HandlerFunc + f = func(w http.ResponseWriter, r *http.Request) { + r.RequestURI = r.URL.RequestURI() + apiRouter.ServeHTTP(w, r) + } + return bucketName, f, nil } // ExecObjectLayerAPIAnonTest - Helper function to validate object Layer API handler From cead24b0f7cbeb6e8be5032b852c56a9727f9d25 Mon Sep 17 00:00:00 2001 From: Krishna Srinivas Date: Mon, 23 Jan 2017 18:07:22 -0800 Subject: [PATCH 095/100] miniobrowser: Bring Minio browser source into minio repo. 
(#3617) --- browser/.babelrc | 8 + browser/.editorconfig | 16 + browser/.esformatter | 23 + browser/README.md | 37 + browser/app/css/loader.css | 98 +++ browser/app/fonts/lato/lato-normal.woff | Bin 0 -> 38240 bytes browser/app/fonts/lato/lato-normal.woff2 | Bin 0 -> 30348 bytes browser/app/img/arrow.svg | 3 + browser/app/img/browsers/chrome.png | Bin 0 -> 3726 bytes browser/app/img/browsers/firefox.png | Bin 0 -> 4795 bytes browser/app/img/browsers/safari.png | Bin 0 -> 4971 bytes browser/app/img/favicon.ico | Bin 0 -> 1340 bytes browser/app/img/logo.svg | 57 ++ browser/app/img/more-h-light.svg | 3 + browser/app/img/more-h.svg | 1 + browser/app/img/select-caret.svg | 3 + browser/app/index.html | 56 ++ browser/app/index.js | 116 +++ browser/app/js/__tests__/jsonrpc-test.js | 43 + browser/app/js/actions.js | 509 ++++++++++++ browser/app/js/components/Browse.js | 734 ++++++++++++++++++ browser/app/js/components/BrowserDropdown.js | 56 ++ browser/app/js/components/BrowserUpdate.js | 42 + browser/app/js/components/ConfirmModal.js | 50 ++ browser/app/js/components/Dropzone.js | 65 ++ browser/app/js/components/InputGroup.js | 49 ++ browser/app/js/components/Login.js | 133 ++++ browser/app/js/components/ObjectsList.js | 75 ++ browser/app/js/components/Path.js | 41 + browser/app/js/components/Policy.js | 80 ++ browser/app/js/components/PolicyInput.js | 83 ++ browser/app/js/components/SettingsModal.js | 215 +++++ browser/app/js/components/SideBar.js | 85 ++ browser/app/js/components/UploadModal.js | 141 ++++ .../app/js/components/__tests__/Login-test.js | 54 ++ browser/app/js/constants.js | 23 + browser/app/js/jsonrpc.js | 91 +++ browser/app/js/mime.js | 106 +++ browser/app/js/reducers.js | 176 +++++ browser/app/js/utils.js | 85 ++ browser/app/js/web.js | 124 +++ browser/app/less/inc/alert.less | 68 ++ browser/app/less/inc/animate/animate.less | 13 + browser/app/less/inc/animate/fadeIn.less | 26 + browser/app/less/inc/animate/fadeInDown.less | 54 ++ 
browser/app/less/inc/animate/fadeInUp.less | 54 ++ browser/app/less/inc/animate/fadeOut.less | 26 + browser/app/less/inc/animate/fadeOutDown.less | 54 ++ browser/app/less/inc/animate/fadeOutUp.less | 51 ++ browser/app/less/inc/animate/zoomIn.less | 23 + browser/app/less/inc/base.less | 31 + browser/app/less/inc/buttons.less | 53 ++ browser/app/less/inc/dropdown.less | 26 + browser/app/less/inc/file-explorer.less | 160 ++++ browser/app/less/inc/font.less | 7 + browser/app/less/inc/form.less | 249 ++++++ browser/app/less/inc/generics.less | 83 ++ browser/app/less/inc/header.less | 242 ++++++ browser/app/less/inc/ie-warning.less | 81 ++ browser/app/less/inc/list.less | 352 +++++++++ browser/app/less/inc/login.less | 104 +++ browser/app/less/inc/misc.less | 102 +++ browser/app/less/inc/mixin.less | 52 ++ browser/app/less/inc/modal.less | 294 +++++++ browser/app/less/inc/sidebar.less | 187 +++++ browser/app/less/inc/variables.less | 94 +++ browser/app/less/main.less | 39 + browser/build.js | 126 +++ browser/karma.conf.js | 40 + browser/package.json | 82 ++ browser/tests.webpack.js | 2 + .../miniobrowser => browser}/ui-assets.go | 0 browser/webpack.config.js | 105 +++ browser/webpack.production.config.js | 88 +++ cmd/web-handlers.go | 2 +- cmd/web-router.go | 2 +- vendor/github.com/minio/miniobrowser/LICENSE | 202 ----- vendor/vendor.json | 6 - 78 files changed, 6451 insertions(+), 210 deletions(-) create mode 100644 browser/.babelrc create mode 100644 browser/.editorconfig create mode 100644 browser/.esformatter create mode 100644 browser/README.md create mode 100644 browser/app/css/loader.css create mode 100755 browser/app/fonts/lato/lato-normal.woff create mode 100755 browser/app/fonts/lato/lato-normal.woff2 create mode 100644 browser/app/img/arrow.svg create mode 100644 browser/app/img/browsers/chrome.png create mode 100644 browser/app/img/browsers/firefox.png create mode 100644 browser/app/img/browsers/safari.png create mode 100644 browser/app/img/favicon.ico create 
mode 100644 browser/app/img/logo.svg create mode 100644 browser/app/img/more-h-light.svg create mode 100644 browser/app/img/more-h.svg create mode 100644 browser/app/img/select-caret.svg create mode 100644 browser/app/index.html create mode 100644 browser/app/index.js create mode 100644 browser/app/js/__tests__/jsonrpc-test.js create mode 100644 browser/app/js/actions.js create mode 100644 browser/app/js/components/Browse.js create mode 100644 browser/app/js/components/BrowserDropdown.js create mode 100644 browser/app/js/components/BrowserUpdate.js create mode 100644 browser/app/js/components/ConfirmModal.js create mode 100644 browser/app/js/components/Dropzone.js create mode 100644 browser/app/js/components/InputGroup.js create mode 100644 browser/app/js/components/Login.js create mode 100644 browser/app/js/components/ObjectsList.js create mode 100644 browser/app/js/components/Path.js create mode 100644 browser/app/js/components/Policy.js create mode 100644 browser/app/js/components/PolicyInput.js create mode 100644 browser/app/js/components/SettingsModal.js create mode 100644 browser/app/js/components/SideBar.js create mode 100644 browser/app/js/components/UploadModal.js create mode 100644 browser/app/js/components/__tests__/Login-test.js create mode 100644 browser/app/js/constants.js create mode 100644 browser/app/js/jsonrpc.js create mode 100644 browser/app/js/mime.js create mode 100644 browser/app/js/reducers.js create mode 100644 browser/app/js/utils.js create mode 100644 browser/app/js/web.js create mode 100644 browser/app/less/inc/alert.less create mode 100644 browser/app/less/inc/animate/animate.less create mode 100644 browser/app/less/inc/animate/fadeIn.less create mode 100644 browser/app/less/inc/animate/fadeInDown.less create mode 100644 browser/app/less/inc/animate/fadeInUp.less create mode 100644 browser/app/less/inc/animate/fadeOut.less create mode 100644 browser/app/less/inc/animate/fadeOutDown.less create mode 100644 
browser/app/less/inc/animate/fadeOutUp.less create mode 100644 browser/app/less/inc/animate/zoomIn.less create mode 100644 browser/app/less/inc/base.less create mode 100644 browser/app/less/inc/buttons.less create mode 100644 browser/app/less/inc/dropdown.less create mode 100644 browser/app/less/inc/file-explorer.less create mode 100644 browser/app/less/inc/font.less create mode 100644 browser/app/less/inc/form.less create mode 100644 browser/app/less/inc/generics.less create mode 100644 browser/app/less/inc/header.less create mode 100644 browser/app/less/inc/ie-warning.less create mode 100644 browser/app/less/inc/list.less create mode 100644 browser/app/less/inc/login.less create mode 100644 browser/app/less/inc/misc.less create mode 100644 browser/app/less/inc/mixin.less create mode 100644 browser/app/less/inc/modal.less create mode 100644 browser/app/less/inc/sidebar.less create mode 100644 browser/app/less/inc/variables.less create mode 100644 browser/app/less/main.less create mode 100644 browser/build.js create mode 100644 browser/karma.conf.js create mode 100644 browser/package.json create mode 100644 browser/tests.webpack.js rename {vendor/github.com/minio/miniobrowser => browser}/ui-assets.go (100%) create mode 100644 browser/webpack.config.js create mode 100644 browser/webpack.production.config.js delete mode 100644 vendor/github.com/minio/miniobrowser/LICENSE diff --git a/browser/.babelrc b/browser/.babelrc new file mode 100644 index 000000000..9cd3d553f --- /dev/null +++ b/browser/.babelrc @@ -0,0 +1,8 @@ +{ + "presets": [ + "es2015", + "react" + ], + + "plugins": ["transform-object-rest-spread"] +} diff --git a/browser/.editorconfig b/browser/.editorconfig new file mode 100644 index 000000000..92926b6de --- /dev/null +++ b/browser/.editorconfig @@ -0,0 +1,16 @@ +# editorconfig.org +root = true + +[*] +indent_style = space +indent_size = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.json] 
+indent_size = 2 + +[*.md] +trim_trailing_whitespace = false diff --git a/browser/.esformatter b/browser/.esformatter new file mode 100644 index 000000000..1677d7c4b --- /dev/null +++ b/browser/.esformatter @@ -0,0 +1,23 @@ +{ + "plugins": [ + "esformatter-jsx" + ], + // Copied from https://github.com/royriojas/esformatter-jsx + "jsx": { + "formatJSX": true, //Duh! that's the default + "attrsOnSameLineAsTag": false, // move each attribute to its own line + "maxAttrsOnTag": 3, // if lower or equal than 3 attributes, they will be kept on a single line + "firstAttributeOnSameLine": true, // keep the first attribute in the same line as the tag + "formatJSXExpressions": true, // default true, if false jsxExpressions won't be recursively formatted + "JSXExpressionsSingleLine": true, // default true, if false the JSXExpressions might span several lines + "alignWithFirstAttribute": false, // do not align attributes with the first tag + "spaceInJSXExpressionContainers": " ", // default to one space. Make it empty if you don't like spaces between JSXExpressionContainers + "removeSpaceBeforeClosingJSX": false, // default false. if true => + "closingTagOnNewLine": false, // default false. if true attributes on multiple lines will close the tag on a new line + "JSXAttributeQuotes": "", // possible values "single" or "double". Leave it as empty string if you don't want to modify the attributes' quotes + "htmlOptions": { + // put here the options for js-beautify.html + } + } +} + diff --git a/browser/README.md b/browser/README.md new file mode 100644 index 000000000..74bb563f2 --- /dev/null +++ b/browser/README.md @@ -0,0 +1,37 @@ +# Minio File Browser + +``Minio Browser`` provides minimal set of UI to manage buckets and objects on ``minio`` server. ``Minio Browser`` is written in javascript and released under [Apache 2.0 License](./LICENSE). 
+ +## Installation + +### Install yarn: +```sh +$ curl -o- -L https://yarnpkg.com/install.sh | bash +$ yarn +``` + +### Install `go-bindata` and `go-bindata-assetfs`. + +If you do not have a working Golang environment, please follow [Install Golang](https://docs.minio.io/docs/how-to-install-golang) + +```sh +$ go get github.com/jteeuwen/go-bindata/... +$ go get github.com/elazarl/go-bindata-assetfs/... +``` + +## Generating Assets. + +### Generate ui-assets.go + +```sh +$ yarn release +``` +This generates ui-assets.go in the current direcotry. Now do `make` in the parent directory to build the minio binary with the newly generated ui-assets.go + +### Run Minio Browser with live reload. + +```sh +$ yarn dev +``` + +Open [http://localhost:8080/minio/](http://localhost:8080/minio/) in your browser to play with the application diff --git a/browser/app/css/loader.css b/browser/app/css/loader.css new file mode 100644 index 000000000..d7ae07b02 --- /dev/null +++ b/browser/app/css/loader.css @@ -0,0 +1,98 @@ +.page-load { + position: fixed; + width: 100%; + height: 100%; + top: 0; + left: 0; + background: #32393F; + z-index: 100; + transition: opacity 200ms; + -webkit-transition: opacity 200ms; +} + +.pl-0{ + opacity: 0; +} + +.pl-1 { + display: none; +} + +.pl-inner { + position: absolute; + width: 100px; + height: 100px; + left: 50%; + margin-left: -50px; + top: 50%; + margin-top: -50px; + text-align: center; + -webkit-animation: fade-in 500ms; + animation: fade-in 500ms; + -webkit-animation-fill-mode: both; + animation-fill-mode: both; + animation-delay: 350ms; + -webkit-animation-delay: 350ms; + -webkit-backface-visibility: visible; + backface-visibility: visible; +} + +.pl-inner:before { + content: ''; + position: absolute; + width: 100%; + height: 100%; + left: 0; + top: 0; + display: block; + -webkit-animation: spin 1000ms infinite linear; + animation: spin 1000ms infinite linear; + border: 1px solid rgba(255, 255, 255, 0.2);; + border-left-color: #fff; + 
border-radius: 50%; +} + +.pl-inner > img { + width: 30px; + margin-top: 28px; +} + +@-webkit-keyframes fade-in { + 0% { + opacity: 0; + } + 100% { + opacity: 1; + } +} + +@keyframes fade-in { + 0% { + opacity: 0; + } + 100% { + opacity: 1; + } +} + +@-webkit-keyframes spin { + 0% { + -webkit-transform: rotate(0deg); + transform: rotate(0deg); + } + 100% { + -webkit-transform: rotate(360deg); + transform: rotate(360deg); + } +} + +@keyframes spin { + 0% { + -webkit-transform: rotate(0deg); + transform: rotate(0deg); + } + 100% { + -webkit-transform: rotate(360deg); + transform: rotate(360deg); + } +} diff --git a/browser/app/fonts/lato/lato-normal.woff b/browser/app/fonts/lato/lato-normal.woff new file mode 100755 index 0000000000000000000000000000000000000000..f2317755c63f47a6bd74acdf0140651b266f9c69 GIT binary patch literal 38240 zcmY(q1AHY<@Gp8c+zmFiHg>YHZQJ&VZ6_Psw!KMCHnwfsw*B(^-}mmjcRu~Cny#tp zs?*ccJ*Pi4u5zNHfNy}WBS{~C^2NSUS^cm2f3pAk5EWIH`BEtTlH&dcdqRE{F<}wW zFInGLo%jo6-(CPWV)AlIU$P?rfQS?TPy!P;EBY>`q#_6akRSp8U}gZo4|Tm?xsP&6 z^o(C7ZeMn~U(mOJ*#j9_8`yoxmcI19+5=0UbI{#0a&h_v08qSr=}7(u)NJ@hQ#&(j z0Dw~b%TMi>o{18OzQoMH@ynL#KfUk&FA&YF+)V)hYKSivUvXBkSGwow15FH!0e~>+ zFCC^Yu);I4-vhsdUoyY1I>8r65G$cJfYwf5v4!P+wXY5UfESJa*C?8f!mkgsQ>!l*M9(m`gUMrU~Tdx+XDa~?f?LUhhEM)7du-=CjcP+>$iP7 z{c3ZC-JcWO&cWo%Ht|2(#4n6*meWDFnizh?ob=@r0QDb)qgCg>`2XCcF`oR%uXDik zQK|cXeCFjI&TYj=-&kLN3*3+c3Q-?SP&AmF|1EJFGIksc-RP@BPQcCqZY%SdKipJQ z;s@n!>e@*D@Gl`D^$-z7zy?szGK0^+P*72Y%W}DJjf6@W$;H5)4-qwO^$@{*cUoHf zOAl5@N}D14JvNHej}S)>87!1vzw|lFl_tri}aGS;2 zgEZw3ghS--sB-w~XOF5xPWaPzt=UV>#mlMk(?(a z0MlUWA9}mh1e&Nm96KNKqArUunAi-m#}%P6$067*Hj#1iU$Tyo3b;5VgG1HhzbxLg<;D2mjSGYl-%MTfROS;`Pq=TUd-YIJ@@T| zq$Z=)grK0Ui^2%Yc zUPO7yBBGTp2QeSxA~@U~lKha3-};Z>$|`OfBW6YhCXjC%^JL=Mu}cIbko@07nO!0% z;eQDE`+rx+Yj*YoP`rzXa^d8ZH#2>PxG*Pq5U#;Dru z@Vrv#my@V~ zn<3hyF(IW-nI&5i&i5}W#;-RQlF1t`oRn9BKNlUQLy>v4xq8Q4U{Lj$LnkCtt&8Q? 
zsTZEGw^gkgv2Va5mB;D0FtzTr9bV*v&swdV@JwL~u(g za`3Mwev3Ao16I@eUl!eL&K38}j>eBg#vh;RUX+WTTNt;YL=#_o&@I>%f&_ln%-$g0 z+RB{@9gI^$#|dz0)`&q_JK2~|tlez=ifb-ncjW3JL8C8!i#Q8ITj&R~?E%Xm3VIkaV`+0PJ}kcD22fWUFcdJyt@0z-3~*KFn!AF){LaPGIP5c>5HPe-BS=et%a(JNn3lTEk0Rt z)C{1^Dk_m#TMuegYHH16N+sjTtvRmG|e}!MQ*k!R)J4CP#jR- z8QU@+!0KbC#xi5s9n*(0@L>uq8HDi29Dvgmtg}uP;WM4aDcjf{+oKk(#s1_$KKEy~ zHNoSnVHy!tveD6VvF<0DW}l7Zn&|PybY)R2``7pC6ZZIyoRbnbIoiFszjf)LKIq&3 zdQg}AIieYDLW|I|xW6+Ry&I&q738`Tw765_aHYoaRDt`Z^7DfV+BYe%bEF&ga3{#| zx~JJ~TidJqSKF25S9z2A>0^M1?}okMij5u%JK6k{s_i9Rt$AgzAW;|AP1G_%M00zo ze%)A^!c>{ywJ@Zss>DJ=6rY4RPGc0Sp_V2$dJlTzqLrJv`9<4E9AR-p9{)U;n^7(P z566`^WKewqn5GTO&)5LcK|X(&qKk8}2dzrCTHiS(x`dr@{md6)<9v&@XUnVq?sOp9 z<2oxQQ@9$Z+n66B-zK6qe4v*7puG|4GV7Q(LB`)M@_SLS*~QP&eXTjna+wO@l+DG# z6^zz&+7OR+CvBh7L%d*Kc0rDXTM+KuS%hr8)IGYEvDWgGKE62}Y|-A%TI@5q;&pCQ zU9Uc8r2~$gDIjg zJ>*22tm19ezt89gXkcgj19fvtV1*}$ZIw47-FvQ3F`*3G;r9Kn6?5ly2lqt;8%u35 z@XZX)0XX4pZQ_4Sk@9y7Ufq^)A;at$*=k2bm_5Kh39}gk#+>R-ssa|=$ zBcvmSBYPm=9llSEH=@QX+CG?J!VU<*jO;;x4Jl(5slGDlXDV2qyF7&pq+>=14@~Bl z$}^dQ0rI@t{?7!JQ6`mJ!%VY>==6uf6dBTzD8b7#c0`l=$0J4;Mt|jSLglbg<*-*1 zjiCcT>9YLKL|75xR^zBthP!1n_obm{01r=4G_)b@BqClm=>pGSCaZl1 z^*c)7!=S<-ZVZL~8y%;Hl2=&o=n ziot&xEiTS+N58xB9N{^T8EH2KJp*cZlO1x>q>n8=Obs=rZ^yi5*J(=bexFkCjGK0a**>Z}7BUCZIc}b%+pg zZrs%C6a^5}jDmw3F>y1k!OO~wjsTk@kKpCS$jggGkP{grM}{xP_$aO_#F@(aO}Z6c zI>V3D_;-P6WdM_;}EP!0oZoHxQyG1Z{9~&oJYFZgh>-@TJO>1rQ#D zUMJ3=t0qBi)yCv_6hNNwla&8`ivwPUEPq&-IW3e7<=XWbSWmW6IjoJGI?8!am)%?` zl;JsLYWdZb))&UQr&GmQs~(`{glE#+aUI|-4(J(vMY<1qiD!Y=tBdy_dthxg)f3xM z&gjB?j3=u3Qgk=0ayR(t$Vfc6;hOkmQb+83r3(jNtc!+0x6QTh(=@oL`3@nP5Wh z@RXBV`ZqbIJdG(U;7?|Zbm;Mr9LOm_AQA{OoGA;J@*804(gNX>;tY_K#arg<>OjozG+>@K!H;idD2PjP z-ujqFbw$VMm`aLo-2c{zJA!mBdwNZbRvl~4o0lH6-|QXpRbbnEq}V&sjHr*$OIbQ? 
z_m`k7VAamq|Ap8D+Jszey=38X*|kL&iHZD02-I(6n!yxJhlCo^2dQ-d2B$AhaF^XzNg?rVwbYgQopm7AogVeJ^A z^dg`AJMh_OQLUKM1iom8+wW7i1KW9h%vRt=$E!hL4wOqapr-U(-L~_7TGROo`{xzm znsOkLDM7tTX)a_*Ts%vO<0w(VG;;Xo5&B;e{hFG$BT9c3FiY*-jmnd( zM6*sM4A~Rn&((208gL;Op>esR~vC9*`9cxn{udobN zQybK0JdBuTwJ6}F_1#=8S(=-fRLV3UcX zA*U9vRJ+QrK^>K}YNM>|atTUxve7*v*@Yv#2yz`h^c>t5K@SDQp4l|eG`?rs+}0CH zj?$Cyd%s(hHI_AwmJvQ2AF}iY=vQ~S&lf!(B_HxdVR*b8GI!G$*0N&VXR_+eZ}P_C z#rARBWs*n0Z*Wc?2(RI|@+Y35sa-qjo2l2EUlaHHZ>al0?3epLY~(y9(q0jOpwQ(0 z53iRep7T3j?engJs9ig^72Vqgj*I#brHM?&Oi#KQ&KhnmJ7({`lchNIf5t0F>U;xn zy#_ftsH23|o&akyM<3)YJpacEw}H#$%lRqnv{GEJLCFX0*8vV275AD%i6Mue%ZZ4W z^LlaavQHifha9hn{=2y58?9rCK;r^yLRY~_DK#vM%UE~^@=lMxAF8h}&v|emOmmXD zMg-we`AKLCJ!z9BIq%sE4W{R=vnmgs=&$SkV>59hP#nb%8FZdfHJp1Yg*Th~aaEIs zuy>l(v!*4R4LXbf_`)I74rujL{|iENk461>pPcR!mo5Wt>i5uk(P+m-tVyb%7;g5r zh1xy1mzATWs)GI54kRThd(}!sN>kp~P$&Gb!-`FgeLK&Q3LFXJGsD&m=MVC~;XHOO zg}7YQ$xt3}LBWCQ=g;Ibnl_3!Z&S4*jaEBP)!ZcBq{16p+?bb|Y97OZigKKzt2dm^ zlS5K>Y+#qR?|TaEkGa)(OwsT7N@<6$UP>*FF2S!e*H2KqOu4pGoA{ONy?RixtFCvR zzd`G9&$7?-4?Qk)#}k>lwU2qVJc+(4K6)fS+XyFTZhy56b_RM!N)No^p}ZCjR|kZ7 zT&myW4C49z6GXRro8JBu!zyt}F*_tQs&tB-_rjw04Sr=2{*DRP`GV?42t|u8#NKIVY7RlD_WJlLMR% zq0c7a%Rgw_mZzZ!Kv%Ux&EU!)zTO`z8xT6;>k|leMgQWqnnFV?{xIi@>40SaVH**Ji%gqH;%I;w2(q2M*;X z-F|D)u|U|j^?PI9MRk}W#lEQ zp2dnX=ut0|Q7v;)b^ovOD=8Ot4-1x$X^i`Tag!!EfqlHJI#)P7NxwBX{o3k#&+2!C z3<{fZp9MwGp(@@S!*ybC`HvKdVh~hovEPRx<-81b5eEfpR(fFK$pKmWrHvoCfY=IntbJX4x*XTtWq3tm}5w0+|bCoOK<0JD&`!6OFj#fW)~y`!s_5SO0rZ2%0la_KRnmGTHSL+S?{f+mitIrG|$T>4b@@ztrUQ zItC}XeK?TUp&Ba|)6_pLu!5LjIQk^*^4oHOg$>_>y0fx2pEi3VjW z=Aa*Q25f%jPG-bDGVE;FpWTU6I%5qo+Z$*+3r~y}ouk4hFRrFogt=QF9AoDpZ(S1} zp4{GGg&t$J7xRl$2B~Xk25@h05S2~bUA4bOy-w7IaKWoB{KIi$Fgb2e^B~x-c<+l? 
zZ=Y7V^!iKnA+;>#JAZDdLvO@FrTH3KW~Kf49_`~>?ipyb0)34N?;Qwgzjg%{oENB;ux(Y(Q#UAv2q=p*xH-nd~a)*f1#@@M3iH{khZVkyOoEVz5)b(ynowElb8hzR@PIMaRoprrWFwrnl z2Em_GyeNgTktkl2>ePek6zYE{06IcFZ9&PLRi1-Yg+sIgf>s#`uupSBD#BZK))yvQdlFzLp%ZXJ~#% za5z2@Dh~vu2e9&pP;go(Ha}z_9N$LqPF~MG&>Q(|!K_s@=UlX_oAJP$NxAX&4f<5Y z*Q051V>54ThJvX(P89Zc>F@KMW64WQZ>7J|L5q*qauic#YtJcb_+Y}G-eGg$PEj4R zi*}chPLpgwCS5`qRfp^@C~h<5MMsIVE`obZw_!5rnDJPWHM^x?js7uX#o>4)mfEvM zn=rXCS@tHNjnsO1o+a6{CbRQqbEX41>^y}yUX|0wA6#~bZao1V$r_d<88%N{++VCN z9WvdZKB#39x1u2-R1W8eR>yPA=JC8pUffVadHBO6#;bqYl*sjW%5cY6j+UdEnMwAN zSwq}hRBR&}!MjE=d(}>jK!AHuEdi^&qGK6GR+d+sDUm~_cG&n78e$QGqf7YZt!#FI zJh7C*{)4>X;hkNJv2j1K1};-pWUP+a;1&r^qDLL_Mx*DgEos58&GxzlfvQcSJPDuY z?b)NP$T77rkt@%_fzjKW;F|dX2T}J;LyFGLPrCPIgF*wdh$0My4-RVczl*<>q!?@V zBy8;lTW=_xSEnQ@&k`x?GP8zrjI(BiOexRw-f!Ll&OYJ|j#^BB%3)tn<+#Fq|@D4K@uY z5gLrA5jZ>Gi79WUNS<@hrA!#-H{Pq6v5j7Qj!4$6sqS-U*jb&mUFgfc3Oi$v&yOT{1Cy8c1~r8j1@jSoGo~obxC2;06&^TW8T_I!OevVFU$0svakhNg%>+i8wC9ueKas&lbDuJ-y_D50m@Y%=+i9xZA0i zMxrYi8>qRLB}rhQzWE>KJyzsMYGLu5XMU=hTB=(6U0S%lf2e(`2-0{hwtnnQ7T02+ zEU$y-xadahC`gIcp0B?6u;w4A$Z@qTaC0vSH+j$BCSH(_vbEm4Royl(b9meY5*A@^ zC{B#a;!n@rBv>?1v2*yc{Jh>OQ)Jn2j@?pouW(e}lo`AI1W}rmCQsDzzPa-ifqY2^ zInZ_DB-wo=da9qS0`SCD>U3Y37P|&tAV67b2iHp^uYW}S( z!iR%dT5hmNGgZ?vFRlpISKc#st_U(!&@yMN=+>7tnzJ?}m?{~4)kLH$$BQ>Amgmyb zW#AXBELg^igDqJbLQIvwS>)EG9Lu-UOD>Ai<}LnX&nk@vYM&CfbL%Xl9`&Xgw(~TW zYU6bqMH&mQ%0z7C!r123EYdk!s*919u;UH?rnLWK8^z5F3+X0RWf}{1Tpe@jUlK9UY9Hm3aabgB1h;Aa-#_?Ei+#gJa3X&9$N*@oS}-G$`4|z=g&d0 zaDf|)dmhMF-b3?O(Y!;Q@l(c?#SJfvQ!b=F>l{trBhT{mYWHfUmWu%ko7vli1wyM= zA*{#ImBvRIuERwnTWPwC0>h|Dn&IZ%pq=;gcZ?lUS*B7Pr^&k{nyJjQR?;-dai7EE z*WgQyI~tTZvFh2~!q2RKOZ2|Q$e-%ydG9ov^+CXQn^NmDE#mt9zmXOBHJwNJlnFZH zSTrZ>^7SEfTyNs-t&3#kdsiWwEpM7<8s{t`qe>AFtlDiuyv(sMCfsx9j|D`s`8{ zOO+b4x4@!u-i69zMR3Y-E8;8FX!_@gLvv}xO{#b2dPjY9=g7yVaT@igq_!5#19wAk zN_~9Qr6by~?)xTy&byIe&R9vXJGl8p55Y!d48Js$M}FD^LF82Co;Hjud^zJ-3t0xW znQgX_B%Q*iQ(`k7m`~A>)AibGXxo7>b8xATI7x;RKt zZCg#z+PZUE9A2YE(P2|;%gn5hxGt^W!X&Z`k7hD>gaa>w%EhKMhg;ybTyJJise4i8 
z#-V;)Cw1*0*FVaGO799ythH=R9LMkK{QjDnmIoUrZ00TM?w{m}*vEn4enEPo9gA1@ zgG<+BvnDwaWaCX|~gg)xunddE;hxA|G=9Rmk;FBb=g`izY>8c)#yrJV=P3c2OIh59 ze_cf2Jwxhw?0p#bKq;GSDv`29xOg}<_`_W=Sfb&2S@(jUH<(h`Iu^e<48HOBAvtbe z638!v*Voc@d7>C~L_TU7{l?_#_Ro5vgFC4yqfeZ*8h$>r$Qyz7^Kc*H%+qV4wsQ_@ zmYa(0xHa){MBZID-7OnxHMM$>;bZc-sHXC&?XtAfLyGIT=nZ0OP^qZ?Vn*2&JchdE zrGOXyv2FMgqdl(BwC4VWWRm4s`lIiRCSCV_wo1#v$MAJ6o;&X|!C^s1cfLSRD*A_( zT<1g5HSAd=_vOS#h}dhxGPdoewI@zNQifD|*B?SxkohxA-I7Fk;*aFT+D}>Ue2I zcd2Ap_&ln~3{@VYFMoip8hdJx9~`^J*rv$Po#oqYX{uIHk)W4zBt*fTZnHIa4= z&MlDoy`>jilCrE>^LZz#TybO)^INN+!0zF=&ZD49A6YqhXl6IN6&%3D;8Hl?383v_ z?ePXuZ_>L8Z>HqVvw9ga2Sv08aM4q@8831J#oPi)mU>uq^`Yr&BO3R>HC%-y+8nsa zmH*jo0G$l}T3w}eKVrATLN&tq%%+y%%)JqL4sVhD>Cp+ioNGyVkA1J+;NHA0d5ZCuq2T4nJ|59VZGd2ml9W+ z@F+*jCa`uQH&Bz^i*=Esa=$rXDI3mkW0OBPA`UrzkiLv@sL%8|Yr)-2_Bo?AQNnE$ zwdiarkD^n(8phj1ZCf?gWkAQ_wSB_Pb%S!w?>_!{aINC5e4XZ^;`s)_odmM_8fQlO zrVIv|U0owxJ)9Bqss8>7_<~G&#-XNQGtzIB($dm2W71;?Sg9Y-zuTTCySj3}_rZ}S zedC4u2?x`uu>|vp#`xQg#yDI>Uq2He-e}uo9lj71v+3LXJ>vh2# zT0<~|6508&aabmoaT*Wd!64ZA3_(wN8QK+3b6b-;jMU*{j-GAXUo|E6Z(HP#nXMD= z8=e=By8MCmEc~&wg6Ww*jG7hYhd1DmNLL)32paUMOHtp$1r)UA(C$Tqrq!9z#zm;7 zWh+@3S)cEf%2Z=y<2-L5Gk(zIN0*XgOY75q8bV@Gmxc(O7yZM3=D86%NqH&6FVi}S z)hyyHQ-(>R34VdNAxl6ZG5A3o2a)xKkhodh^%-tx#qW4v&B93ZNuq; z?NZaQ;VrsUMb+xR%cW+^$L#jk2Lg7tqbsD-g2Ctb7yyg{90BcsZa^p? 
z8PEbq2SfsL00n>mKoP(NpbjtrSOBa6;eZA}EkGC&>A!hXN1;_#O8{{l9s%6HZ|N7u z3Tk~??0{z;I*F8|nd%g{P-ck)S|mO)Jy85$D$rSg4HY zL@o=!K94OW0+tMyzb2^UWBLsy`1Ot8mJ1WaJbO3C`Qw-n;vmCbD)16(yk!*OC18Hr z@b~Y+)Z!gFzGB6XM&?6{(Z3CG(`|uFNYBH}{7RPA5a}Otg!8xds8X?`#kH)dsdXPD zDBVHBPAW+Y2bFdc4A3!J`Dx0U?LfE8B+TF z5Ec|wc@D;&Y^Qh;F-NR>f?PfH*Rke8{LeSN= z+@f@ba{N_yqGJ#|8y~h;z^FT8^7TW~|Am{Tni=>LLdb*7<1@eEB($OpgSU=+wzF3A z>+XMLRDS#X1R%Jr^tD5~;VdT6^MdP7t>!+;`jLqH0lo+5V{lQ%Ba4K^mu^JN(`dw7 zYdA#h>!PmOYa4OY;*sM_`q_n{lGkM|WpudPs!{rAZ46}mdFBe+t#mCAJj!x})<@Zo zf9$v<5bI(Fd(3{}I?VRu4=a@oN47RiRxp$l?RQHLXWKW9-y(=_OMKRo;i&!olTtS_ z1*uGZ(@*yIbBNQYpk59n`}hRcU#Ry={yRoq-)%7--HP-Zb)Vhnbk zgJyX60TyIYim)RY5?T?5Z)U;70h(r;vKLMx1%-PeTR9oKQD$Q)*%JV7pH_4sHsSY* z6|2-WSul&;gp~}D(khXphp=gaDk@x`uqbPPPaX{>3Lg+=;KWb>rHw;pUO;JHDpHw{ z2kbMoBC9rc88?zkyEb?F`(%rVcS6m8YGjo}$zo(xm;EM@kA&0rfvUnWWq3%GqWnzG zq?GO<5`~G_LJruUnHasl`aN_Z!RLDTtDF+cCAKJxH90ZAi;WZ`*t(~iWa_49NxCLQ zlaL>U_e|#8gb03=6i0twPp6Qq!|33XF*y!Wru*i!D_E8ZHPCIjtW1@YkAf}Zq`g-? z`lphTmoyC%L9`;C(2|ipC05H&%^Tfzcsp6Jq_K-O-7?e>&Fq#CZ9l;L&mFgv*sZC< z0Nad0^@M%-{;RKhBBNN6Hc|0@qS=$h0dBAPWTu7tIlc8>3x0L4p6ZTsdv@dR!=^tz z+KKYm+g!sHV0z6K?@iU3mVGr?u0b?*>oU55Pa`TPg9S+*+qG0Tm|o&_cOlMuFx&~< zW&@{Ea;DcUsUwus6-Kp~>!#>oXp*$j6Z=0^MJvithFhe)Sy?wfn^>Xi$PyT#m}s4d zt@|+SM^Ys|~I6qI7ug#g}nCq6V=!Lar1xZIA?$}cAg>KALarny5TJS3)0`lYE z$7ql*sh2c3!MNGep9@?wzFs@?_a-81R#CZzkCPq=H-47tJ|)w&Kej`7Vm&|GD0
$Ig+EERbQWh;IG7P~%y}z};M6MVPR&n4px;;QMv`#Q%A@i6hCEf8`?!>gWQ>tmqV1kH9o_#<~C+2WLGEp3vr@yMw zja|2PzK-o{#Y|)`tj;O-8x1Ax+b8sw@-uXnA*P23GAd*x|LP1B7^i$c{VxuL^i~Us z*AHah;rTeRtaKH3PnV`niiEPXT7|YDspIKsRgQ_mngsXi3)R+I%(uNgJxOxdJUVMM z7UqtN&ILN#avSIAc>dYk51&GtxECBYp2+NMlD#mZ(Y&LWi;Q-Ov@&nhR};?bVJs-Y zvA7(|DdOFNkBB#yLClL(72+2-kj3V$fw^sCNRm0QBBtX8LKSH;#_1(%bOH0<7}?=Y zh79Bx1ssj~xIG~}x2JFS_30CP(iA7jw=1>SPZtXO_A?-~>R^nk*TA`_7?x=|n{cAh zji>DBnYsH7%NaJ`ML>@(O zbtce^6R+V0<(^*M^m#}`&6u>F_sF5y%u2fE^Wa{Hx2z9G+coiQqw^%G`;_9~OYQ8g zha{6y5b1je{TV6E%9_B7%&D<(Rzf z&VJ94F@&1Vs~}Q2Ts-z9?KT$gt@-Mn{B_Tx-FCKf?0Y&N;B}7{o3W+XW25@s{)E-q zuBA3AEbY-*II3t1fd&wS|MEB<)VZihpj*#XzdOx?r_~}P<$2`wFse^&em4)wc{<76 zDvPj5KTswmkohP(wjSj0p zdY7dtidPQb3Ak?478s6|EMv#u9vzN<_6s)@NJ5k7KqsLBz;I3g+mH}3cQ_1Y7VJo} z7&8hPapTd3zWF#D~ zx-lP6edKKOcni66=Q%Klv}?hBNI2>y;zdJ>7Z;uR*#@y~K+h#9<<_{!%hbaD+=Ccd zNRq}uMm1k1nh*#7Rpz= z*4(7L&REIs0_n&xVTi6pm-%NXk>B06`7y*t0f$6pNmoEfx1;`a zCZ(S`?U4eqs6aoGxg?UfDOj#vmLrI6n$>xRkNNoz*{R03^b7c5Q66Av;Xuka*$Q&i zqL2u2GFIWn<@nFmZUFnSL;fh)?+P-W%WrfPNhn^BZJyF1dLq{XuW+5lzn=V9Q)P$@ z!H!3{WpCr5H<~;)wH{+4*FG0IDmLBphfnVDG}P8E0teGDe7s#9Z`V70wu2ro%q?0Dc?ut4{hr~+8Yz=4N*ok%+u?*Y zGD%pY4&dx%e-21<#G#BTNcO{huhfnV`GtZ=-_6~31PRQqW=7IN++2YxC=$TY%hmHq zeb9L$UjId>J;hE-CORXc=lU_|x=EW|8vULjNi{tbI#wBCU+5RjGABwUw#}X&9+rty zRw_hAwv%Zc9xe$&{Y_?~FX;$CyCTDZ6OP|cw2HP*K_Nhe3_B#X8#`XIA25VP6iXE3 zpzon9BwtEo&y+O>BVzVNjvVz!E}CSuI-;mJ+K4UFTg;FzqwYNg5f92%>p0%nnK>L^ zB=<~*7a_fP%jNc~56^T0*Xmt;R|DXyczt_!(qogRZGF7Nbl#)I7U`Q02Ts#r7fV3` zhq|ek;Zg+R$iFvTPE@HD18yN>vnqO?Jql6W6KacKwqk#5%Zp`3WbOz4zR$&f|CS>! 
zvv>9*vt|9e`dalNsH2`H2eSF(&uaJgu`~f(3$sBq6=zzYlsj?P6fP-PxiJUI=)4IB zLeq&jk<=eLC9_(OJ|)KF4n}KkCy54Cnj2OiGn!7TC=>#mXq%KAuDa){cvG9DH(7l( zecntWcu_L0r^|VI$wMO)*8=Ot=hoKY6D@QO*Ob`hz4@H%Tr#^VL4hSBoVxt8pOt;LcD{XiLOY!cp<=Y3+(Nr?khA5$Nsr8-(w zn8OZg%{RhiA$h|n$MSka&Ra>q?|c$lp?4+!RCPa+N~oWuG-CC3RO0k6X%z4-iwM=6 z;11d;wP78fJDr#^HM~6TQNzSo7{?^YaoXHI_D-!En(U&RhDMFy3!GZs0q0x*FXx^m z!#Q~6YUF1s=nl=e8k=X~4YN`Zn4UM>6$}0$v)MD_mZl7Yyu|hmrKFKInu&dOh zImaYcc%$1dIIKqX_3ScW0UXri>KVn}jidoS6e~Gv3R3XDnZ9Z{9QZOC!@J(S03Z8x z=~uVhjV*@x2ip@|?m8k}UaxbzgO!DMXptIHY?!iW@Kb~~r&9N)52jai+?9Vf)r2MV zJ5rS$_NA7W`g8g>5goM@@llkV;#{veTV9nv*Sl0%I#IF6g(^SmNtUu0kyH*;EV@3Q z$%IwZz>Ymrs`3yl5EZC?*M+MkWG=v8Q1T53M8>eTOtKB_;2 zh*j=9PdD5BKjZ?9zE-3$>~Q8YPnXx^6q}3Fj{Q0D$?l@sry209cIe=OVUt(CLr(#G zG4YOJ1|e+scHClMzw! zy&?AAc%|slX-Hislqtt=OEC2Mr8XyIRENUh$zkLURBzcX*TTFwuH}B^8i^J7f+_?{ zusPWkBBQ?`VWf7ws$l4^lTw!gT4~*+5SD5%=saRxj~* z-x`2>Nw151|62159*r{68}E`*9<9ysm*%%`ozIZ1rK(S>%1?A!ohzAeFK`P&wJ*SU zgTkG(H6o?Ma-swKos-jk1Dx}<7bbFl5n`w#@WNm6LdX}>sQthnFek<@9{Z7Er9n3_ zfub&3?`is*NhVu|hwZR><@%l&?_c@CO^;FfpG(LWu7$sO9~~mZ2s-N@lJ1|?(H|8d zROP`g4bB%DxM+HspFPtX7X_P3(=EP})i1*lq8X)=2`R}!N%!a=|_-99CV0=r@oLrQ3+WV@T)un~k!J8iN88=88LPzGe+^Qg!9a^4HKLU@sosvsN;)mNJy0m09IEf7Y zjQ#;PEB_Hv$EvVj(!psF?NQDp~TOx_54Lmw{aLq!G{O z`du7n4d2fSDo7pw5KSpLB-l45>`}*yzb(e+wz~Cb-*ufda-Z#YGFW6#Q7Ig9E}St7 zE@i~@IJ5N!yk)?*zVn9t-}tIluUo3^O3z$Ly5)>=6nMc}Fj`P{xWp+RG&X?taQUb? z2C5W+Y!%qjtWu0?7a9BNH!EIBlGU$tNq1&j)H?P7w~b%8r1x$0rPkwDHZ;%!Y11`P zOdTh1F99UeETN}PUFv!)b{gjVF@p>=Zv3gIsETn%NBSgdM3iNkzY)_$Kan=OPVi`Z zU9BeuTEt!~Ne2onBtTyqyhKyXxRx5r$f{hR7+tdO_t{QCw7ee1cEHWL1?l$BVGJW3 z8Tj^!m2wZopK@u7F58Zd{n$XzZM{q9&hoz5gn=BVOx;)bDbMB7$z*>YWJs^WfC82ZAudLMVtV8p6`NOhlJ z1gQ!8I3{H}T#e}EF?%;X21sJ$2=(3y#sVJo#f9Y<)P*{2gr0uZzn@t=%QSqdkl6xK z?lfTois)is%8B2V;lvW=1*DBlfxdxtp8^WG59ee6(&r7qN>>h{E z&DO1*GD_sxPrD8YatBLVsap;~IUly6oLsBk*5i~erER8H#q$mM7v6kWzHVYh{X0T> z-0fzUWv#(b7_`S_>P=Pl9KN5a{mMlil&qLBG?2}? 
zbV&c&$tZ!diPHm(E$i^c6S}|5ZVhe(Go*GUs3>NHU&mwFk#*xGZH;^BGNKavB$aGS z(*WO~8BT*lo9))XeD9QI>Q?Ix`inJC1~uS$&7ZRGMU&}}1SK0BX!h=S<(roCn*;X` zu*S(L{bO)HI?2Rv%%UMSTXU1rs$yAh?J9%?P&&o^9a zAJ56;X%$Hs_k6EuVm>~f!$|`Cx#t9dEYszOz}C%T849*8CR1Fu;^H%9?>HlDjW{D0 ztiQto1J&OKfOjO-$=%GE?C@Lzl%%ZUG^v<=dFUJfllwMB*(PqK2Z?o6CpLbJ;Qy3GA9A73f?Q!jk!|lIqYP<=~XVRl~*v#*#ZMBloXJxi2U0I(B z1he|X^_Q(H(}g=ajfSUT6&Ml`d>2EyhMeM>kG-rnCuh(2`rcq935A0oj#UrK%LuGH z2fk2Z^8W!jK*qmB(cP4@oD!799R<#mXL(~7;uR&Ou@x;Oku=P3Yv~y&ZZv}|*nno* z&!P;1tgYYqCT;!t@`rES9k16!?e^AHH!Qp1`}YnkI`Okxwmv=Zp3z3Xx%d1deBr$x z>=ym=3$*!ryB#%M(J_-sr>zJ(hre+4b9epfi;H6wbca=c_2iEh4-tVQbY|8Q{jBpVK&hX0tUPdxn&+Be~E?Uz0fy$pYA#hjmo$fA6L zQ|LJfG5I ze!J<=*Y|Bvdm7xV#ZW$ae6{$y7u$}!b$d^dR*T^KeN?tcD^rnHDxj5MNyR87ejzR( zw8HuTiWhrf4En4vC$daY3P%CX5i=8^!4%Q#>9-d~;zoMHRPBFZ@@J-q*;Q>8^?u#x zGV5dosi~%B3}|}PWEE97O_O)peM&{hJ=w>o^h!~W!#ugZNMrFu7$;NmJiJ&wQKYl9 zMCY8CDkbW8*)#*qr{5MG0g}J}{1m^WIruDpNe?+ganAY_%>~G(`5|8P3%1ArkSvIh z%n$QbfaKGdSW|ei!qma4(lBA*1DQX_3P$GAC>@B<7BhSL^G=tBzM!@#l~xV?q1xj- z@eQ%4Ah*TcHMYrulsT=mUcA;`>vGlE#K&!l^IeiUOTuw+XDlXutS)}x2UBAvGHoqc zP)?gFRY{cAOsp_w>vU}d5fn03xv?}>Tx$Zfk0K5J7{(0)+@>*VwQ}*WNc@dWHn0BT zOCk|-S}fBjFTCVTS|qyjU3!ybi6ddY@OvSaTz~1$0Ndx}eHEp=uVOmytDVmKYCj?G zi$-J0yrXrP8!oQj@oidpa@y3NEd1BEcl@ybjx&c2pSh#2?~XHv51qNAU-Zvkp-u1X z-TO}AeX#p~cxU(ScWC2Rj-P#K(V~aW9zXu`hZiq?_~%8RB)K2x32_r@ zN90~3opMcDkb7N{(+VqsXdnddq+{oTgeYyNg>i?Y^l3VG_fL+u_uT!%&%OWtvRhVG z{~h-}bGq&D3){AS?MUb3yP~(Mhxc{f^OQi>4TYac2Y{|lu-Xv+S&^>(5?!N;DO2qN zy1MjyxJXyD&tVs?9ht2#usXcl{(zWax_wI$Zx zzAWsbzYYOJ;a0Z1@(Vv3y=`Un*T4Qt=5+neFRU3lzH6b$Qa<_3N|D-bo_s>|QRU#y z1-E{V=w;`n55yOMXRXv(D%Xr0Q-PmVHKQF;NdT*P2rLC#Rq4TNm1T(4+938h%g2=y zTwKrVVJh{?34VbP!n~h<^E`$yTEMHE@eH5TpW|xJz{$ADY&2q^IG#$@8lyRQovz}@ z!A*FhUN)M{$y9BzdSliZiFx9vbuvtl>)COYC(?{@PrZTnm!VEpsxVP*uSYlUVLAVF z$%dpTA<|N?GaE@tixE+0R&LX30?|y*Shi=+U`6xB8*jaFQ&ZD5U)^)$`-3$Ki;k`7 z-rU`>r7PIH>DX<@HZ`{&d-dS`6ECe*=`Fs9-xBHB*xa$Sp}aD);_G zgO$Nh^GHqel5Ayl{piuv>%V%mXB(lv7i8%PkR=(_AgtjCVnoStAqNuyob)WufNNl+ 
zNPY%@R8C4zvlN$3rn3aN;OLbcEtI{@n4W+B{QHcFeEwH2{fWuK=i8|al{1r^wnFeb zV1P^30bCCjr2%H(^@3^OHDVIK7uUZ_8dmbpG0|7wuHE$Ds#W)IN`G(okzXD^{;Mwx zi(17uT)chtV|!bg_kM9T9N%{1FP|D4d+Jv>j#BJpK#XC)c~T;xHdARWe3sCY3_^wLoAA~$U z=TDKZ<8_`?3WF*Pn^0EY{Ezqln}pjGgxidsqx8IlJ;%TyU&)?3_2z~5d2-v2C2VJS zi{czeG-m`+HezL(8DFtnEfDgPLN$2J!L1p^N#<-0ZsJieOSLH^jL{S@TY{mf@*}!U zP2p94pmNsrqQC=LDlrhU76YNVq`8bzxN=9zFnnKe1{pdo(@JGJCB0G_vNs?NS19x{ zMlGjnn2`PPLS3$K52IihrL=JOIFj?C#MK%KEpUi0vK~BhHv0q3aAt6WV_&B)Gn>lT z3(p8X9VCpw^%5r)E?Pl-Dknm=YEoFAKnfFh5qfZ$nRsvoO~zk@T8g3-$4qO`M4v0T z-nN+J^o^jJJLMMr>jek>-`}&^<%L6XW#ML}%_RCz^nI;nGH{@oRZIZERMceJ{ zj7J2zbdki?HB`K+xMxW3)<{k~(--hv*e}FgrwYFirKAB$1K0^O zg>uORCyVnmCYZD&!KBerDoGTp6y&5zp>#o<)-vI)miw3 ztiRXfI`_QW)GP3GdOe*IXo4*28uTYMqHTkoteSZ*;6E=82JzX}!Y}C8$ha0zoF>kT zC1j~WSsa(53)xt>B`n;rAKoQ52;sD{VgaKM>>kxYi0hYhjg~M~v*Asf zPP6Fyq7NsvMm_z#x@udEv(TtDliHg9CEjsqImAF#iWA3qN-}}}NZo@F`B=fmc8J3l zfA!TP@ciwP=S6Ptn+hsG5i!e46_gh9+a&zWGh}|nXcsBzB}2RY?*e}97k5}3TINR< z>t8i`#1kFhgAYo4R5|eXE}iQ{f+d`?}@F z*TjdNE`_OLXmrE1R}IBpa>QCgHH*6!c#q}wCTVf={uNot@O4K56#=8JVPr|e)(4kQ zZf6}E!wuzTf9JME6^&umSeEktT;fZ0Bds2|^r0jK@@WC=wSmg%kyca+i{NrnTF6)D zq(0&wX+$al{<11b*k=*~3@}T)N-1D4tBK_JI5*2n^#~z+4luVWTs133hBMbD!x5P@ znDM}sIzkw!?19(Xzr1VJiEVW)2cFx$^`5js9%)Zr+jHWHyKY)}TRrpMgHWcaS}nCO_0DA1x_M7#S1F|!kj#be89{F#Cu>e_z0q6 z1pf?5s8(1i2)c0bSaeDB1_f~=_JyeoWc@y5>2y>s)H2iHWGETli`O8da>!WXP*gIJFVp%MP{J|^6+$J9385CusEmYK;o^Eo90MFc13AX%Lyjp~-sVJE z(;Bf301mk_mZy}nS0JT9zfjdN&A*Ias0Ucqal^~|);xX90)u06p}1!BaDUgP-WnD; zxb~_8^}Alat^bs$KGe0b_0~s4uHE0dYfBL#p*kD%c;N}o;mKUqCS$;)?HMA)L#%Q{=f8od~w5(uOEQHm8 zR@RWGq>-A1czJ~7RUu%z!!YjF4y_=WN9)wm{L~7)1$>T4M_UBr15Rx^Qkk_LErDCc zsZ+E|ts9ocGHf8>8(h|~{lU@BYiejm^iZ&2+x<)X+Wf&Lj9oT1wI6+X|Na;EWxo{a z-#gHMa5x(6Tfb{>D%+QJH!RI$mS)}8?B07|)8I~@sjISPVEMlYC2VU!VX6Le7mRG-a)t=#d8o??*-Lo{i?ZK6kyF?Eix@D++QiA$^4Jg93 z%vs6^7+|1SV}QPQ=IXWW^6JHLs}i>#k@~hcnm1DDWu3|t&9it7gS-a!89C!2izQU! 
zaK?hB$Scf5F1=c~n?6j&7NM3?IVBE@jq_45$f`b1 z32HjKDnM}-=>%YFT(9!9d&EHCk>DzEId;7p^ zag$3&_iBC4uM}=^`3xQ{{qVCkzq;@>oyS|tym#?V6-%#UO@*C7quOsOP&zaHRg0#e z$8mlEUIt;DV#+0qb1E$ke8zv%Y4I<-!n}9hCCp8#1HVy;UXjEB7Jn+0qhWSAIZeVO zKFk0(OO_V+D&d19U~ek*Wb2w=Ns_l#q1cD4@l{GlxjyelL04rtE6eFY?!yS@7*#1f z>LnP>VlA$u7d1dTOBeP;6q@Vcg3X3vkikwc(Mm$%OVf5O-z_NAq)9MTZ-WP7JvdvLw5#lRJ z3dU!o{8M>eYsr7RF*_HSS>Nj@*7str+%cW!bz85v?<8Zgl#rlSG}?9Zs|WVKa&u?r z&97d4;MJSEPJi)<$G-6B7l*DH42xvfzkT<>z};_Of8@LO^!MNM-Rocb$xmMU=1)X=^B?MJoZ;erL-Gz)$@`j&w!g8Ng1$oVFC_guoD0dL_L1aYhQ9kwJk;cAB#bIbSpmfc{Xd zY)sKl87OjqWvlAa@=Hl+A+!h(Ndht7;ju{6i3hg%o2?qDU0WYW3^vyoB^rzCsh!v9 zbayy3?)s6s$*YSo`v>9($ay364wcK293L1@Ck`nr5z2QgXK_m(7Zu!Rtx%+c8L&Sn z-uyQqTWipB`ZIh?c8;q#11ICG0rybG^yBeZjWL>o*BRMbxJk0L@EVKPl(Mxm&q%g5 zYC&uSg2!ptak(>`MetK=;5|W7` zRBj1XG>){U7pEPufel+X48(B%)z)X5D%1{LdHqm2)?e$24Xod~ejt|G^5DAN+@3Ct z&K-0)<1N8xeYwY1*|w&manDF%L4AjnwzzDziaK9;I^qdbG_P#!y>2DOo-|c;=>yRZ zn6+R{ZlH1wq_YkrTIWC~r85EKRmC|E**}Y!7@FYD9IYaINu=n*z}&b6rz}h6)NZ_? z#{DL4H6FSFCu3?vEClT0II%=xHxNk#)Y3uB$B`xKbcXCjCCleRISvUC(C4KwIInuK zQfy&Y(^Z9kY)iT;ycW4uRTuwU&ym}X>aF4@do45!xA6FFlMlAH>Wn&ec~$bxn?*;A zILF&Tmi|Mc0R*&^%4tx(F_DHiL$K`1snt4gewZ{?2yhGr&g($Jba8@y6k;K))(LKk zmKK*R{3X0^;i~$r zCj`HPHRK8yLcL+p<9Z9h zT{IN{UmlU@sA{@}%2lI}^QOqx@mi8gj>R#&Lrr?i^M3c-T(V5aC0EPNoof2i`+EdM zlj}Lv8NSkRj;lCx>MhCp4+)opdMUS>Eie40a#d-?1}_%w%} z_ZZ>R{_y@*+z<;NHm;DXjL~r=zMgvXPw)SdTn7a<8q9Frgs(YxW=2w4na#n?85c}h zs!cCplq!SK?g#`cD&-23dG1D8y3ItDp7D6`sp|P2n~}N38b6sOJe|2VT96OMV&d8; zu*E2_B@ht{scSK=J>PF}Gw-k}g;D)3<1j2XIhnI6qnyzxf55m+cNVw@oXX$Gl?lLE3(ek9tDW%PNwN_7Sr5?irliyaj+{eEy(O7kp2cq$ssOTQO<@_5aJN-EMzDA(I zYWP0gRIXYBDlVf1U8gyH9AVn*T(uOF)r}gpC_VW<#I!~=`D0Q2fF`6D{rUbOW0_{s zbe}Mn?!qrbWzqoEM&C?vwQ`4x$Ie9-jGa^-R8E+WD@)nscg%) z2&v+hIL|bvQu(m3ZPgh`CUfByw3Xo$*h*cJZ!hk?uzZw7b&+rlF)kA08uWan^c)w_ z^Ucz8r`}Nf=bc0|kRxL$#;@J-{54b5=cgs(X`o}pGv*&IBDN9EZZeAV6f=WDK_J;AYBpI=AKg>Mpnzz*JA3p-edV%aBN?~aym(A!&{-?% z1A+QVD;%y`WLCQ~Bgd9i!b82Hp505@Q=7MJP4^rgjSriQ+g4_GZrz*;ccjA#MOnHd 
zoZ7r~XT!>^M$?j-(ZfCIEpT(e2$7X<79OX2K~}^N2^K@Eq5~|xm9`p!zxd|;g~w&b z|9Tx+w+0yt5*a&6ac#odLo%LE2r}l43%k)l#$+I44v?`1kg>`v$e0>)JSr=QRay|M z>N*fBb((nNHV~`8v{=dh^WO!rvc|Z|7y{I4kQ|8894I*-exxK~rHXOc7-li+;AU3O z%kk!^|5{JvMFj?0B}W5|ucunO{%ZxfZq<+5tPZ$t$5-rC5v{I-(<*$OmdZN)c&09k zEiLdh2Y)g{i0s)MJdQ`vYz`iqajQO?gD=D*1t?7R~1T|JzABi zpCw;u8ORq4@|6UC&Z`?hz7i~4;JgOjQjyIT*KMXnEF<(qAk_)DmQxBdK*Un!X~B!q zoo=;h@tD@AwNz#Ufo!D(4#yUoR30K#055wwK>`gdk-)Pi)$vU_9!T;GCnSR>e?BlHWBxY$5Bsb5nFT&yXcfnZ9S%rPW6$!HY7 z8pbA%a*7mhgj=R$&PX=+8Y#qVE+Udn!5}#ae#V+@6dZT4;Njgj|5)7m#VLr8NSSk- zLeHC+bEhVL`PN5-ky8D*OsW7z%JCK1Z?>A?)Pk=$xHW@$@@&q6`6FCDrobMVEhQU( z4%cFx#UqUqBO~LiL=yt6R53`!knTvhdwY9&$A%SU9SiBthr4$yD4ozRbnRaf3q%6T zd*GnjUmh6kBYRH@&(JqPOo4rP7EvT(%WH+bC%$;83(xE{n3tr%e7I35j{OHnD#Po; z^(GQXYkg$5&GaazwsL^nq`(xh!JCZ17;Y>Pn()Me#wv+aA;TU#l5dEg!md2nq$iGV z@yXuU@|A1T>9s4D$0pC>nD4u!leifP6{9lLkEvV;J$q$}w?Uu@8XhA24wZw3S0qUs z$}uU-2XHBf+3E?VjYG_qE$aTe7hWb@tpETfJ;Rsj&T+~!3|FBaXDXz?!7}~0stmi= zSjnr6yoT@N5q&Ou^{p<^&1gqOcrc6a$o1ouV1ebIV_#}rN`^ya-B}?kF`bX7K}DU zSe1cEHGg}rTI95JvTMI|&4O3%x}k39@S<{&bpLnm8WKO!lH&S9yU-|JKRvKh?0#Z;RcYR z3J|BNG&vv{BwlY2oE=$3p6*c*E>-Zd!v0C26G6&PLPtODlw}is&z+g8pWnCm==vUe zzcU8fnC^-#Sl{la-||=s`x{d3Mz`n{wb^aBSQczccti&qon^FY{a26mm3OaB$2DS` zD;_dM7w_vX{H5REJvX>7#>j`XE}Qm{GFaOar^`V$k5m64aZ8R+L6FUx1%1LEOa$r) z*<5R!uRxy#lIWY+aVa*vNd!cb4}lhq(@u8~#UsX!i{vH;+52GtKf@YH{al3=o-vpl zF1(Rq$5pKGIAh>Na$(P~RmeGHtYlMwRA!Y$gdP{NNJiLBWt~%2cYMv`)mJq(?5KWx z_2a%^!1Kh~$E$ZXHf*nYYSrUjIPVR%jnvdEYYl~3m(|pav;`%u)lZNI;SYBHa19%61<0C|d%iU(c4|$^sUV3ajIeF?QZg0PEiNzzWBZE zyLJ>_r#p7++D>w0-vS%EkYFDfL?R@?CnuZvB*{_|Kw3H~ReAvo62rf5-GBf6g=fUE zi~mEiR9t=Wf0EDJcIhVZ{mc)jUTTcGjS`g30%{hI^NlE3tB8&3!{sf+Kv*Bpr{~)+ zyNt`PNp2V~hrn{LhHFlU*>mDLQcKsywka(tZ@jTC9|A` zeOaontRstAoXSu%TXi$mvgT%QruUwPBN-thSxWd|+72YNxU{ux#ro8O#M--8Y=M<*Y@G$;?Y!VYSZV}?&g}Ka*+Ir=Fzs=;d)nU=~er#T3UOuci-{W z`h+*#J3Ly^Q7`)8@QtC!RfCOtmL%GW=^jTk>x(obs^ay1YgEYq`%z-){6) z!WFL(-fgxDuaPM)-M3YEj7Cp|ZO%Ean{K=Gp|lljwv$Rz>#6&N+CbdBW&v|eHhag( 
zaeno%8TWjz7Ek0Gd&nWkQm&^FwWy~Z5z+=SCzsF*#sw%Cgl7N*YJlv&McV~8_>CxL z1FYdyb5IsS_o+!>))J5hEZ{6{2$WdoE#fS0 z2l5t^O55r--tpXbwrxA}+?^ZJ=?!;2cV^qR?>u+M#=7m*BZuyLZfxxN2M#TbZF%XB z$H$(#@6br~54w(iZS%gz8{!PB()v=18v70`iB=8mYEAdnRXKE;=7z5x-u#tAZK8kP zcINpzHl@>>?s)z?+qa&9k4&e@SFL*C#NlPJ*s{YXURZVKpU1bymtFVy7guik#togF z4ee5m)oF{39vNJ8!$>u2b*pLhg3g8AN54+wXRCNbw3GQURRR2X5V%41qzM_s>8c;D zES|3x3|%6QwLcb=sX9XcO2J@2Y*ItYe-N^|2xP?yAXW&9GRTCf3ZNP`rzSlIN(DgG zxN+R+6Hq>t<;@Dv?N3t*G`$#tl|Wk~s~+0b&=9E&d-PhP)LMIVVP>?+(`+!x)%KcX zV<6I=aNU@xs2vFQZftgm&jmZ{1ETPPW`l**E2GJHuz56HxL##UT6{K@sj{Ue8jOT? ziCI@jpcQqS=nvx8C_Q+IFhvYLeKv2=Vt1mFQMb@oP|g}1#b!il8e1@lr7}!HNpEq*h^Vs+oI1WU$fqxl~bW;UkAiV=sJUt5mt^`{ua_e(X`YLn0S_ zL^0SqN!Z<6>M$fx2zx#xB+`XIA}1kDStQidwh!zg6l5P2?|V$N^X|eB9f5gEX6fug zYLxogm9@5i097^LFTk(<0sQ>_I*_3DWWIissBt~Igyr+-?n2bvB)5<>XyY5Wm)8y7D%Qb!qjiy@L`nYRpJKG ze}mqm-{_@^TTmoI9$iw}iIO)7x-4u)DfZL**!?IU-}P=)YV5k6p6gcC)~>?$vDD4p z%))4NAr@!|?~+@)u3MQ(t-P+Q^YF@adgb8`xa#xa-GO?aub%83-$pHwG)aC6^VU%t zXY66eA}|Jy6v>=~)UZjkxam_u;%ym9zaFb^5Zq{lxLTgfX|#A-qf%fQYfh`dGcBkJ zM8Br|zf@TN$dbG65?y^aeRJW)y9>wZ>quNOAbw1=TzW56 zPW4f_AR_K^psXsMHx~=GDzHOR-a*KjlaqBEIVxc#CR~H%*nY@I78u7Fy}=>u4+Ln8 zkyx?(k2Khs%5vz6CE0FzK&Ae5s=K+mra`N8I{I|??MN?g2{@t|@ALK=^WU3n;>W5q zZuS38?W(V>PectmiDvgMZ)Q76wMw$wu28T_pVwmcRhYe1ZtY6Cxh|EisQ??HsT;0yd1|{WZjFVH){i!L>?y3V zrYj`Ri@sZ`yOp5DqvqD#DyQmhg{FjL&vwP5+JJBTPg<8*^88 zFVTP>*)CN4@H&tY7pVhdrq;Z;5Mc8Nx*4+U6-bi(b~q(^F!hR|DaP|c0ET95@2g!EQAPYn8ZM*ix zo#K{%d#3Q-Gdo|rwoQ^;@!+=X6X%W0AI=-a`&)KBNczIje@{OvIuGNBQN>y+;8^i( zMngEQkW11|HH4dM$M#z*>%6{fh236}^?B97xtAZnp9zng4`}FL~GfcNNhr7&|%t>s6a+Yr-9^0GO6i^<*%&N zAl2J^=4jREqWW#_b<6yXOA?g}3%Pm&EmO#Y&WKm_+%{ux#-A=T8OXS<0(;aUJr1~K zfC2!JmkHw%V|Q$1h>R3EvBW3|QQs9hw;5)4Zp&crD}Or0W@Mys+g0!FxxKLN5oV9V zpx26-@4hv;bNO=mt?E@Wi5TpMum@7SSo90fBLn5377NmmcL}xyIAkeW6fZ$mrCuMT zgIUS@;6`h^amxF+EMQ4!tYHPR8D{g1o>36@d8+R2jj1q-umyCG$bSxnhk- z5RmP-R%en5n6EC3%XJWdM#N#T*|b?430;2a4u$rlTq}qF1IV*AQkO0@($>qbeZA5~ ztJEebgG@r^lPoLld^M-|a#rs< 
zLt@1$K^Q#ATou{6UgcIq^D-$*1B{yz6w*uY6Ql#pQW!#j>SVr5z;*5L$`9VQ~yOR9aG;ZOC;$Un#I+vmh4`RlE z;+HOlTOC=hmgQDt|KH-8PZa;phM3SFX$jh^JB(UU?j_n3MwXNBrQIW9_Y3d6W1SnTY? zaw_0~CCFm=7MEdjl-!vWmIqrTt#rz2(Mm4gjxx77_p(^eNMBlFxIrz6b}j8o1e+Ge zTE<$!YNxu!H!yZp^YEQpvb1MzSEumMuA%xwGVtWrYTD}(%}ZOVyOU0Dna4Va-JW72 z*Gcqqn>vYw{iw3-6jnNUFYg9qo9+hWivzO5P6XJY)O}euAfc|Mcs_q$peNr71Pl|m zTJ5@D&DS7Ewq0o{{Fc#O_?M|hK{Ll^o^O2GdB*qg-HOD)12A22B|d^ofBk}zHajJKLfQ-|h`H&z-i zXa=WE@ixiCc)2ZzzbpbFfjdbuZQ8T>YQZK|lhRvkb&p9A%&b)-2Vz)#`!a2`1w|>; z4Ud6~V+C9c?xil5o~Z>}Fhf@X%m5(VX zBZ5L8OEZZdiVb)$E0NMp@GZEqq9x@iTm|52npCHxko`anY()~jA~nz;py4f&U~U+2 z8A*O(@RnPw6+VN(S6Nl=HKa>qc#!p0*sO5H;H|LOD!lA0y1Zzs#C&+A%^JZcA{Xu} z5=4}qb4S2)0*!7YI$2KDQdeJ5D=|@91a!$bu2+#nZ5r!v^A&>lROqpAFz*w@2lH<6 z3fAzlh<1j`gjKwvLi5QgK-oS^_R6X~n`|@D_Q&Y@Bfi@1NO@1%=S%kvb=UeI+m(UEGs7t0|S% zsHVy49j^uQXp(tGuAb^rWshT(G_Jazx&E&Svg)pmagrDol`2f_#KxZuHhNUKbe1ud|Fy26-y=7Nl}xi zaq<@}qW$#6iR9$luPGx=dWim)4H~z0ss|SRVBtxVL*y4rqrHC8=d5so^f~LnzOGY! z&N`-Alg{pNhAI1|q9pW|dYxebf2;7r4CrO?r&n4nxQmQoo#hpY+V&123QY+Rg<5P~ zHC|PhX-CM3{n5tdRG@5T+p3RgkcLG9GuGQ?#kDcfCpAuE4SMyWrN29AfH=2dNwRXG zZ)W^kdE$>>UG?$Z)HWQDx0Hfm?TirlnBw4fY2|cZl`imkD$uV*pVZmR@=2Y|aQ6&p zGo{YX>2O9*x$K#}&MyALl$)MndC4_jKE9s+7vq~dt|=Eib6k_BW;*5Kz`)Iw9C2w} zT`(@kC)3UON#k-|nQq3JZhF=jnc1#+<~V1#<6{5AF7Pud%0nefKKM$b#T~VdR6ZoQ z8a7Fq$A0YS$JOMgz)-UkLbII;@I}~G)`A<|SZDUKNzFE%bJtsE%~CYS&(L4L+`p72 zKG(Te=eiWpr5C}7d>!OT0UpIo<&_Y_Fk%=u1Q^gx162SyF?^&%kV9S@X_pDJg!R_|m)ZecM4+43vec#2h~9 z>4853hv^?}D(?~mgAw-88Ns4zCdQQ(BS-+H#hPR6FNG!+ZGm4)cGBU9=D-XX(q+r; zb2$BP?=W*&eHn+(?Z$Cmc3I$w7_h^4N#A=vwX?VvjRmO}i-gvOd79FR!9n^`SSOt# znIN*VsUzE~Sq&o49IGSV#HWbnbYzRH4)=_(!UPpsvUjc2-_0siM9u0u>1U18ohMNN z#NQW{ourlD-m&A`g^!+@eD@uC?V&Tr`}>c-ajVMdn#|g?VP?n`ixuc}D-1dCm@wKiyp4nQdV|?R00l>Zv&!$eeqq zqw~xeL%;GAp9I}*Pd3ZRFT-(}WC`FFNr5?~9zmOX^cX4U>xHxWX)+=5&GfK_S9JDZ zb+VcHo234V%Kj;-_A%=J=}7qFE7$@_k(O>k%LZx)yV_n(&Hgx7m&&&ZUaeaAUB+2t zqf{Z^Pl`#eI%GFdodS=qYwx-0D3e;<%xl#b=9nh1Xl3;<}xQ-s6goJ0BeO1m_ 
z>{@R)hAAJor~&%d;O1AmH;eg?m$fWa7ONMEaEsfOixC@Hqn1r^CLAF@EowvtS_OSW2?Jd3}3;MflbyG747Ke`gR@!{d8kvZ)x zMWQ!l_PeZq-dyNn!CLYTD6&uyn5F!7K*G9A>8H z*p3<15b{O-aWnjQt2N)0KRJrKG8HC1*)EBMK7=5LCWvt_>qO|C>O|=K^qmN&llSxY zB&?X3BABl|q4+m*@&)&kSj34@)l~0bp=E$rI+540lvq>z^)|8rif}~4CX@IJ&Lr5- z2){eYJ6Y0Iy3~bbz9zzNzduhC;lhV)hhNyf^~EEdg$X)eJ$!Y?Jx^hOg-ahuHZnh> zdZ{~$BlDn8RRXSPkMmN=)Nh^kDkr#JpWQQIL2VmQ;!Vy7L=HJ6qQ6OE<6ROeJdl<>^>W?~X-_k1mgO*OxjMw1>03 zwb7^P- zR%^iyLhck0Geu}TZ)-vP?Yb2H;*i!*fy8WP^VnQ4Hlw>>3u)Nl%Vy#GK4n>P(L)HW z6UCpg*WgoAtrq|FGj<(h^rSW8#hy9c2WQh2SIHlnr(0w573o?#4_!Z@Wn<@@mW_m} z*(s{xuj%0GckSOq+nL=PjZ>v^pS_RciK*JU&)U&ZeD!oKUWvA+fwoz&OHa?CZ7a~W z@`|+Wnnl}wLR)O=)}Tik&P&^cK--2HGTX_%iqx%)re+hk3JBcwHxalLy3yE>R7Xo? zpH+sh{f`&lyIy5e9aG2{h2%zyt>-g}viQ%mw(y}`eO#_$l#=5{`r_KTLS3T!*MROF z)Cl$F9J=>isRQYv86*eCPPX~iCC5{-1rP`}!AS);{H25wovMlPicU%%&36cqU#C#v zQDjRU?)fife`c|08L+6M0`0aI7*xl;Y!1Z~O?|ljk)a45D1Oo3W>@CjDf^ijbeAL1 zSsrSskK4-1>AjAcj*3uACSmIr4SYsMNsNi%2A?PBj#k$UH+bDa&)CJL%j}P&$g3yG z8tnkJp8ClgZna;DTfH;6RUhY8lOHyXNe;x^Q8(+57Oa9a&Snm5PXO8K72BKlodE0du|aQX$r8 zJl|4Wa9lal*<|TxsjzBL&yUbkTG;`PNHxE)#PX{_?AKI=-78TKTY43!ht3i=FY`hC z=wi6ik0?P&L<52Rj+53J}gwVKKczoy5vr1%Q)M6MM%*i+;n=S-0@ffdP_ zd|VQ01wL|vsr(Y55*HUX)|3n=+ROn#8AEWkB#Cj;2u*g}^K0R~a|$O6u<)_1=+i<5 z-iBQpM%bLQB7;|a20oU=RFN z3>RsC5d(L3d_mw;dqUvUH?KMJa3C{&{n8UVTmDaJ=N{D5nFa8BZxTp=gxo+L1VTs% zBoIs_Kthn0AX1b^QAANHRvoB-t->Nj>!S~~g?4N0*1n%(XKIx;F{pjG&eZK=wocnW z+Pdz}?CwmbosKh|-9ENET`h6H^L;lrA-N$q9cR?sz`ggJ?{&WOo%1_v*6bd!vty{S zd1I|jsZb>?Hm~a+c2B(L%~^NnL{0PW);QkmUtQJTYTxI-%f;+vQcbn%y|r7G+iAAu z?v2&r@`(fab_;H_ZN2NaHS7(H78`n}dL6k12AQ-D5Vxb_b?d9L@^9I`x@VQ8b!Y1u ziN(8uHZ+cOI*V@|#Xh9&;8DzAUIw+C>q&HOJPfnX;hgbu1FDIqlB$*dYBYq3$O-;( z)DYEbIKfv+^aX{E(z<%=jPpYLPbp;528znc3t{BkJj8jvBYaUJxO3IXO{%^wQ&nea zahs7%LVvV*8oRx_t+?mOJ8R}4KDVrtHV7TvACPuD_($mXn3IfbV&EiusJkOb$qpt& z`xC$=SH{65dpTV4j3gt^fn%s*fysV*9_QTjJ0x6W)q>E;;r+zTK$NBYULrA0nfZi( z-J|s~5uq2?Z_!Ht^g`5fT9;ksji(rw1c#$7R5APp;ARwak~jSmD~5iJH}*ric^B!X zf~uwNi_lFWuN!1T&bWzg+?;Oec-^?MZakcBAc$z5o<$lev^p!Wl2k}ZwkRc4 
z^GYg|;9p9hq*z0K;Uw-izkdCjIX3+0LeShIZIt0Ydx?W}i#gprk91c_Ev44LPH8Ti z9Bq|myhL$cvXYcLeVt`9#l+xiM$1Y!uSl;eP@n*t&q<6qq}2vApS9vofvVO9DN*rS z2vGypb#Xb;8*H3lfHrr+RF=}pX*QB+?p+YmJjR^kG#e8foLJMRqb)gUNYnICv=KM2 z?vF;9XR~*ZkzEvRwlO!pMT^KjoR21_in&+|@bzK93?FcjLIN)QR4RwJ6@}dzY#E!y zPcJp+xVjM6t2y*Dy4fW9t03wb;<(Qcr{ZKLh;t8(Xn4jXKT>s<;P+B0cy~0xsLyuVD+TO@r<) zMcqGZ!#VMERNub7niEz8`eYQCqoAeiK#q;1aQm!0Qvt$pOK>G=ZOYX^iZ&BPgT2_X!2sOJYBg2F`66!#KbHys? zQ+Dtp+o4aXV!#X6z6ZHtE<>Ak-S|mtk-SDZs0L~ksZ!)GM;2i%R{^>)#3OZ%&h{1~-i9a^&jGZ?Asck}74l*7Ubn>-W98XY2I6 zE6zGs4cD~n>T-&dp0=%ZZDZ{=`tO@g-rKsiXX;|ZRFx_}bwyEja*3rry{>d`6`P;h zx2t`lFFjWsj2u>X`jpKH{v*w zfW_$I4xaTc9Ag`XO$YQ@FtL@B-A$N09k3}`blpm>Tb*3233euPKx)6W9LjKjM^^;Z+RU$;7;Rl5gtlJ9p{)by z60;Q#UY>64H^WM3O+>j4+fVcHuFYY4DL(mHG{{xJnP0BL-zXPe;mrx<;wQ`bQosi}AGG{jw8wGxJQQ=0DcVTs33JUuvY*6lT21W{Ah7$Y zoPH}1id=ybYZV&C6;Pssq!ff(*^JDQYBE-)Gn>4b-o-Bgk^Uk$#B_I3_NSKwRFKNR1Dz(SeSFF!1 zLVwYgT8!e}iD4FnHAx?gvCLw6*>h3r=db?_EA>MGwoyNIQCuy44%hOsQ;ihR5@aOj zjbl`xDc`^q$|wO4i%;j8Q79%(cO$Dz+rK2%F$L6QC-8e1o6mP+^VFgql(uX{N7&UK&DZ>q~?l-66xD<$| zQzHWbc0^_|$yY67&!p)MvVszrGStLP=g^~8rVYqGx3V*0mD+-LNir4Y(Q6}cT^~-j&EUly`KIjf-6eh14nzG-AwFX z%p^x^gBEFHAeu_9V>kvsMw6T80oW5K>_?MhpgM&yl;HEqFpyOL2t>J0$KoWvIH2-~ zN^Y^p@>lp(PY_-%KE8>m|<6Tpm`^QW)tv!r3ojp{j&Tx)C5oJz5NBa|l=Y)H`G$D-+ z#slV`qwxSSGiZ;CdrKq+-ROjP86JZUjjoOdLl^NKIvy5X8m7Sr0Mf-n;XaLPvq-*1 z@0CxjA@v3UD9aZc1W*d?4271#2ANU2Twt|iM^qWi222yFFQ`+J7RzJ(sUof-3p;;3 zmbV&S9S#*^*>pIn7)x>^lo(ug=tEqLW%<&eEvguch|}%!NCr4HTpP)khxe{5o6(bw z=^&Pu`tppCbT%W-=j2WUv5jR^!%UNjgBnpgB;wMG5_Gh94fJW^-X_>`-zDyB47K^j zkJ62(ujNpSVZ`c4^(ntudKCbnGKMSMiUUAPq*rI;N}VKBPQ8?rp;BD+8E~2c1V;JN za99*teZfzHcBx8yrlZW@5|fnaGCL-4SPkGC!@LxwnH1)cv!^L+Wx$-x#f3s|5Nj*K zIZCDqhzxbmF)_QF`%eA)=&Q82TeM@g9%m`NNB?@HJ9z#0W8a_ZyVSUQdU)`SiH3%W z>7n82-Hlur=;WUNa9K(}?|E?hhY)^y<;jkYC$AhneC4Um&Zn*r+db)oI2T6`wNXz~ zryZarTbbW2g2GkUHbq^?UyDArxMmtP4)8WMa&Fpo5;K&7V}`t>jFqp^1*tgTz)&GO zr2`xpWk??mC=a7y{Zc!`V5m;ZvA5QzV*PozN{q%0=T!N?Ztyj-0ZK)}Gip_HPE11B 
zh8PcHp*2(qNW+gQ1yc|@qv~#$IJxc6pFH}OzOiLpj}BHV^|SlMj*g+m+TNv(3{7f< zxu<`)V&a2J_eT%+pBQgA|HzTwKb*gOu(@ThIiL9#lbl!6Q(3#ExtQ3k_v`>BNBA&` z_bcw&J-RovALxC2YXvf1Y5pKe#KjC6&=>_$L-*XMLC7q~FPm9J;wbfmnykL;H==Hc zhG+)b1F7UV2F{j>W5jVRoXWVc*uwcED1`$Kzgj>hoKPI1`1XVWlr*(TJk5NLeUXbu zjbC7VDLIx4OG*R~>W?av?&M4HLN;5}QokvBGaKW1#Akpb;!Dg&XrHR5nt&1Smxv)l znNZaTlJD+k^pY>b-`y`+75;9A=o7j#DVJg?3e8X8 zL`+sN8th9f+?v&1UzskFW*4Sp=E+ktcG|^fUpP{gpOPyzZ5$qPYfRFT7Icqc(IvVy z$$;*`Y}WE*cleXE~t&8~-9i!?G# z&_5f{PtzSeGd2s8qOnI?s+X$=CfhqE2Wn~tCOg_E2dbIdJErcauD)ZcqhqqarlxMA6G-QTR#9UEEU)dS8jn7H&8OBvV^#FYVvuvrvNLoPx zEjM=2eg1~|Xq0twBZcE#-4@^vHj(+xk=m+S3 z7<9o+hWDTFbk&xavgEQfX8hc-r03J}RC)6AGRdt&LqR$HQ&7%q zl*)r2=jaVtv|BEneTClD8a!bz&}& zEssW5FmRq=piLZG(dwXG$Cr%z$E>SzrLes*M88hyLSTp&$lQ;!Q#6S`c*C#lX4D@fnk5RFRKCxJy(pwlW z;GW50)aLa~o*$s{j{M5|{d{{4jrt0*x*jzqhz7OFdDLMW>$b{sXqN&7YJl|z+}Uen z7~dhywkP719=uZD5od4RW*l+(4KdY&829emHS>MyC{9~H zdeedx<}avyh^Sr+IrH?igf3?pVKt0Scjg=5xm5mTeQ-UfK>ErP^dU!`BOlF9s5KAP z!@h>Cx;|9k`tX+T?dm<}N)IaG{J+qLThuo;)+0Fn--Gq!Ijb8u;`&et zeQ^DvRuyo@tnw^-;;bShtzHA3>%f{yBIb$zFIc8BhXnWd4XnX>)8-V0#0#Xvb0kFp z5xImENzywzj}Hb&+q=?_m^{d{eAj*}Mj)&=GtR-Qr{ML}l2$bR0aPL2!FbwZU|`UJ z!VZQLj3=00FmGXrV`*dA#qxYY=(f=QxC;)SG(l!8>2)DCF{=@RKa z877$^nPsv9vNf{D3*#yiHj^e(Ez>H~V`egDi_FfM{W3Q)pJBmfG0Bp}vcmF{)eLJV>lEuRHeNPM zY?*BPZ136W*j=!%anN#j=D5dcm2;O%iYuGzBsT@OeeNyp+uUDx=y)vf)bZTorQtQj zTgZEfkDt#5Uo+nYeq4Sn{u=%z{?`KH0zL$;2+|8m3)+MO9tb)YbSLOl(2roLV4L8$ z;5os!LL@>0LY4sGiI8_8??Qt@Cxp%kJs0{f%r9&X0JR03W&i;I008L#VE_dH00000 z0ssL30ss~O00962fdBvi0C?K%S4(djRTMt93#27@RiTL0D63W^#z~v13ae5Cic~0~ z5#2HwPwY{~GiL5gO!@;VR{R3Q@8Kb_M`B&EWW}0uzH{z8l0a%;!2(&yT;F@%=R4mW z6VXljod(pqcAcpAoXFNaTH|lJ?$faMwXFwq*!#xT*Jz{no2_4=S%1yeuhK?;Ve8lE z^Zr-1UZZjUS6g4FY_MVL8*48H-q-6iUQcbkPP^;g*Bia(>p$6gNOy;aw*HRZ9D4iT z(|Gu;t$(25(98cwH-^9H`sV??HGDx2DW?UUQ9%h!X+|Xlbc-Uo&CgvL(++>OZMDaH zM=Tq#)ERxknkj{p(S&y%QAH>G&CxUd_CEhlDCQG+MzATiOZK%-@9<}d&j^+lElVSo z&S}mP62X$SOV$kNQ?};#ub@-d4d?@w%MkC!tUKr3fHpafkoR(4jS!W1k6F%{Rs}C& 
zCxB(qdO`;*8PH>nL}c5xyB4;1=QC(K-p?Vs!=837$jQ$Byp zULJ9l8mC4rSxz}dsYbDKmG^5q!8uF&*1@}fDseumU5-n8{%(zgJx0Qq?&JSG>|fr~ z)O;LU6V-v9PF|4m#8%Ln{T`cv>zx;2N)3Ga#3)$uo4S>q}^H^_XzyMn)f4tZDN zoUoq=XWrL)yi=mKg4TpBOC%+vVn+03**j<}=94;WLFh4`L_qE_JPWR5Dkqo`nd8jR zo}iZv&MTIl8~U}U<5rY0eaW)1O~}jX->Es_DRP&nm)zYW)K0B*<0@W?E!vYDz^7J2 zX1pE8W*wz=SL!#jsXD*}m#AM1U9EA1NTrT~KCPX^tJu)WI^b1S3-m4qk`qX&Mg(*p z!)JuEj58GZ0-Y{^5cRx|C`;I#@tOFY;9CrI2lNH+CGaB|D)J@Yg~ck~H-WpEaa2{; zSMxt&yAxw)?WF8gcrIH$C#cZ^*`9SW&FvNtdy=Q0zyve=SH5+Ir`BER^)uu>!c-`F zB1k!$MrL1Xm7~Ii&4=W?g_F?sqM9SL?x%=9;oMT#?P9cI%?09!41r=zayrGimlNF* zl`6nf1=qlJCMtnX;kXc|65Cm4T2e5W$IJ*^P_}%y`d*Se<`#>urh|SAR=YUsDf`QD zCfs*o_>GbM#PHB~QbzDQq1{}bQJuDHE>gl%%wXTqf$FT2X#@;&D z=Z5cnU#Eyf^40x|JbtTnUwKvvJtxoH#BRr%Jn7(O%1Ih%fE6Nh&*?^p34KDu|4a=yHWgy1j5{+?+)v^V3_ zyk4anSVxwqZCZc-5FOH4_q9CW8|wk^GXuV2W4kVEHaKqHrUJVYSa=R`>A#ubKbCubIE2`IO^0s(Y$&jCl$wkOT(q89Z|H_pJHb4zxo6cCDBAt=Dj`kGOfsS;dGhOIPH@eeQQ}QHV3Zzhq?Ebpu#(-{NIKuToP4jA&KGpkNN{_OA-oBch>n zDcyeopJJSE0C?Kl!T7&*17k0P_XduDh>eVleUTg8*t8=QHwZ*}Zx9IrG8!UfH?V2P zL@;b%R&d?GtnIRaMZt9wb1aiKn8OOLnwz6$l)-6 za2PjmL5<}GigIN!X>Vk9D+6+Q6kK;PaMk~3fQ#?~74QVZxqLt_uPuxlxj{r50M-gS MkN^Qz)kKW|0C6biBme*a literal 0 HcmV?d00001 diff --git a/browser/app/fonts/lato/lato-normal.woff2 b/browser/app/fonts/lato/lato-normal.woff2 new file mode 100755 index 0000000000000000000000000000000000000000..2a119ebd595ba7e29b9ca44d78df16a35720d586 GIT binary patch literal 30348 zcmV)NK)1hlPew8T0RR910CtQ36951J0V-es0CpSz0t43o00000000000000000000 z0000#Mn+Uk92zDYfu;f+l2`^{0EHR|N(qEz5eN!`+AM;%00A}vBm;*e3xiYu1Rw>U zb_a!841rBQcV%K`23w64af9im<9!jNH>)DY-^>L-iRwgo<>5MS(K#=Md9OtWbGIXr zaJc^kvj6}8q@*&2CjCrW0CmdT2StJcXChIpqtIKSP^iL4o@jk9Hjb=Hn&N$j_+;@r z8pu#ms8DnSLhI4xP&W@6q0j@t4rE<{F>3jopI-5xrAP|a$}qJR-=Xw^``JjsNHR>{ zV;5Q}RwvV~M3uDMyCMv~Ze~SYqsYAUY(g zJ?G`&zcs(^dsSWCN&aRQ5QHI;go>Cv(eWe{ky^KIW>z7je1M-@@7MO1!Gy5_$r|BC z^vKYg_UVW;&<69uO42A`vyqEP^Lm0__W*4(rq0H|0K@=Put1DNR5&tK3`|N4K*0hd zYMZfkbJ6PDyLR*U`*&Z<|9vh$-1e0lHg5kw46KvhYLJHrIDIOEc+vo+togp_ngO4A 
zkUJ!bip=gb4ecT#347Mx@*A-dB(gP+w|F&a>Ff^T_wz{rEY1C!gHlo)=tzSNQ?11Ii=n3^6g|}2oI(~c}oL`lMYxq{QqmJ0g9yZfK&`r8pWX` z8_ucqZqan_etgR3sM@A-Q+cb9t(`YavyzCE604X7>+UUBR{v1Zv>aWQbh~#e>2vIN z3hR`h0m9=<--H{G;!g5E*YZ}lD`0LqFa71s9=HNRQebsL7KF)6mEqT`t3siOgsYIW zKQ(_?GmLsS^)~(9XACmdH!Vn*T!^y4%##otQsE^6e3h8pJB6%LO?72?pqBlzyegI; zL$d8aj&Q%Qsu^Ne6CGPqC{)*|A+I7tg~mvPq{%}b;_cll+67$jt*fv?(r4W%*EU4rm$H2Gopfgafq= z&1p8>`aq}%g#wi7E_+DQiRQT-VFygvID24B+J}wPJuo|F2dI(!jC>Q~+*ib%R_I>0H;H%wG%N%J z!dYc1Dal^lutFFy7-LN1xt!dOXULt{|Nl)&&)Z7g+PhUnMMcbyQ6nNojQmjtdfg9u zsQX*;q|nOH#kpf_xx-xQr}1Edl=dRR-yz=DUlvpmRM7orE|K#SaEy2!! z^f}?4&Zj)6ncg2f`&;KK%|jo1yrUAj3mV#82)x9&a2aB*EbeT^tgvl*zvJoBxZWnS)N=lvJGDKDv|y0n)5;w~?h zd*#0xRg=BXs`Yin+bW0aGxd{!X4Hda*gv?NnB!>e?cT`Y4vsI4KTMEGJ0(v`ruJzs z>2O9K=N@00ZcIPS+<0M(%o1jmv-Vlwq#b{l|GS`%OC#qN zs~5X}Z@spVkL5}qWHIrX`7e-Vge}fBXpUNvwutkDOSpM>`Re{!;NUW{mb7E|B4#9? z5D5O@hhMAtYjF~sWk#|FuHWnGEiqeNAePpTRGYxxpQ>AzO?cNX-uU3n z58Y08ZU6y;!DblB9F2Z9K6ArhFc=I5L!nTpwLio5{d2Q`{40xU!ch2Z-w)C+o5qU8 zjfb~iP$S$TqAt!$cB@*|Q8YDLf^Lg`x}naPHHEEt`-pS33vM1>zNY`It%bCVtem`J z6L1y0AA!Q)2xOpYXpEZrw1ywKUDtW0+uEz?i-Dn$NtlLb<`(0d!nS7Ts(t<-j!si9 z`KyT~!X|L(0I_SmYY+G@VpqHF{K=aiaSX^NO*?;1Ld%NFv0Cik)uS%YRNkATvtx4N z8b{@X6d{vj<($%--#Wr<))7QmG z0!u#C;7DU}5YbiRX;4PuF(*uv#Ej%O$ZMHk zE%93h3yqp(ET5a4r>==-^O7QI;CMfHznMd4*y*)A0MPMw&+hmqI`Ny%{Jis@o4V25 zmtFqu;t#BUyz8s7iF8e|rPY>Ib#SUtW@9t0KbM;?qQgLpqwymIP8A|X++yMlC1tYa z^2tw?dQSP6YDB7|tsYb(eOss6gT40I?|?($aHb=UI@atzr>E+BraU{<^KH5Lcy|a> znWo+M;K%b43M4{gie=lAohc_rE=oPPyq)nAGACk7UdR;4wlll&oRh0`iPyuIw>37Q zU?Q1J`E0@L%sIh%GI_^6(Ktm23<}ZbY_QZLl##kI_a)rNB&L!#W>1hamfw5CGbUv!WNXU~ z%Bh`qv{nQnAyQwPm?Pu0CULcMB|3+3CWEjd*$bbL3lTDkPQ=LlS_HG};5b>aI5$F( zOOw&r@;OOoN2n=oIy$zDMrw@&9F=32li2dJlrWS=h)n`WR-e2-Q!z_d;k!y$F7ZUx zM9JAwj+DByv{-gzS=6#afkF;Nij^o;rb5-UTICv)nyr$P*oLk)icNa;*kfn z9#;dYM#09Kp33x$mZ|Bc!+j5WIU%B1UT7+3OE%5v`PAwZY7nhazS*)lxq8adGnTw< znW{F8`yT8qiV==~2gDp^x-rphI`4pkT4w*?9+EA|Ry0#=u~n*F 
zTV^>lW;0-r-6^MK@W_@d6I-%9UCHsR=j6KK!@t07cNBItV?~Ji0~$Q~gKJNP51WzS&>me%(7uU?+r#5f`jPQkToJg?a@+VT&jz zPRp4qEvcnwf?7?wqomKzG4vUGONFm5>OzMHAs@s zgmi72X>qP`88;6vU(^4ap%emz!4b$ng=mbLdcEbQwzqlOdYbwe7#f-6ZGqJPGO%!5 z699HK6${TB$OrMmujU^dD_N=atf1hS@VF@J;;39mr6D!F=9o6G`M6%|hM=+C)NIz? z60!DIbKFjF_PDrtc=^Wt;XowrPdhztPfZzQ<>VEcfV&L^5hx6fKnCg{BH-_xO0z5>~|_4E2_-S&spuuE#!ex|(@j1Cr%Eilx~Y(XKo z84@ZqTUax^yNGO&qC~$rCRA*;xKQ!gc;yd!^);*NnudesT978sp{}zg@$%xs;e*NPK$NcsVcZ=A9oJAf}4kzukP;)go4?S z2#bh{iA$7ZS4vAdltETbUNMwf%POklzy$+YXc7)10)@d5$UrsF7&Z0Km}qKgm(KAn zrPt8Mz|hF#rb0nmY0s`Et1W1qVe>#zs=tDf7bcEnMB)WCwSfg zti$yqJ@35&krDVkyuDdww%}!(Jz_fSF&6K<+uAgS}2oji$wj$B#~^ zNaU&8IM`LnW>JV@B~O2d52rFNB7^eGQ0QqEH%8^AXt&dy&MP-6g@l}tB+_e`C}e^N zJv}{rQ^rN2NA65dFN0hLd79ycVZo2OPymr=CXsBq#V!yp5*##&axsM?)0_giDD?<7 zh-Z|>m>d+e+C+GQNCc6TsI&21W1Dw|HoLH!O=1i=3Ettxalut$*NM{j8Gk5!kn;>Q zj4w~!)WTr?6&1WCXbBBKUG}Nxk{schNXQoJ%=anFXBSdGlX&hyk*2@`SzJUil&CZ| z;DU$@QF7o_r{u^aX+9j1mwhXtV-xZG%MFH-AW;uANo>+-4DO9Ee+$dRx^zy#V4xtB z=a3?pYYF(^j5MC|5>^Sib;B%DJVawc5?B;`5gmUU2(Q^0xcoXoA-KhY!Z9v%B)sUD z0Mx#mXds1>%~$%Ew4yqcj+o-f);gVm0Fh6*E!)RPNL+{ag5&nk_>S2k13vX;fFseO zk3<$73Qk@?y(eQL32Kmf=m){!H#zzK1j2EZAV3LC42Kuv`Jl?%*8iw6;rV9l2NoNS z;g$t=$4`p}iyRGj1==8kQlaip8Mom^PbS%h;Uz61VY4@0&Vs9>lCYQUxJqOtyq_}1 z`H-2=?iWdEIr!SWatK~f5I3QnvH2Z_hfk_@<_3r2)F zXrVw66_HF;0?vVUC5J5?Kuz&lQXqoVQWtVRIdN_%rFbO_F8S6_E)KT(dcLe0kGVS# z?V;h5cL`&#(O157k!8V!mbl5sK6A5It+hD#!)&T;G(lP@6b%8Jsg@MRTjr(+t^{j} zq7!=A0u&vp8B?&oviYMhvThnZKd|DhLk_9QgL?DP7N9N*qfTZ*y|P&vEb}L`Iohl! 
zw;(v|jIc>UP;$VGY6$RBy$l4ct{@Lx&uhfx!oP(&)%M_&+^NZO-gZKR6wq00Ic(raD`(<;E5dhqD15J>=cNFP#l4HA_>G2NhFc7 zl1V0)LLrq>8kH?*TmO@`c?9iIJ8=cLr0izo+T#F$7*_=jJ4UJ>O3wz(V1JcJ4onBS zC?2U0G65`=(`TA0uT&W|1AZ#tsUQm`L*H`vq5v5v%b8?T$f1)3b+qa~J{p9-6|C9Jn#{C1gKXHd(xb|d3Oy<+ zG%P8DcBe4p6qD1=(;1ahUl$lfF1ku5c2=G1o+R`X!3v+DV0yvgpDNE;=q@2CA0osJ zYB!19VsV?=9b$J`d@R7HX8J6Gq|*@a%W9DZ?sI^CNWbVLkr@9>LQd-GW_Z1=r(yzV zBU9_Z6V{_rI4uNzYo&?J#n_iV=W#NoJRUV!B`Cr(5~3Q!Oj&@Th48sVhKOS10=C?3 zl9mg;408Gug1z2_{T>WCDS?+lYIuwv8X>D;c}E|(^sAiX5XXk9k^|v2N&-%9%>(a@ z6%%G+(SA~|QwlQtxs$oK{|w5>tDz_8m_}QrHR7ewqC!-8=0<>yVBI9kP>I8e)fY6w zs-zbJVk#*tLS_yIHwiwJ@3A1=*pk{Pl%@}A zv%g&^Sl9pIa;NW6o6;5^PGw4QYcz+^1PLJ#F$pOdImMxL$_MTAnRLc^bbG_% z6A+M4AW}58gEpmub^pa8Wiky6hK_$~c7lfkd$=EDC$&vysgZ z;t}H{G5O+5!-ocuAD(HD%m+ip@=-dfK9RhMOr8tCkY#s3a-?O%%UptXXf&_e2Q0sHz+Dqu;MaanGvb} zjOw!2C;Yv=Dc5?5Ucek*{KSKQPW?6PBmWVaa{1}b5AwLjyljRwp7esdy#BxV%hPBb z*(JzhubdD3;_LxPLdqmgg%%V)|Dj$(pmSO}l;f=}spy~<-*bm~qwU4YW+N=(lC<15 zAuvFV27`f(>abrEV`Zz_j1FVk2}Qv+YeN@b?nuDA0Xc(wmO=?i3|x|{QV9}Ykltb; zaW27&3T#*`i9)E8DuhudBr2c}H5@L^lamTuB$fyQ9V)TbVI@g4dNvG?E1V$11SdUX z^*EG+Xko0=ixizk7X;f|WmUnlfDR0Hk9HV%s8;v^!b7ju#~A+vfGS&3jQ!u|=Gv&X zzix|dPJtXTWPv5dPO~g;CV7@?G$0}cKj>TQOKS=?gc>{^VHRO~Q&p~1!&oe^Pm>Ln zQiCs&&$WDAd^ct!$Z|K~QsYGvbib-SN-(?}&~Lta!>UK)%7O_N9FtD)i=>idv0#RN z!=)l(9)!bq<*=L)*XcyeSdJwSymy!|^#rgyM@*ATm6uLN$6Y^P02x9NHg{Pn}M%>8cyz?ypB7 z-{`S*-yD&TVV_{OEpzeGZlCLgqyg(}rKx33ko;@g&7Q`NE@=<7OBklTfr9mgGAl>1 z{^S5MH%FD5-|Evs&$aUpo{P9?gJiFC+>vvNanvcsQIr*)q>;N66?7UGylEGz@1TRT z0Kg9$%3cm;{?k>2@eV<|1IWR#~UXWnXNZvPerc*!(Y%v8@Uad z@AY6^f5}FaheBEEbV>hD>>%nx#})EeE^5Zg63{T-LA$FuZmnay`VD#2G{;DzKJBrQ zO6&ZkZnZHF?*=P#wzvgTjnQ)SEWvm{yGTh4SL4zL@=1fn`F$ogXI9USu%>6}rnhTD z{pQN_euo5U0;eiZ4Fz%U3d%fMCvjB|gGUJqiPc>^{KuA=vkA|Rglx=GuTRHD91T{| zw$<8dChHx2#ErJSQQe9sZ>Q4`+3^wP8rMvG6LS$LQTzC~-x`jFR*lR$`Iv8l+8%82 z(>IHe7FV};HHyUz|Gc?I+CZeK=u8+3EKV#2$jPHR_gHw-;!v1tSJYyWkAo`{x3_GG zw6a#H44LE~r$`D59!QV=Circc_Fb)h2!1(O`Fo+vLfM!%JTHUho-VNRKJ%w 
zQwJ-ADnCC)J%WWH6IiFipaU;dUO1q0)IlAHwRim__XefG_Vc-;kFPLf;krO{?zcIp zWiZ>j69HlgKDI< zsz3Xo9QxB)suaz=)~7PWgVBf50!F}ZHj167S^wl(hrGiyA(6EagM1B7xI(6OZE&<2 z>D8yVBKn zvh0Y4s8D7N4R29}LSa%Ru+qa~wmTSh)lzJ=RKO~M2O}*HMp{_WRTN6DkIS#92!7Up zs;Sj|cpJv8_q#sz8FFLiAFIFWaydcPRv%GY=oVc=4h@?)hX3hONLW7@=GBd8C}zM@ zP^6D8LU$SV=ui=J_WS5tsHXbguqbHZu^YzGueOcYu*x@0EuukUF475!GJBg3vVk}h zgTi8tt?YFo=nYR+RlB1_>1bB$G=BC>8WpFcQ2<)bfUG8jI?bYUu=lzaSfD^m z$OJWAsI-Z|AEN9tGz79@OZ98m{zQ$`7FBj=6AOlRj21h(!= zaW4U7R2M$WxHYXECC^b!oWpu;I_EVWf8fFZ<;wV`HZn@bO^c{Z} zxe4U4LbyDwDsobatg}eXMPT8=Tw=5bp-WyK$LUfWUZ%+My)kjmk|DvV-UbL%-jeQmw!B0WZ)g&@KWYo?Q3>>fL6whmc)ryR~wkp|NgwdO|xg+d`YWy zyo4&Bd&8g-VuvE%tf@E@_~ zr&OrIn0Cn_d5B+?ncK-+<)zt>_?`W*1(nMoib371(;6b1N*$A4H&^AfMCw_{{2_!; zAm?n_sHU0SCBn$Me?FQM@dkJ8kU)8?_<-!f;4K_!(<$6COq0JIdl^wv$z-0B%pITE zV0@ENOkuQ-UEvT9+O}AlV>Sm{LZcTyUPN6M+{S=gvpOmDvEJ60KHPZ>yaS z8g$wP&v*p~3{FQxTuPd(+{PlGTl*GpS=?h~pY*M9+|II}`VRFlc05|D8N3R?Jwx6z zdbZEnZ;#3ANOHAf#OS~_IyO%6Zd88h6fKAK7HmC)#=%8mpqS*Fhk}xdnueASof&)? zlv!Y*MHc%a93~bY5bK{LfHzABQY5v48ODN9C^61zW9uQnJYxxH4u?ZzbTLB4?0CX_ z;Cn!2$&10nxnRmsaug_jFwx$um1ub!N>x6Xn3@^`u`86TR=+Tj<60cJU8Pnd=fcFV z(&0kyN^{J!Xkn5p?2FB#t1YmEYhhCDRgafbS6r%D`@$q7h9^iH^6RRtI;}_v(`FJf z5X2R!O^Vo}63r+IGU3dVvc&uu7_q;mgRUV7QC8{D86EzhRNWKdGg(=B#HuwtreE`H2OcrRbovLxM6N4TO53vxt ztV@+|G-P$IW61v-wH;}Sq&`RXZ@%g0up4mWZKV#_8UC&IZL3?DUdh?Jf_Mq2n=q4L zSAy)&->!E=0P!O5{b5IRtN{Bnpz*=o{*M?dvOX^*?aDWJqZ}2eL=94C#u|=|=<1(x z{XguA!I%!K9tJ5&k;vA>3qjo4TK3or|6l$8150=^Y_u>n(n4_g^XUs8bK!&4S{vdE zr=hCRM4H=<{`by@hnOBSaU7SSKHZr-lQmha%`H!6T7CXNFcgkNWAQ{XmCj^y`9iT& z7L_XSq(xaUf?_!F%*E|OGqrl7*=l#Xz5ZZ08c(LP`C_?RNBhrC0g;l~|D1~A(RebQ zl~p|taj~pYr#ru}xU{^oy0*TtxwXBcdEPEM-CkMs2gA{LGM&vA%hh^QZ+H8{@pL8w zh%ljy3#qiR&WD(CsjZK>_POty1AX0rP%2+VU=+s)ilJ-uW~0^a_IrcTa5kMU&KMSG zOJ&CXW``}i&KLnBqHKHLgHsnA{{sMo)mycZ+x+>#hqv$CxOwd=h!gs3Y8v&{LLYEN zg0Xlan#rD-n5~q~+%UI}Df<5pFVUv$opry2BnZ zV8lizbXVKLme$mEmwB|Z%TU>A4BRja`7E@2G&_RaR!UBo)8^8Pw(sy*R{_2>B4Lp4 zbqjGM4QJZ+iPOeF8Eq+O7h2tn73_IC%~iV#gE%;BxsWdcE2 
zhKvC>)`umIIb+(kRkP)^w=rK4A~5c^q9xl|I%^<#Xi7!o8RWhK{0?OZNvfJ9AYH>I zcrWn3_5Xwntv4#Io$>_#V^oq1O|EE7EH)Iq{t2PEifSc8(COvZ&pWH z2$5E38NwSF{>fOpq2H{E5WytR4(uA55c90WAiF@U#c|uS(Wn)kOZxy;FlB-GtXTq0 zV=6Jhcvj3Rup=`{lW?`ZH5OUo%}Af$h+^tM&7oT_qI3-KTvmG;t|bTVw^OpMTJHq4 zEJaic?EWo%)|0(0p378dTh&$Wdn%(YaEA7b?x66R2AU5xl+v!sfga^KvI!#3P!*dEDXxts`u6k~cV6UQ+#(Q^z{gaS8-Ok5Md92~%~&(%Q+C*a;SEiT5&IRHDiz!-6AK zFa@UTZv6##iKuYr$YgRR6CWF->fZt}Vx2scSSvML>KzMeai+1d4*5dKO@XknjtYk4 z<<|wKHD-RG9YsQ&_WS{3u&&w}#_j^wdn5xEwp5F;h|M)(nM@A^KdXWE=&IDsM>Y~# z>1Iv-ss7{VfaEjjO{umm^(c|423(bnBS~g!i1i0X#Sc35KGOO3T%@nhP78!1xfSs z!^GvHG*F3Y+*6MdWmai2r|l|G4+ziz%Y=9pgwrAGe`>$F*2Y*Lb8wU&CaujbnYRO0 zMiNPe$RT?~werH7w+dR=b6EL+k$bNpso0KrJ<1~460~uQwSFpTn&fZD6_fuz?aLQ%XrOOYktUyXhNDqEhN*V`g#m_loDht!68Yf&M~<9r<+#xdfK3{E zWAcf8K<0pV)gGl#Vh>5iIRPsOyZs+)rNT^gS$b}vv=VFQbpL{TL`etShw2#$>a_p0 z=!HNxraQmV#yn8kEZl~JY!=(SX@v1h?R>SAK$GsgAi#{Arlj}romSxfTikNd+kx=@ z-&gZ{w%aOmz2Wc2k~dYPEe#R=^UgRKj_Y5KQxPA4O*mLq-|7TYA^k8^nf~c%ZrZBzuHGS`qnR^dO zH0su0u1F&6X?3vaqY;zCS8ob5TI4_sOEs#?&^hc?2xZa3r=Vi9D0$l8LliOUBO`s}OI5Mm4FQ(t)3yz`7omLwKls+~5EAU!SMJR*_k#q8E} zNR!}*Y!l!bWq4Q}Fka?joPAuNQ<|n^wkbC`%G8M=K3R^5)*DPhLh(J7EwquoOh31v zPqn;%Z1x6?c0~8@VU3Kx6tG+gTgIIhEUnOA_8Kob6-3p(R4oT*@fFm=V@4(g4Dug! 
zTEcVee}c6<%HJ7*%>ux@xObuhNp6x*B1uF&DTVs-eN42nuwjz7%2;riUk#uiV_85F z)kizZt(-8H?v&hSxrQg|%wn1`=5E~j45+Nd(B*8WRXqI;kp$7Sr_o9iNyu>YS)8TG zE?qt`pTcSq(hfYBW7xU!PU@9{K2#-8homIxc2{1IWUgcM}xf8emOZD% zLdq|=!d92c>P^q7rzOol+z!%c{*E7;6<*)u;{}e@<-}~vzfCw!bKqdXy5()D(CEjsO`uWvz4fMZWtb8+MtfNYTn`a_r#scp+ zm&xi2&e&r+S0y;~W*dyX_R{c1PceB+lww6xKA+c8Tx&?86tl8x9l_#*KDr;e?N!BR>j<;vTbG|Rd z4_+maOJ+Rx`%LTTaKJ+y0rwn}Y3;(B$~Y`%=W#Iu3>fQx#g8SWlE${~HFRd`0oSEF zZc$gZ!MZ#!`;jXW!P*9sCQ_WdlJ*keqGXCaL2bw#8}UPMyY;_CV{qu73jWw)^8^fT zzrN5Pn!Vo&3ikc~)|_I6L#{NJ&)#2gbo(~81nfizXlkj@Q*6@o)_?Af>p6`~Fo}uy zz+{S7-`E8OP=T3AH^ojIW-E!5NZLHtYAK4nrm(Cu)IwWji|~!}ff=s0iPswvP6e}( zC{2h!ZA|>t97DamTu?)h`Us5fT*~E;#FZexFnSQMLmjO;lAMc;Dc2R3@H|@}*kBLS z7t&}*l?No3isE6=uDk2#*kc_Z&GMB^D3=yuWC>{9W&t{+X_XFBFDZ+dji0bWH6Y`O z@nZ{-SUOu~ps{uthd2OuWkxv=-dYJ7;8?oxg&}42;a=KodmnfvTjf?xTHr`I#sJol z&dQagJAzy!@i^0`DCXyYg0Ika;XFNeK{$JkoIeX!Old~YNPQAbBuWRuO(rox&K<>z zR61DUVx1^**=94tSG&lBlQvs9FlXy3(_lq2lZ_TI+@joZHdLHNS$q0po%V2C$eWmT zpu>kTkk_9~$?9gWMBAo>Wume}6A<|SW)szmZoShvjwmxmvZP$X2Y6;@Zvt!EWbGc% zmFt>Ogr0?jT^%#Fgnruy56%DvY`HJw8lr-c==yp(5{h_O70|86vk~j+PCo<=Hp?BI zgUD&)!oe{{NLblmWIAdq3W}tG4&o>_kmZid0j8W_PP_8H?c|#*I^V#R2p6-fhmEAt zBM5sEjRx7IAG?xLoNWZV77o>UW~HItXCszA3sEjSy}ztVmUeca(n z#ReA(THz*)^<_nN&~1H5c^yitMb8zIK7~u@z{s13wUx2@2<%LrB(m!becvs7kC8YS z2II&45Gq_|;mR;cYslTGxTX@@+}KfLzz=tXT8g)`R;QwnZoeUB^-`&|T;XiFd8>o| zr>2nSpZ6a2Q-CR{)5AQYvy7UqAuU+In6`#+7%{!g_{b9-}b zXCwE;jrXmQ@JKsn`{EL$_a!ieEC|v;+G1mnLdrQr*W=fH3DzUl{ z97~jknR-L*n^{!W>N5otZR5e@(M_`#0Vr=H3^LW>2_d)CnYW=|8+FUW<7v)iiNk!X5?&sWs|f4t|XmZ;TSR=z9-ycKJ!43|GJPmk8vds|K0X3 znfD6JDaym{XI3fB1c_y*{;Qi&6ll-Q*F6Gri*m6;6y9oJtGxdgnOg$Z-OEEUi~q}8 zDtjF$&N@96FlH+KFQe$c|BFjcCP^Bb-QOef^@u9L(;P#B7(RXRmD%L>QclW{zUly7 zW9zir<}ws;y`^=XldeSq>3iLFj`NXF)N2-WIL;`cSqt;i_L&-vax3j!F4}DRP~D$5 zxx8QE9Q+~%&D;=$z470qY%dDh3gke57r=I91dSf7NR10+WhP{Y2EEPQeo&r6=*?1? 
z2{gKikgb@NFcdg^ zSOlp41qh1YyA#>Z1!>_=fE+?!mcm4!^NJKPeXt|!hn^k~9VZSt8o_zWhRSlIVk4VGP$?{+#k6aLZy1{DJb_T&-M_kWjnnU5zeR8QJk_WH}?J%emnQQ9MkYao^wj2#>V6k z<5b##y~|U}`v&%{POZu+tpqZV6!iR$UxM(kl9(b!{{1a;zLUeFSu?+GTN5E2ZR5%G zv=-RR-!mqI9)W{AMIi2?`F{R9^tXP9Zy~q@Y97Y)pIe6FV`zUr-xlbe_V}6zK62rz z+@QZMAHE=lU%MuT4WEar-f&eua$W?#cx5r^+k$>e8kO3VR@iS0i;GE53I{Aifa=16 z0lr&Q=T6#E{S=6aMGim)QoqCD?7q6aAW5TBiqgHDKqH!Kri8QiVHse))`5voAqWurJB10d^FLV+JJ674?tX2l^#o?2myEM0H7a6+h_5 z+1nq438*Yxtb}Kb;R-M%1Y%&ZgfSLY(^&x8gb@Rw1t}R=1vRZOFeyoaWQxzVZswKo z_WwoA#~Vu0`(<==1w~Ylyni@k^lZ11yRXx5ny54_q!^XV!49ZH)Wo@?N59l~V7$+_ z?`a?Oen2>@bXUws&tMz-l6ZcB-We=RNf4FSNXX8^H}h%ODj~NnLX=eJ`NV*%J+Ugl z+N5q$T8*%xnv;OZl(w(XB)mA>b%m}cn_#MloFq%tlOpM7`AxOIybDL|KNk{4`aRZy zVaw-IOx3jg8YSywN9!t6Uww{dZ38Zx6f?qV`4T;j#Wv%m5}c9Bj53Oah6ozl6eE^K zR_ELQX0tpM2mjE5v3u|KGi(Q@^POBIoBw6U{7OcLCsukfH4(K(>qO~;wd(!EveE-Y z14UC*S9|o=V)kMtmPd-1yV@#NQYx#KQ`##SyDpZH!przfJ(jr=@|lH!#EsEODOEvf zX$Ykhn8H-C2t@{?ERxBDJ)PlSphC&XKuIWta`3_mAmy$0>u(?ZaI57EL#I7PYqU^K zXfvRO$jWqGoznKY19I^)vr-DT4B-WmiA24QJhyD1V=Gu39R^_Q2|Rw7CYcsl#pCJ1 zGpUu40$zkp-f}?>&OhC!bjNkWVNC7rUujl@V4QUYG`&F6zUV z%90y-d|Wk6Do$r5CJ`WWR+l5B2r7*zNntPuxtUQY`dJ|ypPmzNSK;dSz^SvT&VRwp zTADt2Q+ak|@|5OwYPDi1Y>%MJRI)SSovLcmh04AuUKYQM3+7uz19VYZZWxj6uOeoX zPqz3(=+?g1nltBB|Lh&BaJY2lt$ciL z3&@Hi{yEnNsABm_PM;;{zy7o(7M8;Xawy1OpNq}*pzv_@1YS#Ko;t4kl6}bX+It}j zYr7E6HCv0v`-(+Ta2sUEbLosPZHJFID{2kIOy2ETkGB@54&^2yO8}XQO7q?qxPP&) zfV!)r>MF4@bDkt?#PDe$#i_=99s5Z`_R^jZVJ4;^seQKYpj)Z)@ga@(#hVV5gY&P- z@?_*(d71|#TSjQbDvWFVo+-}#o<6=W>EtT6f}(NDF>qtVfu0^0`BJ$W)VZ>ITgBy@ zWnPS0Fof)P);x7h4PID4c=_sN9?suePk0&!hIGSe*k_FaEToF)GD zJZOkP?qS>VhV$w&3m-3Py|3NytvH@N32%V-`Kd?M7(b5I!4zk6Ij;DMh6G`1oJX1bse>cHz!xKR@b?V zmE&GZ?AO}Aa;(2b_mc_DQrPQXUh<0J9zMRwg^w3BUY9nU zs)qMFS3R|%_8LkIlH8mthW9&FJh`EP+&SBN0Li>JuyAxVuL$bQi&4)c z>poTf1NrR0W=6M}d|0ibAMJ0v4Rrk&y8ffAEhJ<89P2#(C=WQ9T+rC!23DO?(UEeW6h)#r=`_|x3=G{Ma$;Mm0zp?OG9ewo4 zje>(do|HvQ69($2eB%Uq`~97NXw`FHp(!>bieX3g&q-%&MJqLpWz<|0CF~~+WdE>} 
zto){^A-g*zUx^>h-YG(y?(-_{;-$#2Mc(!0NFIPwo#cnl-1|C&Ab`+9GGo7fV6{CH zR23UT>mLO%_f{*&oCETTyfK=pBVo~R&?DGw_}uvn~@j{>UB z#ErF9C3+OdFOJB_?Gh!ie~Y1-<5{C~n>s0?lCsv+x5U>M>`t#SWgf4J^!Hn~DUKYI z6`7;iw2~l3$p12xhaIedSdKA5k}H-N9fPbp7rsf(2-Z@$#o-wpJ9esVZO1qV%j7Zr z`HFUem_BKntA#{Fb786gIZznc8EQIjG`dPp9`w<L>ar8Dtj0&Wxb(ru+XIv536m5qg7oTOO!JKa+9TKDb;8^ z5>?GfF4@BiCbA-{;9PdCpO6p%gkw%J;e{?bri#JzGlj|D;QhJNk_URgvF_mS8NWmWc#SvOnzS2#7sWlk~zHyfLP_11wEmZ8Q0i$`65r(t#H=G@Hm=IVtVcXsh> z;<=@v? zE`#3yx9Uvxq1;MYTT-?%a!k6d6FdoB7C6i-dopSPL^iw0Y9Nf8&NGF}-ot*i;Dace z5b%iaO~J;D2g^Bg1B80+vpxI?h|v20|K6Sh5SpIs2JC%rMsU-+&&LkD6EXhma@`&V zV^8nPOYO+Mw`5qkSN@;l8j|Jz-Zg$-GKPfii}|n015Vu(A6j-(q}j$d&SP~xm~CaE zdDtO^_u$``Km8j#P)lp5msJDcwqAF}{>Z6AV^6nOc}Qx6<_L(k9Du zl>#5v>)zrdi8Wd(3pXd%+sePyQ6Gz*A|0wjQ{)kV}q{Q?iJHnq0A9)^c zo5kd?1q)vM!3qGU-8LfPq+n>fG4pck&_!{Bt?Om4d0(pgmc+T6EPCDey}K*;ABU(? zVJ0Q>KUePQ0;9)frZF#Zs_b1ZKu-EnJ)RXf?Ge^=XI@yYUFf^f1hp*{@EEi8`hJ%l z*d){v5q5!nE=388dg~#kUTDD<(&%}eEd23Lk|Iw6R4+WaFoG;VR@1@jY{-|;;lFs+ zYTS*(0V{mBHO407b94C5Q~*6Xtl6>FH$QJ?_QP^CF;#=w9EVL_{>;q3Xl+_5O8t0t zSAk+?_Kp@sOjRQ<&Clf)%*_0VE>BHGX+CG%iAIb6E4L5`(vYoSaWo-V@>&{9Ac%wC zq9tv%p=&!4j#wME&(B_h%f&Qts=&1u;B$<0=vh7ZTKwI=nhb$c!mSmk8Qq zn9%exIy1JRBxgKUQF)5K5ka$5dO`-)W&WA*Lb&xZ79R^p;>shcqsrqXj5x)g&O}0>Vm?;Uyo-f zyhgS-!rHgOh^oixge%2|dU_U0F2}3YSK^bgx2Nb({AGLsC<{cs%OJdpTx|Ix|D{^$FgAAR=tVIQLhedQOUhcdpzm>t>y z{n;68+<E0Zx#%*TW+F+CHyk2z4k zt>e_8w=Z?jV(aBO^|1_wJIKw6tEW-p%t_YAlL^Jkb7IY{l*mA~iX9XgSuoWAp&H0h z6PUXETQOCl{(Jo-v0-3O+jQIBfxQjW4JCH#Gg08fzKvw+A(Mx9 z{PDmWdG3|NmzR9d=RH7-eznF_-czL*D8uKSI{&M%ltoQr!+uYR-G+S}#qjxvcwwUI2QF;XOoGO^gpY7>cTA{ngONU^x& zvPz=M&64U=DrtFEu0$*BIo9`JpL5gurrJ*|1)k5edz+5^Z+cyE-q-o7K47*V{5#c$ zn#0p;@k(pJTcjXaXFe1L8c~12+TBsBprvQ zoe}uq)8&D83tSN^(cI^q&X+u_97+=P;>yIy^`1{Xprl}pkj)u8J-E&K! 
z6WBMVEs5wAH5i9yAs-Y6z>q^rkm@L?@V{&e4$nk=92vxV&dbhDnn(l9SIqVKQ5by7 z=nHNC4-SL+{=X~C@XFU#+Wr$Mqwz4Ot#%mHxV3c}y=wD?wY{=e<(qj*8Wu!Jr}<&S z=zjcxgS zQPF+H;jt81IDs;s~#U)j!DD1@*igN8hT3ux&NPz_~ZxW@6Vgu10r8@OS zGvDZcN|oiUx)LjjR`x3{1Pa%kjq|OT+bjLt2Q+p>+^*X@BIRdxeroAv>;}m>UbK-n zNvv}GkOuZSY$Rbp`~e-YRAmZbk#XGx4{4Rg{Up8a;Sus(X@|5Bk)+#9JeOKPz{lIW z{fI#$8vc|18fr(QY{p7__@cr3ylI0x2?^3~*yJ3FlB@QV~uOEn*~S z1i}_kS4W6sk*!%jZv!xCPR~5B(5?XgG6831zG;w@?i6g8(NZ^){4KztGGI}&{Jm?hz7dip{VIf}_e}siLx>iTQg@0w} z!$TgIfMM)ya(ey7U6Z@sC5NSorD|8$Qkj&x+12-{V!N|7X08O-5C=(#eh-Ra<}3gtdTyQ%CrrPaz>Ue-(SZK7-)j>Ix0 zO7!@{Hio|w#N3S1&Eom7;E1 zT;{{m5X(~NF0N01lyt7Liytp2ypC3UWIqXcY!vpQyCJ}2jXO1fz9(q{xW`vtJ@n|;G|O+etU5* zRv#I`1aU6@8Bo7BG~ZdLj6kBuz`e9%arDnp%z=IF(->nPx+ksJ4{}r@q%(iQ^r2Hg z>{qG#h>uZmQ{NMxqX@H zxe43r|H=9%Q%Mq^kCxrLz)qLzxpb!LZtYmzCE?&y_5=4W5OYGP`+3>KFRee%{G@2| zd~xx3!uwjw!~8wLxfc+fqT1}hnasL~gdjiIEI4wyLFgpHVDq~j7A>VPb+_4gI^Bd7 zIX!Yv@NS7bd{+)k?ToMa>uT4IuXyds_R-ykNm+3-<%ZpWPB7*|#cB}q+A@eKNT%~z zn2?8t4{l4m**np$V3jp`TVCL+wvb)vDohN^S#EQ zS-r0rCN-{DpR_oc0lr+lNN;{yu6@(5Re11eTg#hK*7RKE^>8#l&Q&t(Z?}n?=UsJq zU3GaXgKt&dtGMq8_sb{wSL)UyYuck<8$|c(VSFd{0(_PPmsm47{{MD4;}0>aOb#uTY6um_P?V>WNZ!cHu%x1#$FbUf(lS~V{u968ZX7vm(my&{baT%mmnk0)iD>RHG zgQMH>tnj>99w2-M>S5q4TyP413A{C3Ft^@LBG8OQ4kU4KQ_ zpW}D`vFD+ta`6fdXn>uI+=0?Kn`rg1jf5IBgrc6#v?{VC+*!yoBP8YbsDjdWjYb}* z0+O>z@=7h2*3)IfB-KX`+h=@qdZtD3l{GXhtJpGcZGHFqlkLmqo0Hc)Gkl#V{V1(I z{X{?sE^Mo;QaN8(l%b_pDo*^*iZ^t!#nsSe6pqdqpU*5fzuinPU+P! 
zCG;C|d*n$>DeD+5i$>?|#BhuI%lns4zC`%{OYxP~uDbczr>}~YjayeJ zeyXdK5N*zSK;=>WpeP24ETsXki=EASZxPIdDa$^rD|xci?GOfpyBM-~Kf8L)1o4@> zE3Y8;kFPCA#4v;ydjf9*##!jrz6EhRzDND}MvLB}N~kVp-k(O~!&Nr$So**sCaxdMMJEUBTF6%IM|VF$IqAKH5FnbNs6jcol-ZO7 z%s*M)s>z;tOCu6u7FuNOEf!ep9jImPcLJoQz!^X`paD)4dd8_IqKD^_@Y#>fKw{!8{f^3rE%E0H6de-;zkrtjJf1T$d&2F@axfs) z0_UU*(s2(!PY>i{u%K8=(|~9B?GW7{Np1L?@+n7%8G9y~Cz4L8Fty-;VRLbNI|Ir@ zKcmqEBo(HEE@H^`7jN+wX8T|x&LJTRqrv$hph#a?2)mP{Vu6FD9lat)Mg5oE0d!npui%3%^rN2P!iMk3o@8Mda?$sp>*nsu zZ!4&@uuJ&y0JO^7YjHfkIldjKS9|Bf{R89ODq*XfRU@Ve-sUQxCD~Oo^MG;V2D{zS zAc6&VFNS4IirAASBB?ruSX6}ur%Reiis=b*qsC}AL7Sx>E8$tM)blVtG&YeFv-J_I za;A=VDN#&mVmiKMzf`h)m@j25V7F;JbTHGxkIeDeVUI+_-8d2FMwJx0n`#4rfb*J{ z2qb!3yFPR?j0X&4urKslpCxnG8GPuv8GZ^Tx$!tZ0|J5IK`{@&sxYv?)#<}9KWlcf zLx56aAU&;;;Q+LfAPP(g9?9A6WRg7bfwg-h(#ISO3tx)ouX8J+X~yOceoyQen>**_ z`r9jb!>LSNel(0dhyVZn6~El+i?yd?duQulo&B#7Khz*qi^%j{F?hoTylB3@Dz)FdzUNRZjVfoq$c?zEL@ z=;?^=2u}IYhXZ}6U6PSjK%Z90B;^7mkXE{rtns=We(M^gfCAp#X!1$HoL3~)5=bH( z5d4Md;(VA8Tvb3s!>X)cI3vvQHGYx;UV`@DB{?olU;`NG2VsK!S)y*pt2Q$>+pFza z$(%ePZV4`?rCv@`QIS>eGEB8dM^!S-DeK!)4~1hOI5p%Cf1*@>G{7|z-P@K;X z_wr`3Nb(I9NvhOZGex#urXOAC2t2!vI!m2!j$xsM;D_ps3e+S^=bjWqd?o zh9tD4OP5<_ifpo83cnoG=HXLL2E@7YD$H-CIqtL!1|?0RXC2Eej}|p6=`&me7wRlgCdXvBz zJTlI+V?cAuo1A>`qC+aL`KSsXley%n24Yd1Tj^7j}R-_JUt7}d&2H9^@RO-yMt z+eTs=SWuX3D{dGOosw8A)S)$ll)-yWRMf3{MLeLKg)|?<1T)qL4c|aVM)A~T4iNHr zmbCe5Nplu;(RZLP2BI7KbBZhUbI{B<1OY(6e_VVQRZc!#|2GG=D%xx8tRG-YX5aF9 z69Knw>B>Pg+u7zhGAuh--GZf_QUv)$pJk(Q+ufO8*xft3$i-K(ee%fdo#TC70y+Bx zjv~N_VNse`!}O#`8OG`_kA_l|=yzCc^5i`&lG$Pu#MZ*7IdgXa)B-{N^aO-t(IhyVYtmSMt5RB>Z?H>cS?tX`&!{+{AP-^~*DEc=E+6iZ!39qOz0iBB z96E}ZHEB$0zyqMytU8OiV!FsbQS$_(x}T}2b2`7`${3)C`4q>=!+>Bu7bBPCe#LQmiA$v-M1%|^o!CZlXLcfW zDP;*7T4-wyMPN!EfK{ELRzSm>1Wt5He4ZzX$L1jr z8G4{ZSBqrmaL5xuvrLlD9s#J>U^ z$WXM6-5EKftjcpdeq>pOB4@O43*<#$5~8b}pM}XF=j4~4rBH0nax6zoz1r$fO668O z9dvT8V@LwM?9G1PIBha%eGzFw$06VXzlnDf! 
zq80-k{8513|5GNuB;nP5ESDoCyaB*N-O=)67mutfJ#Tg_^)jBgB^?W#iwr5Fz7mx1 z*Q4lGmyhlo4U>k;iO-#8^>YBZ3duHkOW58F72ZA5N2Q{O6kI=)t1r(G#gV4Yh!Yg^ zNRNh+Cqil`OqSZWflvqyQh_#w1keC6!3iZUA!1}iW!VEaAL!vk+I8CR80w)U4kh=; znnp^y!4dDA!hV_E()Z|_kXTgZH2WC(H}CtWPjmyMtr>NDx7S$TqKCOotr3)QbJ3}7 z6U^S!0)|v$?(F;mHN9d;=?@g%fifgb_cJUa@wO|)m8R+r7JjgG^?5!BisvBtM0K`0 zu?tdlB@t5mO^tfwIOpL!!Z9Mt&&<^*451S58KqCCy zQo|%o?Q0sI!bOO~=eeLjo=6o|i|V102XT=Ji$XP+yD*t0jT2#C7z~A2W?fv!S50ZtH??OLu>M&v`!sBWi|u&X8f{!DJJ#S`d8mU&dm1 z1|#FDZaT{=JG)26t+JaxyEqtMvCwR9Z0@gnx*N<$@$^SF2m4_S(de-N4W3Lov9Zrt z0y?azC`M?tPXbz4hXsz(fb@<=4o*!AK0uw9ZZ^%&y=vW~rcoc(XIww#mP?NWywmS{ z=Vb%Ie$!azE+Cb?{=CXuZL-}x_#=Ltk#8Gy8*PCq>74aLbx_1bDbxSPmHxl2^gnB*|2u1IDIfhq>~5?a9^E&8gGTyyw^1tw zKvN|Ynh}SI8nFPHDNc`hy6x(BvB>A@oaPs1B(>Q zD{!LjaD{tH zYZmnYw>|rb_ygtZ%k{~9vsRR47j2e=KsD62;mDysM3k(pd`%Yxeewj2E=GHDY=`66Gwh^dZr-1!*O5BDPlS(-; zQh+p^Sjh?`YC&U1uARa5IaLX+!Hlh4U1aCbSIM&rPf#X9HRc?Ba|7da>IS*{*Z+pE zgqO{Xi|Z6suUcP~P~hQMTdROVb|>1pIQOzEwdwa#<&>F_=5@pNS0DC-XBJXOi zcXZtJ#dZN_RHXvK`t~@zhoGpm@COM`jxQ(5TEK=vXc=Jd!#R`)z@p7_kzU(Ykm=-z zZv^F=uW=$CbgNr9NiJUVY@+%aO(y$TgGZ0j&+2|TAs ziqCOWi)o?3wipw;GBLj~CH~;>Q7cH{ghda)S&J&pQPxLM(6;ieTN@-P8GnhfZGXmR z{ME_5^M|jCifbIW&#o$9?&loXQI#}{vT-27@r`>oBNCcNl~htL4jDAF^7vVYXKB|F zTXY6wX8dj|B`8<#xwX|1c^}R3>wEaOA5`N(d~4XOPgdhq3_QP- zC9e|}4WFlW0ZiNSUlVC!_*4rB2s=xkwiumY^`M0YbXrZvn?R`46Oqdrh)rtTT7#tB&l6rHpe7q zh@?g>IoJIX$4uC)q+g4B$J%!?T~!(PI6G!JFO^t|1#nnlVPVU2Yg%( zxF!AJ77d9G;ae~S|Hfpq=7M>w2>{UH6|x#six6^3xrJ4g+7!KO!ffMY94ZPdu^6ww zYH8#qae)OP$mqH1EQ&3(_Ik0VT{&?9We3q0VvfuLZKHU{+;z}hEI^nA_VyzWV@5-e zAnFf6eoZJ2j|GohD&HDPs40`1Bxzh4I_^qiSGS6yHB485IWmj}aR?TPzEx7C9k=lZUt!p!}5iw6a-m`DF)@3F@IK4NZJ^n zQ`V5Z=uw^)F|&eKtAiHIPD)i26dGl400D^d1h0>mE*vdN=Dc&`r5sw9P-`{X+lgNw zr;Y^uK9XOM?bWT^C{~+eu}?1^4lSf}w^-8S(`z?__^2?oLv9xIBGyq04#FQ~=1pyC zzvuM#&(`j=bSwUTQ>7=iPv2)oK4*K5&bR5E{)tBas)L_bec)9~pRvDGDcp2@eU#yq z%-z?IEBVVHm<@dTKKnOx9koDx8G8jjls&m-yQHy6B+Eb>-9u*(>UZRSGd&R#s4jL<5 zv)(+z!A;myhvK8m+P0tP=!afVXFJg0t~f8rG2m`0&?=Q^4ZRHK^Za-L&Rf6XPIb{K 
zklxm(W`qM+f-TBiL5br~iYqcctQ$1+)?RPAt(WyOW+zo59n@w*4p=pFNTRf!`Uo+B z8Yt{Aos&um9^o&Q_WZu8m^mOf66)`j0o41aBmh>T^9!`jBfY1d>x|N$LYy6Chs~`g zheU8&<^ha~(WK+Fn$$H@tOcRwkf!AODoEZ_YszfkM4F`rjt}HC==F>@POFDUmsc+` zp?_e1RwXT>dRPXR_g(lU$tIUCAUSkteoZ?>L&bLUateGY=-Y~#m zXD1s~O4iV2j$bm7(c!f~#lr$1H2eN?u-|X+pwkkd1R!w3p*nL~rMD)YnT3ud9-~`0 zwHTI_igTVT5xbkF8dkLR!rpkMSAyV{XDVJ`^@N#auK025uz)#h?lts;=|Cy&st)dqH4)l$Z8D|ygsh+ahHusJ?rHOQ zbOVotmH65G#PG0=<_s-nG0bp>H(fVbF-MMbA@iLEmU2XyXvqUF+EKiVSxiftA{rI1y(s z8E?wu(87l&^9ieB7!DMDhT5`pj%i@iNV659SckEX0NE>aW?AfAg398G5bcT*ychR? zfMLX7;K3j$M?juyXpDC?W5V+sRzgJO&R0ST8tuYv2D7VR62G~HQg|9)HS&r4h+BILnYS1C)V-~$-531!D93eQHXKas zWc>3VgTnPM( zTpwmAN_+7N2iDG6``LTjXRTy?Vu3g1$8|O0d45CscJhu*`eTMR?HuHm?ua>p3Wrs9 zrEcpB#l_2(!VCEq&&@@k_@^<4d};5_w5j3?VrBNd?*boxW6gX~kj>vW;+2{2obGAq zI{!KOadN!sE0Z>A(3he|UVu-uUf}vH=H962f>n8m`6B8QP3(U1jyDjrZt596f1gTR zTM22s_$ZWyo)k*l6~tE{i<{HF-g&xDFETW&iqt}`Sm4+(GCaqQfIw_w1}rrE!WGmAbliW*$TW%eJ;sD&7~+ep)pXna{T#S zxolr&urlHoWL%c0&U5aqsdkjNWwYSdiUn`Cs3tom@qB{?GUELS*Og4nx5(wejOpHsd_-%%Iel}jXMRd z7N#rH-hhGKRwme$%!tInnlJ5Tvp_(9)slDTjX&0|SlG(|2`@W6sPaPHbeEhkHV4zW zc^2JT94>e{e~RnIsd=f)&>25(%t3ev#XLOBHn6eDO!xF|Ozw{Cu

63G5A8(ny|T z0Y$ivi$rm--ts;U)c(T8-e~vZO8lD9fn;rfkrY3HEzz|Ya~bSEXo)2zGH5MO1d*-F zrsXa6*q4~RgNf^VRq%-8cj2&FTffd>Jl%o5n1YD*jnGk{nbVGEUPf<%53+;M3Zha6 zTa0p?opXLOFSN?9$j{GgHkOVLXsR9~)gMNyr>~zDm>x?&gGd5lxVjfiz3`aP2d1@p zq2;pEgBP8QZ!Btoo2^6>Nq>SMA>P#uU;WBY=G{P?`m}*@$!;QeK}mJlTxi6QjcL+YGRq=szo+r|;&?}M{$YK1wU=YE`;?|mbE>7) zF>5h@(UrixsqqL-I=swu*{h;?EFG5O3}GgRT63GqWbG$yZbvA8i?hwho@tiKTt0(i z1j#-&p)=VQuM@nv<)1rHQfrN@I>CSV!Wp>?O?eDMG-dr4S>KV6?XKH}irZk?QqIdP zu{M`KsjaTEv%IUCt+rZK<(r~v(2T`yJL7Aeq1E+OTCI8=XnXD4BkQ(Vyhq_YnO|Ot zci&dtPlVMjo2!Ag5{mQoE!7Uy}6NI1X5M(_~h+HhE2Dz!uU!`POUM9$J; z)9~rnDeFy*!}sb-pQ^aMd)6M?x{5$4evb=)?A?a%O`J9}e+xjZ7uIyHtt0wY0Rg)R zv-+yZqrlF%E!U*hDyO_>d$FpT`Hvo_)U!PRd~d2-RK{=moIYYv-KuCc%sc`rK9i;i z^V;8Y{L85+v7FJ;UY>>=i5pJs8Q+&3k?rN4c06&I0KtB)0x5B$WN zi*|10Pt!%mClIyfT3je*%_oY@Yj63);+B<-uTU?gOJ41}>~DOHc-gw_>-fu$9Tc1CX8kM&VmOh`0 zVlBmho`U!aBMW(CP+)I}yF@KM2tkm#Azrk}f-QN9ZP6$$6e`SWMM&5R26$ODKh-g` zdg%QUY%3XZ`eae%Y7mhuUDU+<1d?6hu^g(vMEy%C9Zh%Jl4+WgV=B_KbMs9Ayhc75S8a zE7Y`l&bEoD5kAC@dh`<|W{Qj8H;|Yc7OEkr7h(c0F+YT6b5XUSQ1fC~Pt6gJsy>jO zQ8Ief-j)h)$D=A_(x;V6K^%RAwUc@!LU@{Cw|-=16F0!zfsA7U7SK_~x1l``*|b?z zZOi5Ez?(VwDw4uLvNXlQcNE5w>i(%M%7VC=!=b~k-V~?xlAiYJGr*w6n%&R5gQJ{B zj88v&e*rNNeYfH&QZZt9^hU}s4$|f!7g<%ITb)2^U0$};NYZ9+eW5UkggE@R&(Syk z4vYT^^g+h)uCf2H!3izs>)*u>{W6BZo(IVG!wfzbyh#x&JtofUSy#mk8O>c{I!}K+ z$bxrA!@W zs8y$4gGRH>@errEL1Sdb(rbRweh+N+*t6-FGmhKtj!pIiLvZkDaIw|%J_#26&id6a zelu*;t6nx{p80lK;B^bV;x%u3!<*jnrA6NNj(3e)><7Et@PQ94@wIPlv-D2F)0Ez9 zP>Z&^%X`|POQ&wjePxAJR_d|FYS#=|XYEX@Z{9b)bDf*Vq+4zWkKhx6JrTSGgqV;J z(vXEb6rl`NsAo;BUvbq7bEi&y(H7@Ib8>4#8$KQhUCapI@C*I7w^zFw^qQFM zW-Wn0CRlwbv0NONP@1w>EP+7aVz^i?j*I6KxI`|AOBQ97k)tsLxYQVD)x&C&QCU`8 zqw)I0($f>@cC;>IHS*5@01-N+34FlCa&b2=ezgI_e_tJU0G{fk$=Y)+c + + \ No newline at end of file diff --git a/browser/app/img/browsers/chrome.png b/browser/app/img/browsers/chrome.png new file mode 100644 index 0000000000000000000000000000000000000000..278ef4d15ecf2d715b900a25064ecb2fe8728f1b GIT binary patch literal 3726 
zcmV;94sr2`P)Oxc$W0hQ82|tPL@foWdm1%?Gqfuumvj&n7YBx94TwB3hb19UuSG9>H!O29 z7e5@ACL-rnP1-6c$EXR$OF!{6GdPAcBxEnlof#rtDIR1b&y*&pcpl=q4&-4`LZCoq zND)sr4ljExbyX1j(g{ttNlds&OtnehAt(CL3res^;VdoTC@bJ4Dg4n2OS4GjKR8LQ zNJphcH-N+AF)-vgG~+ZeN{Y$<|Nr~W4d+EY@xl~Gs7LwC4n&5>=0ZE-FD~p^Oh~Io z##!OhHATMeVvBR+7y_nnW~y!0As!YNFQb zRY-BD+97GB>QY9~9w9Axy6$6CWuDYwn$j_RzW&n){?GvFwjL;TwIp$|T9wb-qbKCA zBlZ6O@oHEgZmd6*Lp+c|-VQ0?swG{Q(e7VS|Ni;^_5J?&@8+@~>|IXZ6BhhzTTr=9 zC!J10v_9~#EZqkTCUw42#ZWY}G5^~Hd8^t0NRHYdCCeEe=LQ_-O&A<+{t+4rZ?gT%!e2lvBP^|6ftl9wGGJ4C=W8in-#Ty3XgZv-0Jr zKcGtC7cA$fD*Uo0{J$jp#Tfn254_Xnp2Fk)QAdGxIx?X&>?tg!wkYF%A=nch&)MPQ z$;oq^zL(0D=8TP6)>b#BOY>1i_18fci#`7{Gq5@Wsy9NCXSVVj2~vu&|PV;0tVL zBR;^^7Ps^Pd{8RWK5rZzc6Rd z>zB8({5w>Z*~@yt=HK=GBs{IN^7!=Ixf10|jm_xH*9l!);=v4^1^mv7BFLMjSuS+N z+)Wrk(-c9GX9JTB<#`bVL7Jp(lW1RIrjyAZqi_0j9|TbHd;}m6vEQdj63f1o+Wb!C zVLEve}{%ASFIIkf@X*fmY>_5r*8YHzw}g zWK6aCgWI;J80z#qB0<6c0fY0%bvK6vX1T>M>X3(_4MsUc#Y2*WkqCJN!`W^Q+V!*G zoKZ(^#79O@Qgsv*!HIBuf7@NJU}EjyIIbIk;R6FmH2{hjjjh#HS8fdd5A)#oqY~0I z1bxmXfe3ykw!0mar)x&R8CG?;;`?C886Z$90Hg3fVOj58rC%NeC+1yc?RHSWjLsmb zI02_Uod_~sU*D_MjFbz;qfuEvpeTmosv|HA;V1$IRcUzT67r9)x(96{jN>@2MXe$t z3K0+qra5_1k3ILdZaA7BLjw^C+ab!I|fmy5=L&Dh)T3YcD zIUcd&4=L9f;%GEyJ{HHe^1Xw@em`}X)RIaWDP;ZqOzL+i-+$VDlq-ute&ojNtgRph zJ@6Z;hnou0r9-@UK2^$?&?yx z{DyX(_KCK0-ffNXB0(R=dy2hGv1>^gPk&f~K46-rZaY)m<#P8sT8CX{XV>FLfwuV0 zSr_E=jkO@haXdzuUvHm1vd}RT9anE^0qJI&l8uc3cq!5R@G}eZ$+z@Ia8yT4SoTri^(_FU4dv zYL_MdVnTKl55p!nl-qq>$Z$S_kTs@?u>nICR8>)wC`m!8q(b@L=lvfb_Z2wPXq0nn zJVnSFLz%#X+|y0R8LKs?3TY|fe`m73QE zqTyH)3T3DfONOeru)e;dNMPk^gGo-^%`gOOY~*rSvHGT_e+Weqi6j^}h!Tl}5GskI z%Z|t`7)~|wFDOFS*vO%JMKcrONW@~`(2^kV*|V&!?KvRrLw8O&f2Uk1ay&!0k@Y%^ z!ozWKx<`;@n}LHM*n})g;gY!PJ)#5xLyyilBbPEf5H`xlR=t)h;I^`90SXv6hzKBL zK}c7{(Ip2YKIHV%g^zA$f*!%Ys?1%yA0@E%{SP?#5|(>-B{GAsxv zjYpXx2s7FO0axU2r*#w3WE{r2zrl-vf&PmXS+0dybLllR@)`m zVP#{6%D}C|UWl2O)+TgFn8;AKc@jijK@o(8Bz7<%JJhL&h#>m@U+U?3=9cy)kfwV+ zet*CJU$wS!gdbyqOai$I29W_0yn5eRiNI*Dh1jjuzmX?9zfmOMbY5i+4=0??`@RYU 
zMO^JJ8By;deZLbS4yE`sS+VZN50Fz5OoZ0s|cl4p*%@sBbi{Y9|gdWJ5;jJJ zy;E+RO^qa4GpJGs27`B$!3)JN(UH%$fv7gpapmDY%%4SlQKsCYdMl{1mX1yr3&Gyr zpd!7&VlksGf=dFU>q>r`jHr(n5Y??ml~gtzT^%a+s#7dY4vbe`lJ@qlE9J7GS`&lA z>aN^MEY+I9T84&iDpM*=M@KSk6&pd3%lV`FjR?LJ2ex9jLjfZzA15%eVFfchbaOHq zdG`MHE#<7}usYvX{kseEnv&e&Mknkey1e>Q8Y&Ibl!k_e7bXTG&pv%$S1%pLQmpcvB5pA7tFyS$%n;{Uthe4zZ}HY zWrQ!J^ZMI;bug+$&@|c{4oTulC6h{Ah{a-w7lls#OG4 zbeiokkF2nfsbUp@Eam9dWut=|*T=SzBDJgN4_Lhpu2_*u#ZH>=t#jg!asA#{lb~Zo zYVF=EIlSn?%TNYUAxn<$gKbPe3rdiE!00iNBDJFxG}}cF6nSKnksbV!7DtJp)WLmcP%KB3VKp=ynHDELe zc=zT~uQ44E`*<0d0b$2KQCwPje2WQ$pv&Nf9>yl4t=AY2N_{B9+qfyi3W;tp{_-{z zXLdp64I=pWSsjJrr+h=C_ zF*DMJLkO-Rj;{;PX96d};rS%~K7p~N;Mz7EmioZR4uKqw_@{CiGvWf-MHn@X?$HP^ zwyI$L$8t0M{e9d4gn01Nw~WU#fC%AGXl{1DQCqLmY&|G1G#tp~`r!cLk~h9GPoK{K z2%%)MX^&QiJNREgf8(wLxtTuVaA9!{|EZzX)*(WDyZG;kB0AX%yXlU4NX>K`ks7XSbN literal 0 HcmV?d00001 diff --git a/browser/app/img/browsers/firefox.png b/browser/app/img/browsers/firefox.png new file mode 100644 index 0000000000000000000000000000000000000000..2803f10a79548091e18bdff5ccafb9b9720b6e4e GIT binary patch literal 4795 zcmV;s5=8BZP) z-zfj#PdBI4{q=#+N)FIg6VXx-)LRqKMGDGE57tKs)?XCOP!hyK4$1cY*=HA)4+9!b zkJnfX()s?~ZX4{A57bW!+iMr*feYMD2cHuJy7Bvu3BlLxkX12V~Cj^R+nRXb&WO#QM%RQ@HF%u zL8#_DpWZZ=+4#~<0S-=<=k{>g^<}@~|Mpe=+&=;V9_e@qpB)8j%IN;Z9s9Ky=y4L4 z@BR73Me~Lp;BXZHCwug^B>8v=h~xGyai1x?vg#@lp%WL_b6wk1PEW|P6%1J+8Y{4RiW5~w81W5wEq5?Ei_(vX1U3Kw06BD0PE!B_I4TNv9TNUy z{wDtZc~1RY{QmpX{?a%7J`BKT9ig3U%*45ccS|(doc64YdE=M<&9T+VxU`tCYz>M4 z01r1wL_t(&-t5+COpF}1Ti6tG?%HVSXN%O*)+S=n%(z0*4FR;Jkm9?`(0m<@ruvBzu*7=%s&6~ zk1}xDxN%`&V4SXsO5Fig;rujrrpitLS#8$;{`c@t4Ja0mNs-$H&ujrjoY`VE%v@FeN#8m5A$f z^UXpbKORp8l8CP~8jWvjYkTpMCmuWPw`(LP4`W^BhPur*9iLB&|0Xm@s!(75$n)W) zSp17+LWU6An6nKTXV26=}?XxH#FpDk%@Id@Y#PD7^{?kw3_s_wa3k;m%VsF z^T%(brlzL3-TJJ|$L;Oy-MlBoK|g1VEGnAZr%%6O*?Hyn-#a#RN&t-voVBMhBe zuh)~DPU%r`vmvN^|RNRTVKuO+PeEEmU)S*L*i;HhNE?v1%RaMp1_2}h;!r0i@yvurX zT3TukL-f#@^sw|263Ho5JoL2XTrp^aD>_cDo$DLfyU}-gS#fcpBVnku5~ZlFuG9fW 
zTweE{Gzg*iPZg{~56}Eow33dCZ#r9Cpgg{`YgBOW53}){;=cUZToHf`9jfw8{y zd?Xk#Wj9DdoemTj88Irp&V&rS2;0fNbLOGveM_|#l*Z?~?k?>8CMG%tMOaU%4s2MB zX9ES0$ndhpd_Zso7$D%Atp{ud}J5z1d>XVii}OU&eYJ*yy#p$fr*2-@)N< zRDiRMuE76!vt1AVX)i*=%|esg;y}d?)v;J*Nf3Dw?XRwb-j?>#+H~ zBI_bgS$AyYaN;=8oVd8?@R*p$m~ckk%90BgfH9k4{U~O$P6#<(Ys%lVCqKWOZRZUO zHel_|5>T!`cNvECwv$*eILH735>BO3DA5ecpuuey@R<1k5roZl90J6aE#>8QB~)=? z6?c8ruI3TdSWs_GtjI`A!lM~6D~CH~R6)8PwdueR`VFW7$j+VRT%~we zu*-!t`9Nv8XPEbWW6Wghejoq=0x47~7zkPGxQ&PUteY=HK?Gqy4|%y-M9+SWX$@Gl z@2-K#hoiU5L~C^8CE0b~gUkiz1`yu!lDmIC-;6TG(cq#Rl(6?VDw%(Z5% z)@6{~ZI<+ggjiJ@^N1qksE7zcfhtRqRt|D7&LK_F_cLgCfemGqnz zm&D?#scx1TjIxQ}yY5!i4o;K<))9e>NTkHTypoozu6p;*19`;C^pdvpv>>V3p#|~+ zwMN4g(dl%BBImkMBGDSPO~x7{^Lt2;RRu*L1Of~~DA7p_Mo~jU!~4gbnLMu(exFFF z9a7H*L@fe?o_p|$MIx~njZH?Q43NG75p;eLk^l|-(~6;lCoN)}F9oF$h)}g02jmbn z*KYSJxf-@cs-P=a-QFBPd=i)+i>f-sN4RP^o&vNA z_^9`KMP9Zhiw?ttgllaYqOP6{&lDgsuLR z0-!cPE_{mM`yvD8>fB^Vs8?IY3yA$>z&@92&skQ0cTz9?zb%DsD+ zr!T(Wd8MlH92dn8NztMGMC9h?BE)FX8fDuh20)ITOrH^O@qk3&2$f8xpr`NP#p&tk z%hx>-XXYfcvZPWe_j9u7xdIRf0wKHaVKQ_?K zS9Y-r-A1z&R74k|dr_38P0^&|j^gwT70RH}xx5^(LmjHI6=ckZd?`gGoPrSxmSC6* zxo;{gOrb?kRHIb#44w$#06xyvOz0zR;;ouzR$FJ@k zY^{}jJNHNg0_1GxL#HU7EIYFp$&+u3Yc1iW7Y>-zL5e7WXdTqxI92|6-@Su0l64Ph zC3g-U2LgiRNbGjG^I>Oywp@0K$>HH|$u%{Rdb2U;0iybWP{QL;cITe=^-%-4*VlLP z=+TSx0T_~Cv)PIz>>RT?87B&{+Y8B*eReHmtzyT4pj=H56+t)$A%a0;TX*ioX>1$8 z?SmJquHJp~DS?Mzlcs{voP!o!z`(JIJ96{y=hy- zBO%*cnsk)FhcOTolX?8&#p6sa*Vcx&v$v0~Ux#vv3P|y(vydue%gL;m6@i3VX3Z!^ zcJ*piL=J?0RJEu;sLkdpX5}u4LDFrzGMS9e*L?Z@tha|=9jA31IdY^pTu$|eo#mt* zVeE{H4bQ8MEUXWzB9Wj53?|>wq*m{Mapl_7sm*O80itiZ*3r0Y_|U0S#{nSz*w?Zc z4kwdI+A2FMu;^CpDC8>6;?WTVrRa??IyyS8sjUuW^N7)`G@2{VZrY9=I&{cJ34gJa zP1-X|GL-^>xCD0XXvi{o&|}~_DV zhh(b1*-nvgmSvEzN70ZSNUYI|sfqq3egLj+Pq18$a1A|I@-EOOuLmZX&1O6Tu`}gn16~FzV z{YU)8uPF@MPQtpN@Vs`}=*U8O#A`9=ID<|jDHwx~B-b)f8W<>*{Gm_}0Lkd_`$M?3 zg`N#EqKJiy3+r;R!nKbTAfslH9EewEaA-%AUCl`M<5DpMfe=09a-=upKXAXrzqO3x?d z*1F$jSp=0>4SOxRpS7T6V5rklX(+{Fv2!%=`^B(86c^9uqM@St;k@4? 
z3lu2Du-A%)>7-gnF;ym=&*?u_-XoQd%{kW)3>Wrrre6>Qw%`)@!1Pk+@trMof5?1V ziq*Q8+eQ>ERVYJ~>CWYUB%BYxVA*&)ObQYU0?%LjQhNEVOeQjhhM| zqsb+Tg{VnjQpwcdrZJr;QFyH*io_@>BSG$_?MRcpDQYB2z-NlGFjyCp!h6`@rm>wN zSQxPo#UM?e?3XISz@c%I*PjvGN)=U%CSSO}P$)3;qdm2>Zj4zGv5-O_d3BPaL?|T1 zXf#H(S{(XEqnJJR^+JKZ{|*f<8ml2GAjtmWky=K!uT-j3s40r5R9Y?Us5=m6u?E6u z7S`F&(DJbxP`q9%RjdYw4h%X*yG0=y^a}Gx#{&prX z84N>8R{%)|ek8;5cw=-e|Lc=N6Xwb-_&lSb|e=#r^g&f)hGGMfGxT zf-vOEid-%og(DshS=rHOyJ>hMhVh?~Evm1NI2>Li0s$cgzG0Z6yf2+!w^sI_CmBg_ z=4rV?USIF=a2zM+eh_Mt`P%fZm5WwP#?z$Kjr;43Pna-a^5n^rCQXvbCQXpdTs>DY zHF)6?%@rJkcNiPzuAVYeCL52)@SI6__5_4-e~u&&fNo;u%$ZZBOqn@l;>7i<=T@_N z_4@S_rQnZv3M4>~#>BuFQAQXVK)(ipM8K#P0|fbtj#YaG4FULTWQg?ovGw0<{yR@| VpWD}dK9~Ri002ovPDHLkV1mB@DEj~a literal 0 HcmV?d00001 diff --git a/browser/app/img/browsers/safari.png b/browser/app/img/browsers/safari.png new file mode 100644 index 0000000000000000000000000000000000000000..4ed52b904f1a7caf4b7144e45c301cb6ace38005 GIT binary patch literal 4971 zcmV-x6O`o}D^6IxH(ISXWo4rlt-K z4w8|OG&C~-0RaaG2LJ#70000)L_|+cO%V|hdU<(@iiv1vXE--Erlh1MCMK(^t0W{O z0002EwzgYYT7`v$dwP0`iiyF%!Tu(r)6es^Z??;8lXDet&^`dwUg-&d0{a!otILc6aRQ<_fFi z1f<|WacD6`9uu zl+?}5&!Mi#7Ky|abGi*ViOR{zUteD`GBW=C_?fB53VFkOlDQ#Tsr~-`ft|VoSEF-r zavU8TBc0fkrNWD)zbAXYAv=34Heder?IexNR(iHhbFv#-ljP#yyvEs*ti@@QvV(+$ zZff+bi;RlV)(9Otqo58EEv^#U5 zC~255OL+c2Bf{YCpuW{IqR~p6%-_+;1&G0!t-vdbzDa+%GFhS`O`Hx`hao3c{$4AD zzu@lQ+BliPv9`U3m9_v(f(p9u26?qCcC>1Bj07Th6BtqI_4#SD*-Ng{YogD%yvs_7 zz%6p84rQ1uv*J5{v9+nAdW4`%WSGK7Kh7T~*2A`ejH^0WgydF6{~Hi8#PQPR@1M@+ zfve7Do4!30`&#PUzQ{jF`%Rd;jTuYN;Jt`5Rc@_Z=f@ZD9aJ zqM103i$oT&$)=t*hVd73>d%-kSt$bFF;ihArjB9s|2s*Ht#v5st8qm=Jf@bhElm{U7qJVflV#5f65ZDlt(P*5n zjK$${I2;yJ(@5G71Vw{0!A!(FuI7)@FevFN@z@BdVGS6tK&Hu<&E`hGl(~SrK$;Su zm>LlQIguIs``?gpYP_OA&8#^CoO+vyZQ0C&Y__mXM5Bz$@nfjhhXCMc8uy!n#+ZV_ zG{SO6(~@nhlu?dbj-p|O#jec{YxNu^Ca5zgpyijRY^*d09deAbtubgZ>SN{N((IPxo;?(m zfMm0!QZ}@!P%bLNCMj2s`sn&7mPxy}*pqja61OM`1#n?9DT%nK 
z3ZW2{P3$Tcf&g1khh)||g62^}f8itD{3`Mj8h}TV@HNH5Fo1LE`gNhxSypqN(c9DvM>-3__$5+qs8*lh$^az(ll&WsaocqbP1ykkcCxl zUT%AGC%5pjsyW8CWDK@hPKTgZ6E<8UuRrc}vXpFfPfO@B9r8=q?UDpfC_E-Sc1$Rt zIrXsX$=mjpTuW~I@_g8#5$HJvt-lwk3>~HhK~m3EiyY;AWMM;tunKfYBZ-78Owx>8u;@wOQr>CpJuHhZZ5+Ic16>|&%nsKJ)oqM zNLpxer`~owX}hL#Lsq&Q_MfmQGgyjimiHo+AqEiCr)CjhG`1_6PEu)!WDTUcLkTYA zuSxUWEw^vK>gqc9&f{K5i-W+A|bTdjs^VC+syny8dGk@TpmBgVAg%dX+nqdZqtEFB##mm+Pp@Ck=coB! z@$9Lu=jrTj_nGd;(9u@xpba$L-PYxoR*+sbM=CN5{&}^IH>I2o;N7I-uc!GUXs=3e zK+T=I%>DTM`L=`aI#)r3)oO*Cj=y#N3e+c9%MqhLsaFjPotZM5{c=n3M0&m5uKP@T zyT=1p=Sldjl$V~}sW|Sv?C7@Av_d+%XFs|q_Q85#?4Qin;8_8!Kb0Y2wH`qS#8Z)Y zgnQaMIy_shZ+Xqn-FR@GcC_U?VHS|)=H?9>nyuD(r&kaJ$!)^x7(Z@_v>wrreiUG_ zyY{(NO9fpP>}fx8sJ*ju=Z;3&K^MzfN#ty6bF+P+t98-UONUW4!II(nA2(&Z9;oZc zOfa0@RR0%rMTRFw<@6povT-$yOMo+<;v>Ci|-xmf{yoX#dFqu`}SG8 zbE{et+e6!1x1T-RsudH0I_`Ya_#a6=wPxorO`dTaz_WLXn`7b(P6u1eqW`^bkGB_V zyI$+P(uP8zjE&YApoP$jbY;*k?aBsm(gITu#UTz-jysX7va^HQTv-81~hgUzXzW~FOvNKsN zS+-)MvADRn|AT!M7Y`5n}0--}w%P@3dqcgsDUlP4nBI^>gDxo#oX&Uj)AzQ02KHS%pdu-uJ>nG* z1|Er$(HN#1lg3XUT(7IUUgv6=-A#qzDY(pd7lt!U^m9GGe>&$IFS}ixp6>Jc($mwe z4TdE|MOBevp=8|(2vvtDnM}^HBa^Pux;ocI_}7y=DcHuF0kcriXr`zOUDJ zt9v{>kOmM#Ex)#ZpdajLN%;_qw_!OXC?t;~%%{o3ygGo~xX5WAxp(=qOA8BU&Rkl! 
z12Cy9p^SQebhym5uIFia!MXhdpbU5U-OcVyCN!vuq&`~pm&Eo#nxbK+KKCndA1x2h_& zBekkZ9|(qg(A>{+<5jU_EfKf;S$GY~;GwYzbaBu1mfU84OBVl;;iSL`i^LE1e?O0^Fe%BnBqVI5 z-HHV!ij*W0wdce>e*_UC3vD1 z((bXpu+ny06a%x_qy)UK@vW@dpCW+av{rYz7k{lEj0OgQK*CjTdf2_kv@@& zMJ{*bi8l(w}4y7op)eFPQ>0gu|k@RS_E3}npM;Rv*Bz}lm;}Vac`D&V#3;R>l&C=MVL?& z5299gl{tNh>LP%EU=3NAHSi87@I((8iL?i=rWE1j#h@sim?mi?I?I2wT2Gk<>wS8R zSTYLx1`Z!Cu64+{1qB7bz!W}M?D({7GET_$zOky3ra+<;35Z0zqVdG6)!MytZm?TV z&<6;$T0Is$(7)^5@AnNjxHk(5z=-rY`XcVQ^YGFn6h#p4>b^9-P)XMyUZqAO;WeI) zn4O*-?YcdkN_b=x7S;}w9WoyOyx7U*7Tlb7I2QV>Z?{SCJQ4!2+2qPq);>Z}O)_Lp zQpHH5jSrmGpVA4Azlw?yfdPmywXI3~-Jx1rf0LYh^ClOZSmus{0vJ9f(vD-}D#KP= z&-R)VQbZP}%A}KCJ3VXF=MZwL;u8Y~cTC-6JI;YAhl-oH@`9NzxzBp1Ev_9e&9++* zoM>r{tggdilLR6*P!!T@#O{a=$3jt{9T+k;!RNpC9_N&CVO}nuA13sja|45?gir)W z2BW^}Y^#il5_;K8O^l-b*tL(N$D`#A^{^ZmU`8d@Hk~?j{IG4fi#tr{z1!}N!z*}7 zI~l>Dl^dU=`uL_OHXrd?z_{=f(jMX$dY?XVxNxkn3n1Ja{^y7M{`rEw_}-&C+uW@> zi8NaT3|wXK^G{TZg6t_~M1)3ZOlf$0PtUox9lZo~;@B`iy5_mo?jdzmd|&sevu=(= zA~u;tD8j~hH#}keehmZ(1PEj|(__+j3c#pN&kR?otH!{Ox{z>lYJPb4!8SLADyB+k zh#iL@h$!SqYn`AiBB2%`i*S21QYlX%IeWI}*v{SeXYS5)&&=FEh3Cb&Gcq_-4ks<& zZbuN0=P3-Jgm~FPFeyb^nTdzj=_KLkoERYCdo0I7(sSdP9EWxYjQ34eFO<+!m zVnur2T=Ptqd^eIsWH5Dvm?N@-haRog!$u+Sm@P=zADhI?MxocAf7Xi|VHR*aMCkDd z5w-;-gP<0+P+;Dl9hF3t(#}4dGEGcUrD^Jo{fC3i4wuFbT p59Gy1S-+I<@GXHRY+Am<{sB?361EBG{x|>t002ovPDHLkV1jkngU$c| literal 0 HcmV?d00001 diff --git a/browser/app/img/favicon.ico b/browser/app/img/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..0718efa8328532746848d3ef36b3f1befc69c061 GIT binary patch literal 1340 zcmeAS@N?(olHy`uVBq!ia0vp^3LwnE1|*BCs=fdzwj^(N7a$D;Kb?2i11Zh|kH}&m z?E%JaC$sH9f@KAc=|EZmjN5x7xLyfrRub*JWVB>yR_F@XD@r>8#G5*HY-nN?*J?8LD&q?MaQ={jUX~A+mgsEP zHF`>+9y(5I>Mw;%Ubb@9%oy?aOZg_X%v3q*%Wre9_4o6d_wVNwzvC54_*r9}QeemA zm$+;B^4*WNI;l1v|Kz)F>pivPt%q01b3QV2V*CA(OHI4=M2Od)#sAvc|K5sBfAv!% z=RoO=-rZhTBC8rU)(FQK>3x57)$jeCcSi)@bnR(Oy|HTo?^IL8(tT3v-4mnT-rUK1 za_)pio_Bj~^Gh$rqEw4E(-$2v(?s9@SGe}|9rxGo&u$1$+NbxX*(*8WoWdXF`zDtw 
zE$1Ee*qU_Q_nQB;*|%hmxL--~kk2Fqgv%i~CoIGthll2GbZx4L` z>=k(aS;kSNvtP90Icx9n-)napO@C8n-1oq{hj+vjo_-o^tZ(J2ytUIDPu0OW!S*~>@slK?_J5jHrOHM^)>%!&B zZ{OP58{OUNbp7bp)yJfz!uVSSu03>`Gv&^t?0^YJ&ipOAuJXU%U$nFHCH2jYv@ZsKUz zxi)3`@y|cM-TQfLo@j$*LfY0Y`H9}S&l%1!>WC!>HEZaVG#*{@kkdxDQu~iS1M^F9 z@y|N{*{@9H+Bomib!lKmimVEWC<)F_D=AMbN@ZZExb^vruaEy%U!U_n=fVsw8(-QS zU|?|7+~B&oNf3vLNwCN1GoD*7>FerU(m&^+<8}4Bi^g?Rkg^N@C;hLTKjZDW_QGk6 zlV=$j*7`}TDG}An23n#BvLqysN(R + + + + + + image/svg+xml + + + + + + + + + + Minio Logo + + + + + + + + diff --git a/browser/app/img/more-h-light.svg b/browser/app/img/more-h-light.svg new file mode 100644 index 000000000..0c2e2da60 --- /dev/null +++ b/browser/app/img/more-h-light.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/browser/app/img/more-h.svg b/browser/app/img/more-h.svg new file mode 100644 index 000000000..cf69dcf6b --- /dev/null +++ b/browser/app/img/more-h.svg @@ -0,0 +1 @@ + diff --git a/browser/app/img/select-caret.svg b/browser/app/img/select-caret.svg new file mode 100644 index 000000000..b2b26b86b --- /dev/null +++ b/browser/app/img/select-caret.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/browser/app/index.html b/browser/app/index.html new file mode 100644 index 000000000..dfc0f555b --- /dev/null +++ b/browser/app/index.html @@ -0,0 +1,56 @@ + + + + + + + Minio Browser + + + + +

+
+ +
+
+
+ + + + + + + diff --git a/browser/app/index.js b/browser/app/index.js new file mode 100644 index 000000000..d750577bc --- /dev/null +++ b/browser/app/index.js @@ -0,0 +1,116 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import './less/main.less' + +import React from 'react' +import ReactDOM from 'react-dom' +import thunkMiddleware from 'redux-thunk' +import createStore from 'redux/lib/createStore' +import applyMiddleware from 'redux/lib/applyMiddleware' + +import Route from 'react-router/lib/Route' +import Router from 'react-router/lib/Router' +import browserHistory from 'react-router/lib/browserHistory' +import IndexRoute from 'react-router/lib/IndexRoute' + +import Provider from 'react-redux/lib/components/Provider' +import connect from 'react-redux/lib/components/connect' + +import Moment from 'moment' + +import { minioBrowserPrefix } from './js/constants.js' +import * as actions from './js/actions.js' +import reducer from './js/reducers.js' + +import _Login from './js/components/Login.js' +import _Browse from './js/components/Browse.js' +import fontAwesome from 'font-awesome/css/font-awesome.css' + +import Web from './js/web' +window.Web = Web + +import storage from 'local-storage-fallback' + +const store = applyMiddleware(thunkMiddleware)(createStore)(reducer) +const Browse = connect(state => state)(_Browse) +const Login = connect(state => state)(_Login) + +let web = new 
Web(`${window.location.protocol}//${window.location.host}${minioBrowserPrefix}/webrpc`, store.dispatch) + +window.web = web + +store.dispatch(actions.setWeb(web)) + +function authNeeded(nextState, replace, cb) { + if (web.LoggedIn()) { + return cb() + } + if (location.pathname === minioBrowserPrefix || location.pathname === minioBrowserPrefix + '/') { + replace(`${minioBrowserPrefix}/login`) + } + return cb() +} + +function authNotNeeded(nextState, replace) { + if (web.LoggedIn()) { + replace(`${minioBrowserPrefix}`) + } +} + +const App = (props) => { + return
+ { props.children } +
+} + +ReactDOM.render(( + + + + + + + + + + + + + ), document.getElementById('root')) + +//Page loader +let delay = [0, 400] +let i = 0 + +function handleLoader() { + if (i < 2) { + setTimeout(function() { + document.querySelector('.page-load').classList.add('pl-' + i) + i++ + handleLoader() + }, delay[i]) + } +} +handleLoader() + +if (storage.getItem('newlyUpdated')) { + store.dispatch(actions.showAlert({ + type: 'success', + message: "Updated to the latest UI Version." + })) + storage.removeItem('newlyUpdated') +} diff --git a/browser/app/js/__tests__/jsonrpc-test.js b/browser/app/js/__tests__/jsonrpc-test.js new file mode 100644 index 000000000..341d0c286 --- /dev/null +++ b/browser/app/js/__tests__/jsonrpc-test.js @@ -0,0 +1,43 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import expect from 'expect'; +import JSONrpc from '../jsonrpc'; + +describe('jsonrpc', () => { + it('should fail with invalid endpoint', (done) => { + try { + let jsonRPC = new JSONrpc({ + endpoint: 'htt://localhost:9000', + namespace: 'Test' + }); + } catch (e) { + done(); + } + }); + it('should succeed with valid endpoint', () => { + let jsonRPC = new JSONrpc({ + endpoint: 'http://localhost:9000/webrpc', + namespace: 'Test' + }); + expect(jsonRPC.version).toEqual('2.0'); + expect(jsonRPC.host).toEqual('localhost'); + expect(jsonRPC.port).toEqual('9000'); + expect(jsonRPC.path).toEqual('/webrpc'); + expect(jsonRPC.scheme).toEqual('http'); + }); +}); + diff --git a/browser/app/js/actions.js b/browser/app/js/actions.js new file mode 100644 index 000000000..598124b84 --- /dev/null +++ b/browser/app/js/actions.js @@ -0,0 +1,509 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import url from 'url' +import Moment from 'moment' +import web from './web' +import * as utils from './utils' +import storage from 'local-storage-fallback' + +export const SET_WEB = 'SET_WEB' +export const SET_CURRENT_BUCKET = 'SET_CURRENT_BUCKET' +export const SET_CURRENT_PATH = 'SET_CURRENT_PATH' +export const SET_BUCKETS = 'SET_BUCKETS' +export const ADD_BUCKET = 'ADD_BUCKET' +export const ADD_OBJECT = 'ADD_OBJECT' +export const SET_VISIBLE_BUCKETS = 'SET_VISIBLE_BUCKETS' +export const SET_OBJECTS = 'SET_OBJECTS' +export const SET_STORAGE_INFO = 'SET_STORAGE_INFO' +export const SET_SERVER_INFO = 'SET_SERVER_INFO' +export const SHOW_MAKEBUCKET_MODAL = 'SHOW_MAKEBUCKET_MODAL' +export const ADD_UPLOAD = 'ADD_UPLOAD' +export const STOP_UPLOAD = 'STOP_UPLOAD' +export const UPLOAD_PROGRESS = 'UPLOAD_PROGRESS' +export const SET_ALERT = 'SET_ALERT' +export const SET_LOGIN_ERROR = 'SET_LOGIN_ERROR' +export const SET_SHOW_ABORT_MODAL = 'SET_SHOW_ABORT_MODAL' +export const SHOW_ABOUT = 'SHOW_ABOUT' +export const SET_SORT_NAME_ORDER = 'SET_SORT_NAME_ORDER' +export const SET_SORT_SIZE_ORDER = 'SET_SORT_SIZE_ORDER' +export const SET_SORT_DATE_ORDER = 'SET_SORT_DATE_ORDER' +export const SET_LATEST_UI_VERSION = 'SET_LATEST_UI_VERSION' +export const SET_SIDEBAR_STATUS = 'SET_SIDEBAR_STATUS' +export const SET_LOGIN_REDIRECT_PATH = 'SET_LOGIN_REDIRECT_PATH' +export const SET_LOAD_BUCKET = 'SET_LOAD_BUCKET' +export const SET_LOAD_PATH = 'SET_LOAD_PATH' +export const SHOW_SETTINGS = 'SHOW_SETTINGS' +export const SET_SETTINGS = 'SET_SETTINGS' +export const SHOW_BUCKET_POLICY = 'SHOW_BUCKET_POLICY' +export const SET_POLICIES = 'SET_POLICIES' +export const SET_SHARE_OBJECT = 'SET_SHARE_OBJECT' +export const DELETE_CONFIRMATION = 'DELETE_CONFIRMATION' +export const SET_PREFIX_WRITABLE = 'SET_PREFIX_WRITABLE' + +export const showDeleteConfirmation = (object) => { + return { + type: DELETE_CONFIRMATION, + payload: { + object, + show: true + } + } +} + +export const 
hideDeleteConfirmation = () => { + return { + type: DELETE_CONFIRMATION, + payload: { + object: '', + show: false + } + } +} + +export const showShareObject = url => { + return { + type: SET_SHARE_OBJECT, + shareObject: { + url: url, + show: true + } + } +} + +export const hideShareObject = () => { + return { + type: SET_SHARE_OBJECT, + shareObject: { + url: '', + show: false + } + } +} + +export const shareObject = (object, expiry) => (dispatch, getState) => { + const {currentBucket, web} = getState() + let host = location.host + let bucket = currentBucket + + if (!web.LoggedIn()) { + dispatch(showShareObject(`${host}/${bucket}/${object}`)) + return + } + web.PresignedGet({ + host, + bucket, + object, + expiry + }) + .then(obj => { + dispatch(showShareObject(obj.url)) + }) + .catch(err => { + dispatch(showAlert({ + type: 'danger', + message: err.message + })) + }) +} + +export const setLoginRedirectPath = (path) => { + return { + type: SET_LOGIN_REDIRECT_PATH, + path + } +} + +export const setLoadPath = (loadPath) => { + return { + type: SET_LOAD_PATH, + loadPath + } +} + +export const setLoadBucket = (loadBucket) => { + return { + type: SET_LOAD_BUCKET, + loadBucket + } +} + +export const setWeb = web => { + return { + type: SET_WEB, + web + } +} + +export const setBuckets = buckets => { + return { + type: SET_BUCKETS, + buckets + } +} + +export const addBucket = bucket => { + return { + type: ADD_BUCKET, + bucket + } +} + +export const showMakeBucketModal = () => { + return { + type: SHOW_MAKEBUCKET_MODAL, + showMakeBucketModal: true + } +} + +export const hideAlert = () => { + return { + type: SET_ALERT, + alert: { + show: false, + message: '', + type: '' + } + } +} + +export const showAlert = alert => { + return (dispatch, getState) => { + let alertTimeout = null + if (alert.type !== 'danger') { + alertTimeout = setTimeout(() => { + dispatch({ + type: SET_ALERT, + alert: { + show: false + } + }) + }, 5000) + } + dispatch({ + type: SET_ALERT, + alert: 
Object.assign({}, alert, { + show: true, + alertTimeout + }) + }) + } +} + +export const setSidebarStatus = (status) => { + return { + type: SET_SIDEBAR_STATUS, + sidebarStatus: status + } +} + +export const hideMakeBucketModal = () => { + return { + type: SHOW_MAKEBUCKET_MODAL, + showMakeBucketModal: false + } +} + +export const setVisibleBuckets = visibleBuckets => { + return { + type: SET_VISIBLE_BUCKETS, + visibleBuckets + } +} + +export const setObjects = (objects) => { + return { + type: SET_OBJECTS, + objects + } +} + +export const setCurrentBucket = currentBucket => { + return { + type: SET_CURRENT_BUCKET, + currentBucket + } +} + +export const setCurrentPath = currentPath => { + return { + type: SET_CURRENT_PATH, + currentPath + } +} + +export const setStorageInfo = storageInfo => { + return { + type: SET_STORAGE_INFO, + storageInfo + } +} + +export const setServerInfo = serverInfo => { + return { + type: SET_SERVER_INFO, + serverInfo + } +} + +const setPrefixWritable = prefixWritable => { + return { + type: SET_PREFIX_WRITABLE, + prefixWritable, + } +} + +export const selectBucket = (newCurrentBucket, prefix) => { + if (!prefix) + prefix = '' + return (dispatch, getState) => { + let web = getState().web + let currentBucket = getState().currentBucket + + if (currentBucket !== newCurrentBucket) dispatch(setLoadBucket(newCurrentBucket)) + + dispatch(setCurrentBucket(newCurrentBucket)) + dispatch(selectPrefix(prefix)) + return + } +} + +export const selectPrefix = prefix => { + return (dispatch, getState) => { + const {currentBucket, web} = getState() + dispatch(setLoadPath(prefix)) + web.ListObjects({ + bucketName: currentBucket, + prefix + }) + .then(res => { + let objects = res.objects + if (!objects) + objects = [] + dispatch(setObjects( + utils.sortObjectsByName(objects.map(object => { + object.name = object.name.replace(`${prefix}`, ''); return object + })) + )) + dispatch(setPrefixWritable(res.writable)) + dispatch(setSortNameOrder(false)) + 
dispatch(setCurrentPath(prefix)) + dispatch(setLoadBucket('')) + dispatch(setLoadPath('')) + }) + .catch(err => { + dispatch(showAlert({ + type: 'danger', + message: err.message + })) + dispatch(setLoadBucket('')) + dispatch(setLoadPath('')) + }) + } +} + +export const addUpload = options => { + return { + type: ADD_UPLOAD, + slug: options.slug, + size: options.size, + xhr: options.xhr, + name: options.name + } +} + +export const stopUpload = options => { + return { + type: STOP_UPLOAD, + slug: options.slug + } +} + +export const uploadProgress = options => { + return { + type: UPLOAD_PROGRESS, + slug: options.slug, + loaded: options.loaded + } +} + +export const setShowAbortModal = showAbortModal => { + return { + type: SET_SHOW_ABORT_MODAL, + showAbortModal + } +} + +export const setLoginError = () => { + return { + type: SET_LOGIN_ERROR, + loginError: true + } +} + +export const uploadFile = (file, xhr) => { + return (dispatch, getState) => { + const {currentBucket, currentPath} = getState() + const objectName = `${currentPath}${file.name}` + const uploadUrl = `${window.location.origin}/minio/upload/${currentBucket}/${objectName}` + // The slug is a unique identifer for the file upload. + const slug = `${currentBucket}-${currentPath}-${file.name}` + + xhr.open('PUT', uploadUrl, true) + xhr.withCredentials = false + const token = storage.getItem('token') + if (token) xhr.setRequestHeader("Authorization", 'Bearer ' + storage.getItem('token')) + xhr.setRequestHeader('x-amz-date', Moment().utc().format('YYYYMMDDTHHmmss') + 'Z') + dispatch(addUpload({ + slug, + xhr, + size: file.size, + name: file.name + })) + + xhr.onload = function(event) { + if (xhr.status == 401 || xhr.status == 403 || xhr.status == 500) { + setShowAbortModal(false) + dispatch(stopUpload({ + slug + })) + dispatch(showAlert({ + type: 'danger', + message: 'Unauthorized request.' 
+ })) + } + if (xhr.status == 200) { + setShowAbortModal(false) + dispatch(stopUpload({ + slug + })) + dispatch(showAlert({ + type: 'success', + message: 'File \'' + file.name + '\' uploaded successfully.' + })) + dispatch(selectPrefix(currentPath)) + } + } + + xhr.upload.addEventListener('error', event => { + dispatch(showAlert({ + type: 'danger', + message: 'Error occurred uploading \'' + file.name + '\'.' + })) + dispatch(stopUpload({ + slug + })) + }) + + xhr.upload.addEventListener('progress', event => { + if (event.lengthComputable) { + let loaded = event.loaded + let total = event.total + + // Update the counter. + dispatch(uploadProgress({ + slug, + loaded + })) + } + }) + xhr.send(file) + } +} + +export const showAbout = () => { + return { + type: SHOW_ABOUT, + showAbout: true + } +} + +export const hideAbout = () => { + return { + type: SHOW_ABOUT, + showAbout: false + } +} + +export const setSortNameOrder = (sortNameOrder) => { + return { + type: SET_SORT_NAME_ORDER, + sortNameOrder + } +} + +export const setSortSizeOrder = (sortSizeOrder) => { + return { + type: SET_SORT_SIZE_ORDER, + sortSizeOrder + } +} + +export const setSortDateOrder = (sortDateOrder) => { + return { + type: SET_SORT_DATE_ORDER, + sortDateOrder + } +} + +export const setLatestUIVersion = (latestUiVersion) => { + return { + type: SET_LATEST_UI_VERSION, + latestUiVersion + } +} + +export const showSettings = () => { + return { + type: SHOW_SETTINGS, + showSettings: true + } +} + +export const hideSettings = () => { + return { + type: SHOW_SETTINGS, + showSettings: false + } +} + +export const setSettings = (settings) => { + return { + type: SET_SETTINGS, + settings + } +} + +export const showBucketPolicy = () => { + return { + type: SHOW_BUCKET_POLICY, + showBucketPolicy: true + } +} + +export const hideBucketPolicy = () => { + return { + type: SHOW_BUCKET_POLICY, + showBucketPolicy: false + } +} + +export const setPolicies = (policies) => { + return { + type: SET_POLICIES, + policies 
+ } +} diff --git a/browser/app/js/components/Browse.js b/browser/app/js/components/Browse.js new file mode 100644 index 000000000..671552529 --- /dev/null +++ b/browser/app/js/components/Browse.js @@ -0,0 +1,734 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import classNames from 'classnames' +import browserHistory from 'react-router/lib/browserHistory' +import humanize from 'humanize' +import Moment from 'moment' +import Modal from 'react-bootstrap/lib/Modal' +import ModalBody from 'react-bootstrap/lib/ModalBody' +import ModalHeader from 'react-bootstrap/lib/ModalHeader' +import Alert from 'react-bootstrap/lib/Alert' +import OverlayTrigger from 'react-bootstrap/lib/OverlayTrigger' +import Tooltip from 'react-bootstrap/lib/Tooltip' +import Dropdown from 'react-bootstrap/lib/Dropdown' +import MenuItem from 'react-bootstrap/lib/MenuItem' + +import InputGroup from '../components/InputGroup' +import Dropzone from '../components/Dropzone' +import ObjectsList from '../components/ObjectsList' +import SideBar from '../components/SideBar' +import Path from '../components/Path' +import BrowserUpdate from '../components/BrowserUpdate' +import UploadModal from '../components/UploadModal' +import SettingsModal from '../components/SettingsModal' +import PolicyInput from '../components/PolicyInput' +import Policy from '../components/Policy' +import BrowserDropdown from 
'../components/BrowserDropdown' +import ConfirmModal from './ConfirmModal' +import logo from '../../img/logo.svg' +import * as actions from '../actions' +import * as utils from '../utils' +import * as mime from '../mime' +import { minioBrowserPrefix } from '../constants' +import CopyToClipboard from 'react-copy-to-clipboard' +import storage from 'local-storage-fallback' + +export default class Browse extends React.Component { + componentDidMount() { + const {web, dispatch, currentBucket} = this.props + if (!web.LoggedIn()) return + web.StorageInfo() + .then(res => { + let storageInfo = Object.assign({}, { + total: res.storageInfo.Total, + free: res.storageInfo.Free + }) + storageInfo.used = storageInfo.total - storageInfo.free + dispatch(actions.setStorageInfo(storageInfo)) + return web.ServerInfo() + }) + .then(res => { + let serverInfo = Object.assign({}, { + version: res.MinioVersion, + memory: res.MinioMemory, + platform: res.MinioPlatform, + runtime: res.MinioRuntime, + envVars: res.MinioEnvVars + }) + dispatch(actions.setServerInfo(serverInfo)) + }) + .catch(err => { + dispatch(actions.showAlert({ + type: 'danger', + message: err.message + })) + }) + } + + componentWillMount() { + const {dispatch} = this.props + // Clear out any stale message in the alert of Login page + dispatch(actions.showAlert({ + type: 'danger', + message: '' + })) + if (web.LoggedIn()) { + web.ListBuckets() + .then(res => { + let buckets + if (!res.buckets) + buckets = [] + else + buckets = res.buckets.map(bucket => bucket.name) + if (buckets.length) { + dispatch(actions.setBuckets(buckets)) + dispatch(actions.setVisibleBuckets(buckets)) + if (location.pathname === minioBrowserPrefix || location.pathname === minioBrowserPrefix + '/') { + browserHistory.push(utils.pathJoin(buckets[0])) + } + } + }) + } + this.history = browserHistory.listen(({pathname}) => { + let decPathname = decodeURI(pathname) + if (decPathname === `${minioBrowserPrefix}/login`) return // FIXME: better organize 
routes and remove this + if (!decPathname.endsWith('/')) + decPathname += '/' + if (decPathname === minioBrowserPrefix + '/') { + dispatch(actions.setCurrentBucket('')) + dispatch(actions.setCurrentPath('')) + dispatch(actions.setObjects([])) + return + } + let obj = utils.pathSlice(decPathname) + if (!web.LoggedIn()) { + dispatch(actions.setBuckets([obj.bucket])) + dispatch(actions.setVisibleBuckets([obj.bucket])) + } + dispatch(actions.selectBucket(obj.bucket, obj.prefix)) + }) + } + + componentWillUnmount() { + this.history() + } + + selectBucket(e, bucket) { + e.preventDefault() + if (bucket === this.props.currentBucket) return + browserHistory.push(utils.pathJoin(bucket)) + } + + searchBuckets(e) { + e.preventDefault() + let {buckets} = this.props + this.props.dispatch(actions.setVisibleBuckets(buckets.filter(bucket => bucket.indexOf(e.target.value) > -1))) + } + + selectPrefix(e, prefix) { + e.preventDefault() + const {dispatch, currentPath, web, currentBucket} = this.props + const encPrefix = encodeURI(prefix) + if (prefix.endsWith('/') || prefix === '') { + if (prefix === currentPath) return + browserHistory.push(utils.pathJoin(currentBucket, encPrefix)) + } else { + window.location = `${window.location.origin}/minio/download/${currentBucket}/${encPrefix}?token=${storage.getItem('token')}` + } + } + + makeBucket(e) { + e.preventDefault() + const bucketName = this.refs.makeBucketRef.value + this.refs.makeBucketRef.value = '' + const {web, dispatch} = this.props + this.hideMakeBucketModal() + web.MakeBucket({ + bucketName + }) + .then(() => { + dispatch(actions.addBucket(bucketName)) + dispatch(actions.selectBucket(bucketName)) + }) + .catch(err => dispatch(actions.showAlert({ + type: 'danger', + message: err.message + }))) + } + + hideMakeBucketModal() { + const {dispatch} = this.props + dispatch(actions.hideMakeBucketModal()) + } + + showMakeBucketModal(e) { + e.preventDefault() + const {dispatch} = this.props + dispatch(actions.showMakeBucketModal()) + } + 
+ showAbout(e) { + e.preventDefault() + const {dispatch} = this.props + dispatch(actions.showAbout()) + } + + hideAbout(e) { + e.preventDefault() + const {dispatch} = this.props + dispatch(actions.hideAbout()) + } + + showBucketPolicy(e) { + e.preventDefault() + const {dispatch} = this.props + dispatch(actions.showBucketPolicy()) + } + + hideBucketPolicy(e) { + e.preventDefault() + const {dispatch} = this.props + dispatch(actions.hideBucketPolicy()) + } + + uploadFile(e) { + e.preventDefault() + const {dispatch, buckets} = this.props + + if (buckets.length === 0) { + dispatch(actions.showAlert({ + type: 'danger', + message: "Bucket needs to be created before trying to upload files." + })) + return + } + let file = e.target.files[0] + e.target.value = null + this.xhr = new XMLHttpRequest() + dispatch(actions.uploadFile(file, this.xhr)) + } + + removeObject() { + const {web, dispatch, currentPath, currentBucket, deleteConfirmation} = this.props + web.RemoveObject({ + bucketName: currentBucket, + objectName: deleteConfirmation.object + }) + .then(() => { + this.hideDeleteConfirmation() + dispatch(actions.selectPrefix(currentPath)) + }) + .catch(e => dispatch(actions.showAlert({ + type: 'danger', + message: e.message + }))) + } + + hideAlert(e) { + e.preventDefault() + const {dispatch} = this.props + dispatch(actions.hideAlert()) + } + + showDeleteConfirmation(e, object) { + e.preventDefault() + const {dispatch} = this.props + dispatch(actions.showDeleteConfirmation(object)) + } + + hideDeleteConfirmation() { + const {dispatch} = this.props + dispatch(actions.hideDeleteConfirmation()) + } + + shareObject(e, object) { + e.preventDefault() + const {dispatch} = this.props + dispatch(actions.shareObject(object)) + } + + hideShareObjectModal() { + const {dispatch} = this.props + dispatch(actions.hideShareObject()) + } + + dataType(name, contentType) { + return mime.getDataType(name, contentType) + } + + sortObjectsByName(e) { + const {dispatch, objects, sortNameOrder} = 
this.props + dispatch(actions.setObjects(utils.sortObjectsByName(objects, !sortNameOrder))) + dispatch(actions.setSortNameOrder(!sortNameOrder)) + } + + sortObjectsBySize() { + const {dispatch, objects, sortSizeOrder} = this.props + dispatch(actions.setObjects(utils.sortObjectsBySize(objects, !sortSizeOrder))) + dispatch(actions.setSortSizeOrder(!sortSizeOrder)) + } + + sortObjectsByDate() { + const {dispatch, objects, sortDateOrder} = this.props + dispatch(actions.setObjects(utils.sortObjectsByDate(objects, !sortDateOrder))) + dispatch(actions.setSortDateOrder(!sortDateOrder)) + } + + logout(e) { + const {web} = this.props + e.preventDefault() + web.Logout() + browserHistory.push(`${minioBrowserPrefix}/login`) + } + + landingPage(e) { + e.preventDefault() + this.props.dispatch(actions.selectBucket(this.props.buckets[0])) + } + + fullScreen(e) { + e.preventDefault() + let el = document.documentElement + if (el.requestFullscreen) { + el.requestFullscreen() + } + if (el.mozRequestFullScreen) { + el.mozRequestFullScreen() + } + if (el.webkitRequestFullscreen) { + el.webkitRequestFullscreen() + } + if (el.msRequestFullscreen) { + el.msRequestFullscreen() + } + } + + toggleSidebar(status) { + this.props.dispatch(actions.setSidebarStatus(status)) + } + + hideSidebar(event) { + let e = event || window.event; + + // Support all browsers. + let target = e.srcElement || e.target; + if (target.nodeType === 3) // Safari support. + target = target.parentNode; + + let targetID = target.id; + if (!(targetID === 'feh-trigger')) { + this.props.dispatch(actions.setSidebarStatus(false)) + } + } + + showSettings(e) { + e.preventDefault() + + const {dispatch} = this.props + dispatch(actions.showSettings()) + } + + showMessage() { + const {dispatch} = this.props + dispatch(actions.showAlert({ + type: 'success', + message: 'Link copied to clipboard!' 
+ })) + this.hideShareObjectModal() + } + + selectTexts() { + this.refs.copyTextInput.select() + } + + handleExpireValue(targetInput, inc) { + inc === -1 ? this.refs[targetInput].stepDown(1) : this.refs[targetInput].stepUp(1) + + if (this.refs.expireDays.value == 7) { + this.refs.expireHours.value = 0 + this.refs.expireMins.value = 0 + } + } + + + render() { + const {total, free} = this.props.storageInfo + const {showMakeBucketModal, alert, sortNameOrder, sortSizeOrder, sortDateOrder, showAbout, showBucketPolicy} = this.props + const {version, memory, platform, runtime} = this.props.serverInfo + const {sidebarStatus} = this.props + const {showSettings} = this.props + const {policies, currentBucket, currentPath} = this.props + const {deleteConfirmation} = this.props + const {shareObject} = this.props + const {web, prefixWritable} = this.props + + // Don't always show the SettingsModal. This is done here instead of in + // SettingsModal.js so as to allow for #componentWillMount to handle + // the loading of the settings. + let settingsModal = showSettings ? : + + let alertBox = +
+ { alert.message } +
+
+ // Make sure you don't show a fading out alert box on the initial web-page load. + if (!alert.message) + alertBox = '' + + let signoutTooltip = + Sign out + + let uploadTooltip = + Upload file + + let makeBucketTooltip = + Create bucket + + let loginButton = '' + let browserDropdownButton = '' + let storageUsageDetails = '' + + let used = total - free + let usedPercent = (used / total) * 100 + '%' + let freePercent = free * 100 / total + + if (web.LoggedIn()) { + browserDropdownButton = + } else { + loginButton =
Login + } + + if (web.LoggedIn()) { + storageUsageDetails =
+
+
+
+
    +
  • + Used: + { humanize.filesize(total - free) } +
  • +
  • + Free: + { humanize.filesize(total - used) } +
  • +
+
+ + } + + let createButton = '' + if (web.LoggedIn()) { + createButton = + + + + + + + + + + + + + + + + + } else { + if (prefixWritable) + createButton = + + + + + + + + + + + + + + } + + return ( +
+ +
+ + { alertBox } +
+
+
+
+
+
+
+
+ +
+
+ + { storageUsageDetails } +
    + + { loginButton } + { browserDropdownButton } +
+
+
+
+
+ Name + +
+
+ Size + +
+
+ Last Modified + +
+
+
+
+
+ +
+ + { createButton } + + + +
+
+ + +
+
+
+
+ + +
+
+ +
+
+
    +
  • +
    + Version +
    + { version } +
  • +
  • +
    + Memory +
    + { memory } +
  • +
  • +
    + Platform +
    + { platform } +
  • +
  • +
    + Runtime +
    + { runtime } +
  • +
+
+
+
+ + + Bucket Policy ( + { currentBucket }) + + +
+ + { policies.map((policy, i) => + ) } +
+
+ + + + + Share Object + + +
+ + +
+
+ +
+
+ +
+ Days +
+
+ +
+ +
+
+ +
+ Hours +
+
+ +
+ +
+
+ +
+ Minutes +
+
+ +
+ +
+
+
+
+
+ + + + +
+
+ { settingsModal } +
+
+
+ ) + } +} diff --git a/browser/app/js/components/BrowserDropdown.js b/browser/app/js/components/BrowserDropdown.js new file mode 100644 index 000000000..1aa272551 --- /dev/null +++ b/browser/app/js/components/BrowserDropdown.js @@ -0,0 +1,56 @@ +/* + * Minio Browser (C) 2016, 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import connect from 'react-redux/lib/components/connect' +import Dropdown from 'react-bootstrap/lib/Dropdown' + +let BrowserDropdown = ({fullScreen, showAbout, showSettings, logout}) => { + return ( +
  • + + + + + +
  • + Github +
  • +
  • + Fullscreen +
  • +
  • + Documentation +
  • +
  • + Ask for help +
  • +
  • + About +
  • +
  • + Settings +
  • +
  • + Sign Out +
  • + + + + ) +} + +export default connect(state => state)(BrowserDropdown) diff --git a/browser/app/js/components/BrowserUpdate.js b/browser/app/js/components/BrowserUpdate.js new file mode 100644 index 000000000..be4c7af3a --- /dev/null +++ b/browser/app/js/components/BrowserUpdate.js @@ -0,0 +1,42 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import connect from 'react-redux/lib/components/connect' + +import Tooltip from 'react-bootstrap/lib/Tooltip' +import OverlayTrigger from 'react-bootstrap/lib/OverlayTrigger' + +let BrowserUpdate = ({latestUiVersion}) => { + // Don't show an update if we're already updated! + if (latestUiVersion === currentUiVersion) return ( ) + + return ( +
  • + + + New update available. Click to refresh. + }> + +
  • + ) +} + +export default connect(state => { + return { + latestUiVersion: state.latestUiVersion + } +})(BrowserUpdate) diff --git a/browser/app/js/components/ConfirmModal.js b/browser/app/js/components/ConfirmModal.js new file mode 100644 index 000000000..fd98fa313 --- /dev/null +++ b/browser/app/js/components/ConfirmModal.js @@ -0,0 +1,50 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import Modal from 'react-bootstrap/lib/Modal' +import ModalBody from 'react-bootstrap/lib/ModalBody' + +let ConfirmModal = ({baseClass, icon, text, sub, okText, cancelText, okHandler, cancelHandler, show}) => { + return ( + + +
    + +
    +
    + { text } +
    +
    + { sub } +
    +
    +
    + + +
    +
    + ) +} + +export default ConfirmModal diff --git a/browser/app/js/components/Dropzone.js b/browser/app/js/components/Dropzone.js new file mode 100644 index 000000000..0ddab2661 --- /dev/null +++ b/browser/app/js/components/Dropzone.js @@ -0,0 +1,65 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import ReactDropzone from 'react-dropzone' +import * as actions from '../actions' + +// Dropzone is a drag-and-drop element for uploading files. It will create a +// landing zone of sorts that automatically receives the files. +export default class Dropzone extends React.Component { + + onDrop(files) { + // FIXME: Currently you can upload multiple files, but only one abort + // modal will be shown, and progress updates will only occur for one + // file at a time. See #171. + files.forEach(file => { + let req = new XMLHttpRequest() + + // Dispatch the upload. + web.dispatch(actions.uploadFile(file, req)) + }) + } + + render() { + // Overwrite the default styling from react-dropzone; otherwise it + // won't handle child elements correctly. + const style = { + height: '100%', + borderWidth: '2px', + borderStyle: 'dashed', + borderColor: '#fff' + } + const activeStyle = { + borderColor: '#777' + } + const rejectStyle = { + backgroundColor: '#ffdddd' + } + + // disableClick means that it won't trigger a file upload box when + // the user clicks on a file. 
+ return ( + + { this.props.children } + + ) + } +} diff --git a/browser/app/js/components/InputGroup.js b/browser/app/js/components/InputGroup.js new file mode 100644 index 000000000..c2b0e2ab2 --- /dev/null +++ b/browser/app/js/components/InputGroup.js @@ -0,0 +1,49 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' + +let InputGroup = ({label, id, name, value, onChange, type, spellCheck, required, readonly, autoComplete, align, className}) => { + var input = + if (readonly) + input = + return
    + { input } + + +
    +} + +export default InputGroup diff --git a/browser/app/js/components/Login.js b/browser/app/js/components/Login.js new file mode 100644 index 000000000..b5db1a872 --- /dev/null +++ b/browser/app/js/components/Login.js @@ -0,0 +1,133 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import classNames from 'classnames' +import logo from '../../img/logo.svg' +import Alert from 'react-bootstrap/lib/Alert' +import * as actions from '../actions' +import InputGroup from '../components/InputGroup' + +export default class Login extends React.Component { + handleSubmit(event) { + event.preventDefault() + const {web, dispatch, loginRedirectPath} = this.props + let message = '' + if (!document.getElementById('accessKey').value) { + message = 'Secret Key cannot be empty' + } + if (!document.getElementById('secretKey').value) { + message = 'Access Key cannot be empty' + } + if (message) { + dispatch(actions.showAlert({ + type: 'danger', + message + })) + return + } + web.Login({ + username: document.getElementById('accessKey').value, + password: document.getElementById('secretKey').value + }) + .then((res) => { + this.context.router.push(loginRedirectPath) + }) + .catch(e => { + dispatch(actions.setLoginError()) + dispatch(actions.showAlert({ + type: 'danger', + message: e.message + })) + }) + } + + componentWillMount() { + const {dispatch} = this.props + // Clear out any stale message 
in the alert of previous page + dispatch(actions.showAlert({ + type: 'danger', + message: '' + })) + document.body.classList.add('is-guest') + } + + componentWillUnmount() { + document.body.classList.remove('is-guest') + } + + hideAlert() { + const {dispatch} = this.props + dispatch(actions.hideAlert()) + } + + render() { + const {alert} = this.props + let alertBox = +
    + { alert.message } +
    +
    + // Make sure you don't show a fading out alert box on the initial web-page load. + if (!alert.message) + alertBox = '' + return ( +
    + { alertBox } +
    +
    + + + + + + + +
    +
    +
    + +
    + { window.location.host } +
    +
    +
    + ) + } +} + +Login.contextTypes = { + router: React.PropTypes.object.isRequired +} diff --git a/browser/app/js/components/ObjectsList.js b/browser/app/js/components/ObjectsList.js new file mode 100644 index 000000000..623594218 --- /dev/null +++ b/browser/app/js/components/ObjectsList.js @@ -0,0 +1,75 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import Moment from 'moment' +import humanize from 'humanize' +import connect from 'react-redux/lib/components/connect' +import Dropdown from 'react-bootstrap/lib/Dropdown' + + +let ObjectsList = ({objects, currentPath, selectPrefix, dataType, showDeleteConfirmation, shareObject, loadPath}) => { + const list = objects.map((object, i) => { + let size = object.name.endsWith('/') ? '-' : humanize.filesize(object.size) + let lastModified = object.name.endsWith('/') ? '-' : Moment(object.lastModified).format('lll') + let loadingClass = loadPath === `${currentPath}${object.name}` ? 'fesl-loading' : '' + let actionButtons = '' + let deleteButton = '' + if (web.LoggedIn()) + deleteButton = showDeleteConfirmation(e, `${currentPath}${object.name}`) }> + if (!object.name.endsWith('/')) { + actionButtons = + + + shareObject(e, `${currentPath}${object.name}`) }> + { deleteButton } + + + } + return ( +
    + +
    + { size } +
    +
    + { lastModified } +
    +
    + { actionButtons } +
    +
    + ) + }) + return ( +
    + { list } +
    + ) +} + +// Subscribe it to state changes. +export default connect(state => { + return { + objects: state.objects, + currentPath: state.currentPath, + loadPath: state.loadPath + } +})(ObjectsList) diff --git a/browser/app/js/components/Path.js b/browser/app/js/components/Path.js new file mode 100644 index 000000000..6ca85869b --- /dev/null +++ b/browser/app/js/components/Path.js @@ -0,0 +1,41 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import connect from 'react-redux/lib/components/connect' + +let Path = ({currentBucket, currentPath, selectPrefix}) => { + let dirPath = [] + let path = '' + if (currentPath) { + path = currentPath.split('/').map((dir, i) => { + dirPath.push(dir) + let dirPath_ = dirPath.join('/') + '/' + return selectPrefix(e, dirPath_) }>{ dir } + }) + } + + return ( +

    selectPrefix(e, '') } href="">{ currentBucket }{ path }

    + ) +} + +export default connect(state => { + return { + currentBucket: state.currentBucket, + currentPath: state.currentPath + } +})(Path) diff --git a/browser/app/js/components/Policy.js b/browser/app/js/components/Policy.js new file mode 100644 index 000000000..65930ad64 --- /dev/null +++ b/browser/app/js/components/Policy.js @@ -0,0 +1,80 @@ +import { READ_ONLY, WRITE_ONLY, READ_WRITE } from '../constants' + +import React, { Component, PropTypes } from 'react' +import connect from 'react-redux/lib/components/connect' +import classnames from 'classnames' +import * as actions from '../actions' + +class Policy extends Component { + constructor(props, context) { + super(props, context) + this.state = {} + } + + handlePolicyChange(e) { + this.setState({ + policy: { + policy: e.target.value + } + }) + } + + removePolicy(e) { + e.preventDefault() + const {dispatch, currentBucket, prefix} = this.props + let newPrefix = prefix.replace(currentBucket + '/', '') + newPrefix = newPrefix.replace('*', '') + web.SetBucketPolicy({ + bucketName: currentBucket, + prefix: newPrefix, + policy: 'none' + }) + .then(() => { + dispatch(actions.setPolicies(this.props.policies.filter(policy => policy.prefix != prefix))) + }) + .catch(e => dispatch(actions.showAlert({ + type: 'danger', + message: e.message, + }))) + } + + render() { + const {policy, prefix, currentBucket} = this.props + let newPrefix = prefix.replace(currentBucket + '/', '') + newPrefix = newPrefix.replace('*', '') + + if (!newPrefix) + newPrefix = '*' + + return ( +
    +
    + { newPrefix } +
    +
    + +
    +
    + +
    +
    + ) + } +} + +export default connect(state => state)(Policy) diff --git a/browser/app/js/components/PolicyInput.js b/browser/app/js/components/PolicyInput.js new file mode 100644 index 000000000..75809df96 --- /dev/null +++ b/browser/app/js/components/PolicyInput.js @@ -0,0 +1,83 @@ +import { READ_ONLY, WRITE_ONLY, READ_WRITE } from '../constants' +import React, { Component, PropTypes } from 'react' +import connect from 'react-redux/lib/components/connect' +import classnames from 'classnames' +import * as actions from '../actions' + +class PolicyInput extends Component { + componentDidMount() { + const {web, dispatch} = this.props + web.ListAllBucketPolicies({ + bucketName: this.props.currentBucket + }).then(res => { + let policies = res.policies + if (policies) dispatch(actions.setPolicies(policies)) + }).catch(err => { + dispatch(actions.showAlert({ + type: 'danger', + message: err.message + })) + }) + } + + componentWillUnmount() { + const {dispatch} = this.props + dispatch(actions.setPolicies([])) + } + + handlePolicySubmit(e) { + e.preventDefault() + const {web, dispatch} = this.props + + web.SetBucketPolicy({ + bucketName: this.props.currentBucket, + prefix: this.prefix.value, + policy: this.policy.value + }) + .then(() => { + dispatch(actions.setPolicies([{ + policy: this.policy.value, + prefix: this.prefix.value + '*', + }, ...this.props.policies])) + this.prefix.value = '' + }) + .catch(e => dispatch(actions.showAlert({ + type: 'danger', + message: e.message, + }))) + } + + render() { + return ( +
    +
    + this.prefix = prefix } + className="form-control" + placeholder="Prefix" + editable={ true } /> +
    +
    + +
    +
    + +
    +
    + ) + } +} + +export default connect(state => state)(PolicyInput) diff --git a/browser/app/js/components/SettingsModal.js b/browser/app/js/components/SettingsModal.js new file mode 100644 index 000000000..51bd4333b --- /dev/null +++ b/browser/app/js/components/SettingsModal.js @@ -0,0 +1,215 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import connect from 'react-redux/lib/components/connect' +import * as actions from '../actions' + +import Tooltip from 'react-bootstrap/lib/Tooltip' +import Modal from 'react-bootstrap/lib/Modal' +import ModalBody from 'react-bootstrap/lib/ModalBody' +import ModalHeader from 'react-bootstrap/lib/ModalHeader' +import OverlayTrigger from 'react-bootstrap/lib/OverlayTrigger' +import InputGroup from './InputGroup' + +class SettingsModal extends React.Component { + + // When the settings are shown, it loads the access key and secret key. + componentWillMount() { + const {web, dispatch} = this.props + const {serverInfo} = this.props + + let accessKeyEnv = '' + let secretKeyEnv = '' + // Check environment variables first. They may or may not have been + // loaded already; they load in Browse#componentDidMount. 
+ if (serverInfo.envVars) { + serverInfo.envVars.forEach(envVar => { + let keyVal = envVar.split('=') + if (keyVal[0] == 'MINIO_ACCESS_KEY') { + accessKeyEnv = keyVal[1] + } else if (keyVal[0] == 'MINIO_SECRET_KEY') { + secretKeyEnv = keyVal[1] + } + }) + } + if (accessKeyEnv != '' || secretKeyEnv != '') { + dispatch(actions.setSettings({ + accessKey: accessKeyEnv, + secretKey: secretKeyEnv, + keysReadOnly: true + })) + } else { + web.GetAuth() + .then(data => { + dispatch(actions.setSettings({ + accessKey: data.accessKey, + secretKey: data.secretKey + })) + }) + } + } + + // When they are re-hidden, the keys are unloaded from memory. + componentWillUnmount() { + const {dispatch} = this.props + + dispatch(actions.setSettings({ + accessKey: '', + secretKey: '', + secretKeyVisible: false + })) + dispatch(actions.hideSettings()) + } + + // Handle field changes from inside the modal. + accessKeyChange(e) { + const {dispatch} = this.props + dispatch(actions.setSettings({ + accessKey: e.target.value + })) + } + + secretKeyChange(e) { + const {dispatch} = this.props + dispatch(actions.setSettings({ + secretKey: e.target.value + })) + } + + secretKeyVisible(secretKeyVisible) { + const {dispatch} = this.props + dispatch(actions.setSettings({ + secretKeyVisible + })) + } + + // Save the auth params and set them. 
+ setAuth(e) { + e.preventDefault() + const {web, dispatch} = this.props + + let accessKey = document.getElementById('accessKey').value + let secretKey = document.getElementById('secretKey').value + web.SetAuth({ + accessKey, + secretKey + }) + .then(data => { + dispatch(actions.setSettings({ + accessKey: '', + secretKey: '', + secretKeyVisible: false + })) + dispatch(actions.hideSettings()) + dispatch(actions.showAlert({ + type: 'success', + message: 'Changed credentials' + })) + }) + .catch(err => { + dispatch(actions.setSettings({ + accessKey: '', + secretKey: '', + secretKeyVisible: false + })) + dispatch(actions.hideSettings()) + dispatch(actions.showAlert({ + type: 'danger', + message: err.message + })) + }) + } + + generateAuth(e) { + e.preventDefault() + const {dispatch} = this.props + + web.GenerateAuth() + .then(data => { + dispatch(actions.setSettings({ + secretKeyVisible: true + })) + dispatch(actions.setSettings({ + accessKey: data.accessKey, + secretKey: data.secretKey + })) + }) + } + + hideSettings(e) { + e.preventDefault() + + const {dispatch} = this.props + dispatch(actions.hideSettings()) + } + + render() { + let {settings} = this.props + + return ( + + + Change Password + + + + + + +
    + + + +
    +
    + ) + } +} + +export default connect(state => { + return { + web: state.web, + settings: state.settings, + serverInfo: state.serverInfo + } +})(SettingsModal) diff --git a/browser/app/js/components/SideBar.js b/browser/app/js/components/SideBar.js new file mode 100644 index 000000000..ad4aee576 --- /dev/null +++ b/browser/app/js/components/SideBar.js @@ -0,0 +1,85 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import classNames from 'classnames' +import ClickOutHandler from 'react-onclickout' +import Scrollbars from 'react-custom-scrollbars/lib/Scrollbars' +import connect from 'react-redux/lib/components/connect' + +import logo from '../../img/logo.svg' + +let SideBar = ({visibleBuckets, loadBucket, currentBucket, selectBucket, searchBuckets, landingPage, sidebarStatus, clickOutside, showPolicy}) => { + + const list = visibleBuckets.map((bucket, i) => { + return
  • selectBucket(e, bucket) }> + + { bucket } + + +
  • + }) + + return ( + +
    + +
    +
    + + +
    +
    +
    }> +
      + { list } +
    + +
    +
    + +
    + + ) +} + +// Subscribe it to state changes that affect only the sidebar. +export default connect(state => { + return { + visibleBuckets: state.visibleBuckets, + loadBucket: state.loadBucket, + currentBucket: state.currentBucket, + sidebarStatus: state.sidebarStatus + } +})(SideBar) diff --git a/browser/app/js/components/UploadModal.js b/browser/app/js/components/UploadModal.js new file mode 100644 index 000000000..6658ab225 --- /dev/null +++ b/browser/app/js/components/UploadModal.js @@ -0,0 +1,141 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react' +import humanize from 'humanize' +import classNames from 'classnames' +import connect from 'react-redux/lib/components/connect' + +import ProgressBar from 'react-bootstrap/lib/ProgressBar' +import ConfirmModal from './ConfirmModal' + +import * as actions from '../actions' + +// UploadModal is a modal that handles multiple file uploads. +// During the upload, it displays a progress bar, and can transform into an +// abort modal if the user decides to abort the uploads. +class UploadModal extends React.Component { + + // Abort all the current uploads. 
+ abortUploads(e) { + e.preventDefault() + const {dispatch, uploads} = this.props + + for (var slug in uploads) { + let upload = uploads[slug] + upload.xhr.abort() + dispatch(actions.stopUpload({ + slug + })) + } + + this.hideAbort(e) + } + + // Show the abort modal instead of the progress modal. + showAbort(e) { + e.preventDefault() + const {dispatch} = this.props + + dispatch(actions.setShowAbortModal(true)) + } + + // Show the progress modal instead of the abort modal. + hideAbort(e) { + e.preventDefault() + const {dispatch} = this.props + + dispatch(actions.setShowAbortModal(false)) + } + + render() { + const {uploads, showAbortModal} = this.props + + // Show the abort modal. + if (showAbortModal) { + let baseClass = classNames({ + 'abort-upload': true + }) + let okIcon = classNames({ + 'fa': true, + 'fa-times': true + }) + let cancelIcon = classNames({ + 'fa': true, + 'fa-cloud-upload': true + }) + + return ( + + + ) + } + + // If we don't have any files uploading, don't show anything. + let numberUploading = Object.keys(uploads).length + if (numberUploading == 0) + return ( ) + + let totalLoaded = 0 + let totalSize = 0 + + // Iterate over each upload, adding together the total size and that + // which has been uploaded. + for (var slug in uploads) { + let upload = uploads[slug] + totalLoaded += upload.loaded + totalSize += upload.size + } + + let percent = (totalLoaded / totalSize) * 100 + + // If more than one: "Uploading files (5)..." + // If only one: "Uploading myfile.txt..." + let text = 'Uploading ' + (numberUploading == 1 ? `'${uploads[Object.keys(uploads)[0]].name}'` : `files (${numberUploading})`) + '...' + + return ( +
    + +
    + { text } +
    + +
    + { humanize.filesize(totalLoaded) } ({ percent.toFixed(2) } %) +
    +
    + ) + } +} + +export default connect(state => { + return { + uploads: state.uploads, + showAbortModal: state.showAbortModal + } +})(UploadModal) diff --git a/browser/app/js/components/__tests__/Login-test.js b/browser/app/js/components/__tests__/Login-test.js new file mode 100644 index 000000000..1397fb637 --- /dev/null +++ b/browser/app/js/components/__tests__/Login-test.js @@ -0,0 +1,54 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +import React from 'react' +import ReactTestUtils, {renderIntoDocument} from 'react-addons-test-utils' + +import expect from 'expect' +import Login from '../Login' + +describe('Login', () => { + it('it should have empty credentials', () => { + const alert = { + show: false + } + const dispatch = () => {} + let loginComponent = renderIntoDocument() + const accessKey = document.getElementById('accessKey') + const secretKey = document.getElementById('secretKey') + // Validate default value. 
+ expect(accessKey.value).toEqual('') + expect(secretKey.value).toEqual('') + }) + it('it should set accessKey and secretKey', () => { + const alert = { + show: false + } + const dispatch = () => {} + let loginComponent = renderIntoDocument() + let accessKey = loginComponent.refs.accessKey + let secretKey = loginComponent.refs.secretKey + accessKey.value = 'demo-username' + secretKey.value = 'demo-password' + ReactTestUtils.Simulate.change(accessKey) + ReactTestUtils.Simulate.change(secretKey) + // Validate if the change has occurred. + expect(loginComponent.refs.accessKey.value).toEqual('demo-username') + expect(loginComponent.refs.secretKey.value).toEqual('demo-password') + }) +}); +*/ diff --git a/browser/app/js/constants.js b/browser/app/js/constants.js new file mode 100644 index 000000000..35c20f418 --- /dev/null +++ b/browser/app/js/constants.js @@ -0,0 +1,23 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// File for all the browser constants. + +// minioBrowserPrefix absolute path. +export const minioBrowserPrefix = '/minio' +export const READ_ONLY = 'readonly' +export const WRITE_ONLY = 'writeonly' +export const READ_WRITE = 'readwrite' diff --git a/browser/app/js/jsonrpc.js b/browser/app/js/jsonrpc.js new file mode 100644 index 000000000..99e1d1102 --- /dev/null +++ b/browser/app/js/jsonrpc.js @@ -0,0 +1,91 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import SuperAgent from 'superagent-es6-promise'; +import url from 'url' +import Moment from 'moment' + +export default class JSONrpc { + constructor(params) { + this.endpoint = params.endpoint + this.namespace = params.namespace + this.version = '2.0'; + const parsedUrl = url.parse(this.endpoint) + this.host = parsedUrl.hostname + this.path = parsedUrl.path + this.port = parsedUrl.port + + switch (parsedUrl.protocol) { + case 'http:': { + this.scheme = 'http' + if (parsedUrl.port === 0) { + this.port = 80 + } + break + } + case 'https:': { + this.scheme = 'https' + if (parsedUrl.port === 0) { + this.port = 443 + } + break + } + default: { + throw new Error('Unknown protocol: ' + parsedUrl.protocol) + } + } + } + // call('Get', {id: NN, params: [...]}, function() {}) + call(method, options, token) { + if (!options) { + options = {} + } + if (!options.id) { + options.id = 1; + } + if (!options.params) { + options.params = {}; + } + const dataObj = { + id: options.id, + jsonrpc: this.version, + params: options.params ? options.params : {}, + method: this.namespace ? this.namespace + '.' 
+ method : method + } + let requestParams = { + host: this.host, + port: this.port, + path: this.path, + scheme: this.scheme, + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-amz-date': Moment().utc().format('YYYYMMDDTHHmmss') + 'Z' + } + } + + if (token) { + requestParams.headers.Authorization = 'Bearer ' + token + } + + let req = SuperAgent.post(this.endpoint) + for (let key in requestParams.headers) { + req.set(key, requestParams.headers[key]) + } + // req.set('Access-Control-Allow-Origin', 'http://localhost:8080') + return req.send(JSON.stringify(dataObj)).then(res => res) + } +} diff --git a/browser/app/js/mime.js b/browser/app/js/mime.js new file mode 100644 index 000000000..9c8ed40fa --- /dev/null +++ b/browser/app/js/mime.js @@ -0,0 +1,106 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import mimedb from 'mime-types' + +const isFolder = (name, contentType) => { + if (name.endsWith('/')) return true + return false +} + +const isPdf = (name, contentType) => { + if (contentType === 'application/pdf') return true + return false +} + +const isZip = (name, contentType) => { + if (!contentType || !contentType.includes('/')) return false + if (contentType.split('/')[1].includes('zip')) return true + return false +} + +const isCode = (name, contentType) => { + const codeExt = ['c', 'cpp', 'go', 'py', 'java', 'rb', 'js', 'pl', 'fs', + 'php', 'css', 'less', 'scss', 'coffee', 'net', 'html', + 'rs', 'exs', 'scala', 'hs', 'clj', 'el', 'scm', 'lisp', + 'asp', 'aspx'] + const ext = name.split('.').reverse()[0] + for (var i in codeExt) { + if (ext === codeExt[i]) return true + } + return false +} + +const isExcel = (name, contentType) => { + if (!contentType || !contentType.includes('/')) return false + const types = ['excel', 'spreadsheet'] + const subType = contentType.split('/')[1] + for (var i in types) { + if (subType.includes(types[i])) return true + } + return false +} + +const isDoc = (name, contentType) => { + if (!contentType || !contentType.includes('/')) return false + const types = ['word', '.document'] + const subType = contentType.split('/')[1] + for (var i in types) { + if (subType.includes(types[i])) return true + } + return false +} + +const isPresentation = (name, contentType) => { + if (!contentType || !contentType.includes('/')) return false + var types = ['powerpoint', 'presentation'] + const subType = contentType.split('/')[1] + for (var i in types) { + if (subType.includes(types[i])) return true + } + return false +} + +const typeToIcon = (type) => { + return (name, contentType) => { + if (!contentType || !contentType.includes('/')) return false + if (contentType.split('/')[0] === type) return true + return false + } +} + +export const getDataType = (name, contentType) => { + if (contentType === "") { + contentType = 
mimedb.lookup(name) || 'application/octet-stream' + } + const check = [ + ['folder', isFolder], + ['code', isCode], + ['audio', typeToIcon('audio')], + ['image', typeToIcon('image')], + ['video', typeToIcon('video')], + ['text', typeToIcon('text')], + ['pdf', isPdf], + ['zip', isZip], + ['excel', isExcel], + ['doc', isDoc], + ['presentation', isPresentation] + ] + for (var i in check) { + if (check[i][1](name, contentType)) return check[i][0] + } + return 'other' +} diff --git a/browser/app/js/reducers.js b/browser/app/js/reducers.js new file mode 100644 index 000000000..00482e1ff --- /dev/null +++ b/browser/app/js/reducers.js @@ -0,0 +1,176 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import * as actions from './actions' +import { minioBrowserPrefix } from './constants' + +export default (state = { + buckets: [], + visibleBuckets: [], + objects: [], + storageInfo: {}, + serverInfo: {}, + currentBucket: '', + currentPath: '', + showMakeBucketModal: false, + uploads: {}, + alert: { + show: false, + type: 'danger', + message: '' + }, + loginError: false, + sortNameOrder: false, + sortSizeOrder: false, + sortDateOrder: false, + latestUiVersion: currentUiVersion, + sideBarActive: false, + loginRedirectPath: minioBrowserPrefix, + settings: { + accessKey: '', + secretKey: '', + secretKeyVisible: false + }, + showSettings: false, + policies: [], + deleteConfirmation: { + object: '', + show: false + }, + shareObject: { + show: false, + url: '', + expiry: 604800 + }, + prefixWritable: false + }, action) => { + let newState = Object.assign({}, state) + switch (action.type) { + case actions.SET_WEB: + newState.web = action.web + break + case actions.SET_BUCKETS: + newState.buckets = action.buckets + break + case actions.ADD_BUCKET: + newState.buckets = [action.bucket, ...newState.buckets] + newState.visibleBuckets = [action.bucket, ...newState.visibleBuckets] + break + case actions.SET_VISIBLE_BUCKETS: + newState.visibleBuckets = action.visibleBuckets + break + case actions.SET_CURRENT_BUCKET: + newState.currentBucket = action.currentBucket + break + case actions.SET_OBJECTS: + newState.objects = action.objects + break + case actions.SET_CURRENT_PATH: + newState.currentPath = action.currentPath + break + case actions.SET_STORAGE_INFO: + newState.storageInfo = action.storageInfo + break + case actions.SET_SERVER_INFO: + newState.serverInfo = action.serverInfo + break + case actions.SHOW_MAKEBUCKET_MODAL: + newState.showMakeBucketModal = action.showMakeBucketModal + break + case actions.UPLOAD_PROGRESS: + newState.uploads = Object.assign({}, newState.uploads) + newState.uploads[action.slug].loaded = action.loaded + break + case actions.ADD_UPLOAD: + 
newState.uploads = Object.assign({}, newState.uploads, { + [action.slug]: { + loaded: 0, + size: action.size, + xhr: action.xhr, + name: action.name + } + }) + break + case actions.STOP_UPLOAD: + newState.uploads = Object.assign({}, newState.uploads) + delete newState.uploads[action.slug] + break + case actions.SET_ALERT: + if (newState.alert.alertTimeout) clearTimeout(newState.alert.alertTimeout) + if (!action.alert.show) { + newState.alert = Object.assign({}, newState.alert, { + show: false + }) + } else { + newState.alert = action.alert + } + break + case actions.SET_LOGIN_ERROR: + newState.loginError = true + break + case actions.SET_SHOW_ABORT_MODAL: + newState.showAbortModal = action.showAbortModal + break + case actions.SHOW_ABOUT: + newState.showAbout = action.showAbout + break + case actions.SET_SORT_NAME_ORDER: + newState.sortNameOrder = action.sortNameOrder + break + case actions.SET_SORT_SIZE_ORDER: + newState.sortSizeOrder = action.sortSizeOrder + break + case actions.SET_SORT_DATE_ORDER: + newState.sortDateOrder = action.sortDateOrder + break + case actions.SET_LATEST_UI_VERSION: + newState.latestUiVersion = action.latestUiVersion + break + case actions.SET_SIDEBAR_STATUS: + newState.sidebarStatus = action.sidebarStatus + break + case actions.SET_LOGIN_REDIRECT_PATH: + newState.loginRedirectPath = action.path + case actions.SET_LOAD_BUCKET: + newState.loadBucket = action.loadBucket + break + case actions.SET_LOAD_PATH: + newState.loadPath = action.loadPath + break + case actions.SHOW_SETTINGS: + newState.showSettings = action.showSettings + break + case actions.SET_SETTINGS: + newState.settings = Object.assign({}, newState.settings, action.settings) + break + case actions.SHOW_BUCKET_POLICY: + newState.showBucketPolicy = action.showBucketPolicy + break + case actions.SET_POLICIES: + newState.policies = action.policies + break + case actions.DELETE_CONFIRMATION: + newState.deleteConfirmation = Object.assign({}, action.payload) + break + case 
actions.SET_SHARE_OBJECT: + newState.shareObject = Object.assign({}, action.shareObject) + break + case actions.SET_PREFIX_WRITABLE: + newState.prefixWritable = action.prefixWritable + break + } + return newState +} diff --git a/browser/app/js/utils.js b/browser/app/js/utils.js new file mode 100644 index 000000000..3aee71a1b --- /dev/null +++ b/browser/app/js/utils.js @@ -0,0 +1,85 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { minioBrowserPrefix } from './constants.js' + +export const sortObjectsByName = (objects, order) => { + let folders = objects.filter(object => object.name.endsWith('/')) + let files = objects.filter(object => !object.name.endsWith('/')) + folders = folders.sort((a, b) => { + if (a.name.toLowerCase() < b.name.toLowerCase()) return -1 + if (a.name.toLowerCase() > b.name.toLowerCase()) return 1 + return 0 + }) + files = files.sort((a, b) => { + if (a.name.toLowerCase() < b.name.toLowerCase()) return -1 + if (a.name.toLowerCase() > b.name.toLowerCase()) return 1 + return 0 + }) + if (order) { + folders = folders.reverse() + files = files.reverse() + } + return [...folders, ...files] +} + +export const sortObjectsBySize = (objects, order) => { + let folders = objects.filter(object => object.name.endsWith('/')) + let files = objects.filter(object => !object.name.endsWith('/')) + files = files.sort((a, b) => a.size - b.size) + if (order) + files = files.reverse() + return 
[...folders, ...files] +} + +export const sortObjectsByDate = (objects, order) => { + let folders = objects.filter(object => object.name.endsWith('/')) + let files = objects.filter(object => !object.name.endsWith('/')) + files = files.sort((a, b) => new Date(a.lastModified).getTime() - new Date(b.lastModified).getTime()) + if (order) + files = files.reverse() + return [...folders, ...files] +} + +export const pathSlice = (path) => { + path = path.replace(minioBrowserPrefix, '') + let prefix = '' + let bucket = '' + if (!path) return { + bucket, + prefix + } + let objectIndex = path.indexOf('/', 1) + if (objectIndex == -1) { + bucket = path.slice(1) + return { + bucket, + prefix + } + } + bucket = path.slice(1, objectIndex) + prefix = path.slice(objectIndex + 1) + return { + bucket, + prefix + } +} + +export const pathJoin = (bucket, prefix) => { + if (!prefix) + prefix = '' + return minioBrowserPrefix + '/' + bucket + '/' + prefix +} diff --git a/browser/app/js/web.js b/browser/app/js/web.js new file mode 100644 index 000000000..a4c241137 --- /dev/null +++ b/browser/app/js/web.js @@ -0,0 +1,124 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { browserHistory } from 'react-router' +import JSONrpc from './jsonrpc' +import * as actions from './actions' +import { minioBrowserPrefix } from './constants.js' +import Moment from 'moment' +import storage from 'local-storage-fallback' + +export default class Web { + constructor(endpoint, dispatch) { + const namespace = 'Web' + this.dispatch = dispatch + this.JSONrpc = new JSONrpc({ + endpoint, + namespace + }) + } + makeCall(method, options) { + return this.JSONrpc.call(method, { + params: options + }, storage.getItem('token')) + .catch(err => { + if (err.status === 401) { + storage.removeItem('token') + browserHistory.push(`${minioBrowserPrefix}/login`) + throw new Error('Please re-login.') + } + if (err.status) + throw new Error(`Server returned error [${err.status}]`) + throw new Error('Minio server is unreachable') + }) + .then(res => { + let json = JSON.parse(res.text) + let result = json.result + let error = json.error + if (error) { + throw new Error(error.message) + } + if (!Moment(result.uiVersion).isValid()) { + throw new Error("Invalid UI version in the JSON-RPC response") + } + if (result.uiVersion !== currentUiVersion + && currentUiVersion !== 'MINIO_UI_VERSION') { + storage.setItem('newlyUpdated', true) + location.reload() + } + return result + }) + } + LoggedIn() { + return !!storage.getItem('token') + } + Login(args) { + return this.makeCall('Login', args) + .then(res => { + storage.setItem('token', `${res.token}`) + return res + }) + } + Logout() { + storage.removeItem('token') + } + ServerInfo() { + return this.makeCall('ServerInfo') + } + StorageInfo() { + return this.makeCall('StorageInfo') + } + ListBuckets() { + return this.makeCall('ListBuckets') + } + MakeBucket(args) { + return this.makeCall('MakeBucket', args) + } + ListObjects(args) { + return this.makeCall('ListObjects', args) + } + PresignedGet(args) { + return this.makeCall('PresignedGet', args) + } + PutObjectURL(args) { + return this.makeCall('PutObjectURL', args) + 
} + RemoveObject(args) { + return this.makeCall('RemoveObject', args) + } + GetAuth() { + return this.makeCall('GetAuth') + } + GenerateAuth() { + return this.makeCall('GenerateAuth') + } + SetAuth(args) { + return this.makeCall('SetAuth', args) + .then(res => { + storage.setItem('token', `${res.token}`) + return res + }) + } + GetBucketPolicy(args) { + return this.makeCall('GetBucketPolicy', args) + } + SetBucketPolicy(args) { + return this.makeCall('SetBucketPolicy', args) + } + ListAllBucketPolicies(args) { + return this.makeCall('ListAllBucketPolicies', args) + } +} diff --git a/browser/app/less/inc/alert.less b/browser/app/less/inc/alert.less new file mode 100644 index 000000000..4c60e5d65 --- /dev/null +++ b/browser/app/less/inc/alert.less @@ -0,0 +1,68 @@ +.alert { + border: 0; + position: fixed; + max-width: 500px; + margin: 0; + box-shadow: 0 4px 5px rgba(0, 0, 0, 0.1); + color: @white; + width: 100%; + right: 20px; + border-radius: 3px; + padding: 17px 50px 17px 17px; + z-index: 10010; + .animation-duration(800ms); + .animation-fill-mode(both); + + &:not(.progress) { + top: 20px; + + @media(min-width: (@screen-sm-min)) { + left: 50%; + margin-left: -250px; + } + } + + &.progress { + bottom: 20px; + right: 20px; + } + + &.alert-danger { + background: @red; + } + + &.alert-success { + background: @green; + } + + &.alert-info { + background: @blue; + } + + @media(max-width: (@screen-xs-max)) { + left: 20px; + width: ~"calc(100% - 40px)"; + max-width: 100%; + } + + .progress { + margin: 10px 10px 8px 0; + height: 5px; + box-shadow: none; + border-radius: 1px; + background-color: @blue; + border-radius: 2px; + overflow: hidden; + } + + .progress-bar { + box-shadow: none; + background-color: @white; + height: 100%; + } + + .close { + position: absolute; + top: 15px; + } +} \ No newline at end of file diff --git a/browser/app/less/inc/animate/animate.less b/browser/app/less/inc/animate/animate.less new file mode 100644 index 000000000..33c53b1ae --- /dev/null 
+++ b/browser/app/less/inc/animate/animate.less @@ -0,0 +1,13 @@ +.animated{ + &.infinite { + .animation-iteration-count(infinite); + } +} + +@import 'fadeIn'; +@import 'fadeInDown'; +@import 'fadeInUp'; +@import 'fadeOut'; +@import 'fadeOutDown'; +@import 'fadeOutUp'; +@import 'zoomIn'; diff --git a/browser/app/less/inc/animate/fadeIn.less b/browser/app/less/inc/animate/fadeIn.less new file mode 100644 index 000000000..50282ac98 --- /dev/null +++ b/browser/app/less/inc/animate/fadeIn.less @@ -0,0 +1,26 @@ +@-webkit-keyframes fadeIn { + 0% {opacity: 0;} + 100% {opacity: 1;} +} + +@-moz-keyframes fadeIn { + 0% {opacity: 0;} + 100% {opacity: 1;} +} + +@-o-keyframes fadeIn { + 0% {opacity: 0;} + 100% {opacity: 1;} +} + +@keyframes fadeIn { + 0% {opacity: 0;} + 100% {opacity: 1;} +} + +.fadeIn { + -webkit-animation-name: fadeIn; + -moz-animation-name: fadeIn; + -o-animation-name: fadeIn; + animation-name: fadeIn; +} \ No newline at end of file diff --git a/browser/app/less/inc/animate/fadeInDown.less b/browser/app/less/inc/animate/fadeInDown.less new file mode 100644 index 000000000..2a959322e --- /dev/null +++ b/browser/app/less/inc/animate/fadeInDown.less @@ -0,0 +1,54 @@ +@-webkit-keyframes fadeInDown { + 0% { + opacity: 0; + -webkit-transform: translateY(-20px); + } + + 100% { + opacity: 1; + -webkit-transform: translateY(0); + } +} + +@-moz-keyframes fadeInDown { + 0% { + opacity: 0; + -moz-transform: translateY(-20px); + } + + 100% { + opacity: 1; + -moz-transform: translateY(0); + } +} + +@-o-keyframes fadeInDown { + 0% { + opacity: 0; + -ms-transform: translateY(-20px); + } + + 100% { + opacity: 1; + -ms-transform: translateY(0); + } +} + +@keyframes fadeInDown { + 0% { + opacity: 0; + transform: translateY(-20px); + } + + 100% { + opacity: 1; + transform: translateY(0); + } +} + +.fadeInDown { + -webkit-animation-name: fadeInDown; + -moz-animation-name: fadeInDown; + -o-animation-name: fadeInDown; + animation-name: fadeInDown; +} \ No newline at end of file 
diff --git a/browser/app/less/inc/animate/fadeInUp.less b/browser/app/less/inc/animate/fadeInUp.less new file mode 100644 index 000000000..54b4d26ec --- /dev/null +++ b/browser/app/less/inc/animate/fadeInUp.less @@ -0,0 +1,54 @@ +@-webkit-keyframes fadeInUp { + 0% { + opacity: 0; + -webkit-transform: translateY(20px); + } + + 100% { + opacity: 1; + -webkit-transform: translateY(0); + } +} + +@-moz-keyframes fadeInUp { + 0% { + opacity: 0; + -moz-transform: translateY(20px); + } + + 100% { + opacity: 1; + -moz-transform: translateY(0); + } +} + +@-o-keyframes fadeInUp { + 0% { + opacity: 0; + -o-transform: translateY(20px); + } + + 100% { + opacity: 1; + -o-transform: translateY(0); + } +} + +@keyframes fadeInUp { + 0% { + opacity: 0; + transform: translateY(20px); + } + + 100% { + opacity: 1; + transform: translateY(0); + } +} + +.fadeInUp { + -webkit-animation-name: fadeInUp; + -moz-animation-name: fadeInUp; + -o-animation-name: fadeInUp; + animation-name: fadeInUp; +} \ No newline at end of file diff --git a/browser/app/less/inc/animate/fadeOut.less b/browser/app/less/inc/animate/fadeOut.less new file mode 100644 index 000000000..ba64505b9 --- /dev/null +++ b/browser/app/less/inc/animate/fadeOut.less @@ -0,0 +1,26 @@ +@-webkit-keyframes fadeOut { + 0% {opacity: 1;} + 100% {opacity: 0;} +} + +@-moz-keyframes fadeOut { + 0% {opacity: 1;} + 100% {opacity: 0;} +} + +@-o-keyframes fadeOut { + 0% {opacity: 1;} + 100% {opacity: 0;} +} + +@keyframes fadeOut { + 0% {opacity: 1;} + 100% {opacity: 0;} +} + +.fadeOut { + -webkit-animation-name: fadeOut; + -moz-animation-name: fadeOut; + -o-animation-name: fadeOut; + animation-name: fadeOut; +} \ No newline at end of file diff --git a/browser/app/less/inc/animate/fadeOutDown.less b/browser/app/less/inc/animate/fadeOutDown.less new file mode 100644 index 000000000..214e75367 --- /dev/null +++ b/browser/app/less/inc/animate/fadeOutDown.less @@ -0,0 +1,54 @@ +@-webkit-keyframes fadeOutDown { + 0% { + opacity: 1; + 
-webkit-transform: translateY(0); + } + + 100% { + opacity: 0; + -webkit-transform: translateY(20px); + } +} + +@-moz-keyframes fadeOutDown { + 0% { + opacity: 1; + -moz-transform: translateY(0); + } + + 100% { + opacity: 0; + -moz-transform: translateY(20px); + } +} + +@-o-keyframes fadeOutDown { + 0% { + opacity: 1; + -o-transform: translateY(0); + } + + 100% { + opacity: 0; + -o-transform: translateY(20px); + } +} + +@keyframes fadeOutDown { + 0% { + opacity: 1; + transform: translateY(0); + } + + 100% { + opacity: 0; + transform: translateY(20px); + } +} + +.fadeOutDown { + -webkit-animation-name: fadeOutDown; + -moz-animation-name: fadeOutDown; + -o-animation-name: fadeOutDown; + animation-name: fadeOutDown; +} \ No newline at end of file diff --git a/browser/app/less/inc/animate/fadeOutUp.less b/browser/app/less/inc/animate/fadeOutUp.less new file mode 100644 index 000000000..cf6115ac0 --- /dev/null +++ b/browser/app/less/inc/animate/fadeOutUp.less @@ -0,0 +1,51 @@ +@-webkit-keyframes fadeOutUp { + 0% { + opacity: 1; + -webkit-transform: translateY(0); + } + + 100% { + opacity: 0; + -webkit-transform: translateY(-20px); + } +} +@-moz-keyframes fadeOutUp { + 0% { + opacity: 1; + -moz-transform: translateY(0); + } + + 100% { + opacity: 0; + -moz-transform: translateY(-20px); + } +} +@-o-keyframes fadeOutUp { + 0% { + opacity: 1; + -o-transform: translateY(0); + } + + 100% { + opacity: 0; + -o-transform: translateY(-20px); + } +} +@keyframes fadeOutUp { + 0% { + opacity: 1; + transform: translateY(0); + } + + 100% { + opacity: 0; + transform: translateY(-20px); + } +} + +.fadeOutUp { + -webkit-animation-name: fadeOutUp; + -moz-animation-name: fadeOutUp; + -o-animation-name: fadeOutUp; + animation-name: fadeOutUp; +} \ No newline at end of file diff --git a/browser/app/less/inc/animate/zoomIn.less b/browser/app/less/inc/animate/zoomIn.less new file mode 100644 index 000000000..34c754fef --- /dev/null +++ b/browser/app/less/inc/animate/zoomIn.less @@ -0,0 +1,23 @@ 
+@-webkit-keyframes zoomIn { + from { + opacity: 0; + -webkit-transform: scale3d(.3, .3, .3); + transform: scale3d(.3, .3, .3); + } + + 50% { + opacity: 1; + } +} + +@keyframes zoomIn { + from { + opacity: 0; + -webkit-transform: scale3d(.3, .3, .3); + transform: scale3d(.3, .3, .3); + } + + 50% { + opacity: 1; + } +} \ No newline at end of file diff --git a/browser/app/less/inc/base.less b/browser/app/less/inc/base.less new file mode 100644 index 000000000..4f288ae82 --- /dev/null +++ b/browser/app/less/inc/base.less @@ -0,0 +1,31 @@ +* { + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + + &:focus, + &:active { + outline: 0; + } +} + +html { + font-size: 10px; + -webkit-tap-highlight-color: rgba(0,0,0,0); +} + +html, +body { + min-height: 100%; +} + +a { + .transition(color); + .transition-duration(300ms); + +} + +button { + border: 0; +} + + diff --git a/browser/app/less/inc/buttons.less b/browser/app/less/inc/buttons.less new file mode 100644 index 000000000..28131641a --- /dev/null +++ b/browser/app/less/inc/buttons.less @@ -0,0 +1,53 @@ +.btn { + border: 0; + padding: 5px 10px; + font-size: 12px; + line-height: 1.5; + border-radius: 2px; + text-align: center; + .transition(all); + .transition-duration(300ms); + + &:hover, + &:focus { + .opacity(0.9); + } +} + +/*----------------------------------- + Button Variants +------------------------------------*/ +.btn-variant(@bg-color, @color) { + color: @color; + background-color: @bg-color; + + &:hover, + &:focus { + color: @color; + background-color: darken(@bg-color, 6%); + } + + +} + +.btn-block { + display: block; + width: 100%; +} + +.btn-link { + .btn-variant(#eee, #545454); +} + +.btn-danger { + .btn-variant(@red, @white); +} + +.btn-primary { + .btn-variant(@blue, @white); +} + +.btn-success { + .btn-variant(@green, @white); +} +//----------------------------------- \ No newline at end of file diff --git a/browser/app/less/inc/dropdown.less b/browser/app/less/inc/dropdown.less 
new file mode 100644 index 000000000..4d160464e --- /dev/null +++ b/browser/app/less/inc/dropdown.less @@ -0,0 +1,26 @@ +.dropdown-menu { + padding: 15px 0; + top: 0; + margin-top: -1px; + + & > li { + & > a { + padding: 8px 20px; + font-size: 15px; + + & > i { + width: 20px; + position: relative; + top: 1px; + } + } + } +} + +.dropdown-menu-right { + & > li { + & > a { + text-align: right; + } + } +} \ No newline at end of file diff --git a/browser/app/less/inc/file-explorer.less b/browser/app/less/inc/file-explorer.less new file mode 100644 index 000000000..da0ddcdd7 --- /dev/null +++ b/browser/app/less/inc/file-explorer.less @@ -0,0 +1,160 @@ +/*------------------------------ + Layout +--------------------------------*/ +.file-explorer { + background-color: @white; + position: relative; + height: 100%; + + &.toggled { + height: 100vh; + overflow: hidden; + } +} + +.fe-body { + @media(min-width: @screen-md-min) { + padding: 0 0 40px @fe-sidebar-width; + } + + @media(max-width: @screen-sm-max) { + padding: 75px 0 80px; + } + + min-height:100vh; + overflow: auto; +} + + +/*------------------------------ + Create and Upload Button +--------------------------------*/ +.feb-actions { + position: fixed; + bottom: 30px; + right: 30px; + + .dropdown-menu { + min-width: 55px; + width: 55px; + text-align: center; + background: transparent; + box-shadow: none; + margin: 0; + } + + &.open { + .feba-btn { + .scale(1); + + &:first-child { + .animation-name(feba-btn-anim); + .animation-duration(300ms); + } + + &:last-child { + .animation-name(feba-btn-anim); + .animation-duration(100ms); + } + } + + .feba-toggle { + background: darken(@red, 10%); + + & > span { + .rotate(135deg); + } + } + } +} + +.feba-toggle { + width: 55px; + height: 55px; + line-height: 55px; + border-radius: 50%; + background: @red; + box-shadow: 0 2px 3px rgba(0, 0, 0, 0.15); + display: inline-block; + text-align: center; + border: 0; + padding: 0; + + span { + display: inline-block; + height: 100%; + 
width: 100%; + } + + i { + color: @white; + font-size: 17px; + line-height: 58px; + } +} + +.feba-toggle, +.feba-toggle > span { + .transition(all); + .transition-duration(250ms); + .backface-visibility(hidden); +} + +.feba-btn { + width: 40px; + margin-top: 10px; + height: 40px; + border-radius: 50%; + text-align: center; + display: inline-block; + color: @white; + line-height: 40px; + box-shadow: 0 2px 3px rgba(0, 0, 0, 0.15); + -webkit-transform: scale(0); + transform: scale(0); + position: relative; + + &:hover, + &:focus { + color: @white; + } + + label { + width: 100%; + height: 100%; + position: absolute; + left: 0; + top: 0; + cursor: pointer; + } +} + +.feba-bucket { + background: @orange; +} + +.feba-upload { + background: @yellow; +} + +@-webkit-keyframes feba-btn-anim { + from { + .scale(0); + .opacity(0); + } + to { + .scale(1); + .opacity(1); + } +} + +@keyframes feba-btn-anim { + from { + .scale(0); + .opacity(0); + } + to { + .scale(1); + .opacity(1); + } +} diff --git a/browser/app/less/inc/font.less b/browser/app/less/inc/font.less new file mode 100644 index 000000000..bdb7f98d8 --- /dev/null +++ b/browser/app/less/inc/font.less @@ -0,0 +1,7 @@ +@font-face { + font-family: Lato; + src: url('../../fonts/lato/lato-normal.woff2') format('woff2'), + url('../../fonts/lato/lato-normal.woff') format('woff'); + font-weight: normal; + font-style: normal; +} \ No newline at end of file diff --git a/browser/app/less/inc/form.less b/browser/app/less/inc/form.less new file mode 100644 index 000000000..6fa7dbaab --- /dev/null +++ b/browser/app/less/inc/form.less @@ -0,0 +1,249 @@ +.form-control { + border: 0; + border-bottom: 1px solid @input-border; + color: #32393F; + padding: 5px; + width: 100%; + font-size: 13px; + background-color: transparent; +} + +select.form-control { + -webkit-appearance: none; + -moz-appearance: none; + border-radius: 0; + background: url(../../img/select-caret.svg) no-repeat bottom 7px right; + +} + + +/*-------------------------- + 
Input Group +----------------------------*/ +.input-group { + position: relative; + &:not(:last-child) { + margin-bottom: 25px; + } + + label:not(.ig-label) { + font-size: 13px; + display: block; + margin-bottom: 10px; + } +} + +.ig-label { + position: absolute; + text-align: center; + bottom: 7px; + left: 0; + width: 100%; + .transition(all); + .transition-duration(250ms); + padding: 2px 0 3px; + border-radius: 2px; + font-weight: 400; +} + +.ig-helpers { + z-index: 1; + width: 100%; + left: 0; + + &, + &:before, + &:after { + position: absolute; + height: 2px; + bottom: 0; + } + + &:before, + &:after { + content: ''; + width: 0; + .transition(all); + .transition-duration(250ms); + background-color: #03A9F4; + } + + &:before { + left: 50%; + } + + &:after { + right: 50%; + } +} + +.ig-text { + width: 100%; + height: 40px; + border: 0; + background: transparent; + text-align: center; + position: relative; + z-index: 1; + border-bottom: 1px solid #eee; + color: #32393F; + font-size: 13px; + + + &:focus + .ig-helpers { + &:before, + &:after { + width: 50%; + } + } + + &:valid, + &:disabled, + &:focus { + & ~ .ig-label { + bottom: 35px; + font-size: 13px; + z-index: 1; + } + } + + &:disabled { + .opacity(0.5); + } +} + +.ig-dark { + .ig-text { + color: @white; + border-color: rgba(255,255,255,0.1); + } + + .ig-helpers { + &:before, + &:after { + background-color: #dfdfdf; + height: 1px; + } + } +} + +.ig-left { + .ig-label, + .ig-text { + text-align: left; + } +} + +.ig-error { + .ig-label { + color: #E23F3F; + } + .ig-helpers i { + &:first-child, + &:first-child:before, + &:first-child:after { + background: rgba(226, 63, 63, 0.43); + } + &:last-child, + &:last-child:before, + &:last-child:after { + background: #E23F3F !important; + } + } + &:after { + content: "\f05a"; + font-family: FontAwesome; + position: absolute; + top: 17px; + right: 9px; + font-size: 20px; + color: #D33D3E; + } +} + +.ig-search { + &:before { + font-family: @font-family-icon; + content: 
'\f002'; + font-size: 15px; + position: absolute; + left: 2px; + top: 8px; + } + + .ig-text { + padding-left: 25px; + } +} + + +/*-------------------------- + Share Spinners +----------------------------*/ +.set-expire { + border: 1px solid @input-border; + margin: 35px 0 30px; +} + +.set-expire-item { + padding: 9px 5px 3px; + position: relative; + display: table-cell; + width: 1%; + text-align: center; + + &:not(:last-child) { + border-right: 1px solid @input-border; + } +} + +.set-expire-title { + font-size: 10px; + text-transform: uppercase; +} + +.set-expire-value { + display: inline-block; + overflow: hidden; + position: relative; + left: -8px; + + input { + font-size: 20px; + text-align: center; + position: relative; + right: -15px; + border: 0; + color: @text-strong-color; + padding: 0; + height: 25px; + width: 100%; + font-weight: normal; + } +} + +.set-expire-decrease, +.set-expire-increase { + position: absolute; + width: 20px; + height: 20px; + background: url(../../img/arrow.svg) no-repeat center; + background-size: 85%; + left: 50%; + margin-left: -10px; + .opacity(0.2); + cursor: pointer; + + &:hover { + .opacity(0.5); + } +} + +.set-expire-increase { + top: -25px; +} + +.set-expire-decrease { + bottom: -27px; + .rotate(-180deg); +} \ No newline at end of file diff --git a/browser/app/less/inc/generics.less b/browser/app/less/inc/generics.less new file mode 100644 index 000000000..1c46dfc60 --- /dev/null +++ b/browser/app/less/inc/generics.less @@ -0,0 +1,83 @@ +/*---------------------------- + Text Alignment +-----------------------------*/ +.text-center { text-align: center !important; } +.text-left { text-align: left !important; } +.text-right { text-align: right !important; } + + +/*---------------------------- + Float +-----------------------------*/ +.clearfix { .clearfix(); } +.pull-right { float: right !important; } +.pull-left { float: left !important; } + + +/*---------------------------- + Position +-----------------------------*/ 
+.p-relative { position: relative; } + + +/*--------------------------------------------------------------------------- + Generate Margin Class + margin, margin-top, margin-bottom, margin-left, margin-right +----------------------------------------------------------------------------*/ + +.margin (@label, @size: 1, @key:1) when (@size =< 30){ + .m-@{key} { + margin: @size !important; + } + + .m-t-@{key} { + margin-top: @size !important; + } + + .m-b-@{key} { + margin-bottom: @size !important; + } + + .m-l-@{key} { + margin-left: @size !important; + } + + .m-r-@{key} { + margin-right: @size !important; + } + + .margin(@label - 5; @size + 5; @key + 5); +} + +.margin(25, 0px, 0); + + +/*--------------------------------------------------------------------------- + Generate Padding Class + padding, padding-top, padding-bottom, padding-left, padding-right +----------------------------------------------------------------------------*/ +.padding (@label, @size: 1, @key:1) when (@size =< 30){ + .p-@{key} { + padding: @size !important; + } + + .p-t-@{key} { + padding-top: @size !important; + } + + .p-b-@{key} { + padding-bottom: @size !important; + } + + .p-l-@{key} { + padding-left: @size !important; + } + + .p-r-@{key} { + padding-right: @size !important; + } + + .padding(@label - 5; @size + 5; @key + 5); +} + +.padding(25, 0px, 0); \ No newline at end of file diff --git a/browser/app/less/inc/header.less b/browser/app/less/inc/header.less new file mode 100644 index 000000000..e95f05d66 --- /dev/null +++ b/browser/app/less/inc/header.less @@ -0,0 +1,242 @@ +/*-------------------------- + Header +----------------------------*/ +.fe-header { + padding: 45px 55px 20px; + + @media(min-width: @screen-md-min) { + position: relative; + } + + @media(max-width: (@screen-xs-max - 100)) { + padding: 25px 25px 20px; + } + + h2 { + font-size: 16px; + font-weight: normal; + margin: 0; + + & > span { + margin-bottom: 7px; + display: inline-block; + + &:not(:first-child) { + &:before { + 
content: '/'; + margin: 0 4px; + color: @text-color; + } + } + } + } + + p { + margin-top: 7px; + } +} + + +/*-------------------------- + Disk usage +----------------------------*/ +.feh-usage { + margin-top: 12px; + max-width: 285px; + + @media(max-width: (@screen-xs-max - 100px)) { + max-width: 100%; + font-size: 12px; + } + + & > ul { + margin-top: 7px; + list-style: none; + padding: 0; + + & > li { + padding-right: 0; + display: inline-block; + } + } +} + +.fehu-chart { + height: 5px; + background: #eee; + position: relative; + border-radius: 2px; + overflow: hidden; + + & > div { + position: absolute; + left: 0; + height: 100%; + background: @link-color; + } +} + +/*-------------------------- + Header Actions +----------------------------*/ +.feh-actions { + list-style: none; + padding: 0; + margin: 0; + position: absolute; + right: 35px; + top: 30px; + z-index: 11; + + @media(max-width: (@screen-sm-max)) { + top: 7px; + right: 10px; + position: fixed; + } + + & > li { + display: inline-block; + text-align: right; + vertical-align: top; + line-height: 100%; + + & > a, + & > .btn-group > button { + display: block; + height: 45px; + min-width: 45px; + text-align: center; + border-radius: 50%; + padding: 0; + border: 0; + background: none; + + @media(min-width: @screen-md-min) { + color: #7B7B7B; + font-size: 21px; + line-height: 45px; + .transition(all); + .transition-duration(300ms); + + &:hover { + background: rgba(0,0,0,0.09); + } + } + + @media(max-width: (@screen-sm-max)) { + background: url(../../img/more-h-light.svg) no-repeat center; + + .fa-reorder { + display: none; + } + } + + } + } +} + + +/*-------------------------- + Mobile Header +----------------------------*/ +@media(max-width: @screen-sm-max) { + .fe-header-mobile { + background-color: @dark-gray; + padding: 10px 50px 9px 12px; + text-align: center; + position: fixed; + z-index: 10; + box-shadow: 0 0 10px rgba(0,0,0,0.3); + left: 0; + top: 0; + width: 100%; + + .mh-logo { + height: 35px; + 
position: relative; + top: 4px; + } + } + + .feh-trigger { + width: 41px; + height: 41px; + cursor: pointer; + float: left; + position: relative; + text-align: center; + + &:before, + &:after { + content: ""; + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + border-radius: 50%; + + } + + &:after { + z-index: 1; + } + + &:before { + background: rgba(255, 255, 255, 0.1); + .transition(all); + .transition-duration(300ms); + .scale(0); + } + } + + .feht-toggled { + &:before { + .scale(1); + } + + .feht-lines { + .rotate(180deg); + + & > div { + &.top { + width: 12px; + transform: translateX(8px) translateY(1px) rotate(45deg); + -webkit-transform: translateX(8px) translateY(1px) rotate(45deg); + } + + &.bottom { + width: 12px; + transform: translateX(8px) translateY(-1px) rotate(-45deg); + -webkit-transform: translateX(8px) translateY(-1px) rotate(-45deg); + } + } + } + } + + .feht-lines, + .feht-lines > div { + .transition(all); + .transition-duration(300ms); + } + + .feht-lines { + width: 18px; + height: 12px; + display: inline-block; + margin-top: 14px; + + & > div { + background-color: #EAEAEA; + width: 18px; + height: 2px; + + &.center { + margin: 3px 0; + } + } + } +} + + + diff --git a/browser/app/less/inc/ie-warning.less b/browser/app/less/inc/ie-warning.less new file mode 100644 index 000000000..c4bcc0a50 --- /dev/null +++ b/browser/app/less/inc/ie-warning.less @@ -0,0 +1,81 @@ +.ie-warning { + background-color: #ff5252; + width: 100%; + height: 100%; + position: fixed; + left: 0; + top: 0; + text-align: center; + + &:before { + width: 1px; + content: ''; + height: 100%; + } + + &:before, + .iw-inner { + display: inline-block; + vertical-align: middle; + } +} + +.iw-inner { + width: 470px; + height: 300px; + background-color: @white; + border-radius: 5px; + padding: 40px; + position: relative; + + ul { + list-style: none; + padding: 0; + margin: 0; + width: 230px; + margin-left: 80px; + margin-top: 16px; + + & > li { + float: left; + + 
& > a { + display: block; + padding: 10px 15px 7px; + font-size: 14px; + margin: 0 1px; + border-radius: 3px; + + &:hover { + background: #eee; + } + + img { + height: 40px; + margin-bottom: 5px; + } + } + } + } +} + +.iwi-icon { + color: #ff5252; + font-size: 40px; + display: block; + line-height: 100%; + margin-bottom: 15px; +} + +.iwi-skip { + position: absolute; + left: 0; + bottom: -35px; + width: 100%; + color: rgba(255, 255, 255, 0.6); + cursor: pointer; + + &:hover { + color: @white; + } +} \ No newline at end of file diff --git a/browser/app/less/inc/list.less b/browser/app/less/inc/list.less new file mode 100644 index 000000000..d3ee399b1 --- /dev/null +++ b/browser/app/less/inc/list.less @@ -0,0 +1,352 @@ +/*-------------------------- + Row +----------------------------*/ +.fesl-row { + padding-right: 40px; + padding-top: 5px; + padding-bottom: 5px; + position: relative; + + @media (min-width: (@screen-sm-min - 100px)) { + display: flex; + flex-flow: row nowrap; + justify-content: space-between; + } + + .clearfix(); +} + +header.fesl-row { + @media (min-width:(@screen-sm-min - 100px)) { + margin-bottom: 20px; + border-bottom: 1px solid lighten(@text-muted-color, 20%); + padding-left: 40px; + + .fesl-item, + .fesli-sort { + .transition(all); + .transition-duration(300ms); + } + + .fesl-item { + cursor: pointer; + color: @text-color; + font-weight: 500; + margin-bottom: -5px; + + & > .fesli-sort { + float: right; + margin: 4px 0 0; + .opacity(0); + color: @dark-gray; + font-size: 14px; + } + + &:hover:not(.fi-actions) { + background: lighten(@text-muted-color, 22%); + color: @dark-gray; + + & > .fesli-sort { + .opacity(0.5); + } + } + } + } + + @media (max-width:(@screen-xs-max - 100px)) { + display: none; + } +} + +div.fesl-row { + padding-left: 85px; + border-bottom: 1px solid transparent; + cursor: default; + + @media (max-width: (@screen-xs-max - 100px)) { + padding-left: 70px; + padding-right: 45px; + } + + &:nth-child(even) { + background-color: 
#fafafa; + } + + &:hover { + background-color: #fbf7dc; + } + + &[data-type]:before { + font-family: @font-family-icon; + width: 35px; + height: 35px; + text-align: center; + line-height: 35px; + position: absolute; + border-radius: 50%; + font-size: 16px; + left: 50px; + top: 9px; + color: @white; + + @media (max-width: (@screen-xs-max - 100px)) { + left: 20px; + } + } + + &[data-type="folder"] { + @media (max-width: (@screen-xs-max - 100px)) { + .fesl-item { + &.fi-name { + padding-top: 10px; + padding-bottom: 7px; + } + + &.fi-size, + &.fi-modified { + display: none; + } + } + } + } + + /*-------------------------- + Icons + ----------------------------*/ + &[data-type=folder]:before { + content: '\f114'; + background-color: #a1d6dd; + } + &[data-type=pdf]:before { + content: "\f1c1"; + background-color: #fa7775; + } + &[data-type=zip]:before { + content: "\f1c6"; + background-color: #427089; + } + &[data-type=audio]:before { + content: "\f1c7"; + background-color: #009688 + } + &[data-type=code]:before { + content: "\f1c9"; + background-color: #997867; + } + &[data-type=excel]:before { + content: "\f1c3"; + background-color: #64c866; + } + &[data-type=image]:before { + content: "\f1c5"; + background-color: #f06292; + } + &[data-type=video]:before { + content: "\f1c8"; + background-color: #f8c363; + } + &[data-type=other]:before { + content: "\f016"; + background-color: #afafaf; + } + &[data-type=text]:before { + content: "\f0f6"; + background-color: #8a8a8a; + } + &[data-type=doc]:before { + content: "\f1c2"; + background-color: #2196f5; + } + &[data-type=presentation]:before { + content: "\f1c4"; + background-color: #896ea6; + } + + &.fesl-loading{ + &:before { + content: ''; + } + + &:after { + .list-loader(20px, 20px, rgba(255, 255, 255, 0.5), @white); + left: 57px; + top: 17px; + + @media (max-width: (@screen-xs-max - 100px)) { + left: 27px; + } + } + + } +} + + +/*-------------------------- + Files and Folders +----------------------------*/ +.fesl-item { 
+ display: block; + + a { + color: darken(@text-color, 5%); + } + + @media(min-width: (@screen-sm-min - 100px)) { + &:not(.fi-actions) { + text-overflow: ellipsis; + padding: 10px 15px; + white-space: nowrap; + overflow: hidden; + } + + &.fi-name { + flex: 3; + } + + &.fi-size { + width: 140px; + } + + &.fi-modified { + width: 190px; + } + + &.fi-actions { + width: 40px; + } + } + + @media(max-width: (@screen-xs-max - 100px)) { + padding: 0; + + &.fi-name { + width: 100%; + margin-bottom: 3px; + } + + &.fi-size, + &.fi-modified { + font-size: 12px; + color: #B5B5B5; + float: left; + } + + &.fi-modified { + max-width: 72px; + white-space: nowrap; + overflow: hidden; + } + + &.fi-size { + margin-right: 10px; + } + + &.fi-actions { + position: absolute; + top: 5px; + right: 10px; + } + } +} + + +/*-------------------------- + Action buttons +----------------------------*/ +.fia-toggle { + height: 36px; + width: 36px; + background: transparent url(../../img/more-h.svg) no-repeat center; + position: relative; + top: 3px; + .opacity(0.4); + + &:hover { + .opacity(0.7); + } +} + +.fi-actions { + .dropdown-menu { + background-color: transparent; + box-shadow: none; + padding: 0; + right: 38px; + left: auto; + margin: 0; + height: 100%; + text-align: right; + } + + .dropdown { + &.open { + .dropdown-menu { + .fiad-action { + right: 0; + } + } + } + } +} + +.fiad-action { + height: 35px; + width: 35px; + background: @amber; + display: inline-block; + border-radius: 50%; + text-align: center; + line-height: 35px; + font-weight: normal; + position: relative; + top: 4px; + margin-left: 5px; + .animation-name(fiad-action-anim); + .transform-origin(center center); + .backface-visibility(none); + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); + + &:nth-child(2) { + .animation-duration(100ms); + } + + &:nth-child(1) { + .animation-duration(250ms); + } + + & > i { + font-size: 14px; + color: @white; + } + + &:hover { + background-color: darken(@amber, 3%); + } +} + + +@-webkit-keyframes 
fiad-action-anim { + from { + .scale(0); + .opacity(0); + right: -20px; + } + to { + .scale(1); + .opacity(1); + right: 0; + } +} + +@keyframes fiad-action-anim { + from { + .scale(0); + .opacity(0); + right: -20px; + } + to { + .scale(1); + .opacity(1); + right: 0; + } +} \ No newline at end of file diff --git a/browser/app/less/inc/login.less b/browser/app/less/inc/login.less new file mode 100644 index 000000000..7e86faa0d --- /dev/null +++ b/browser/app/less/inc/login.less @@ -0,0 +1,104 @@ +.login { + height: 100vh; + min-height: 500px; + background: @dark-gray; + + text-align: center; + &:before { + height: ~"calc(100% - 110px)"; + width: 1px; + content: ""; + } +} + +.l-wrap, +.login:before { + display: inline-block; + vertical-align: middle; +} + +.l-wrap { + width: 80%; + max-width: 500px; + margin-top: -50px; + &.toggled { + display: inline-block; + } + + .input-group:not(:last-child) { + margin-bottom: 40px; + } +} + +.l-footer { + height: 110px; + padding: 0 50px; +} + +.lf-logo { + float: right; + img { + width: 40px; + } +} + +.lf-server { + float: left; + color: rgba(255, 255, 255, 0.4); + font-size: 20px; + font-weight: 400; + padding-top: 40px; +} + +@media (max-width: @screen-sm-min) { + .lf-logo, + .lf-server { + float: none; + display: block; + text-align: center; + width: 100%; + } + + .lf-logo { + margin-bottom: 5px; + } + + .lf-server { + font-size: 15px; + } +} + +.lw-btn { + width: 50px; + height: 50px; + border: 1px solid @white; + display: inline-block; + border-radius: 50%; + font-size: 22px; + color: @white; + .transition(all); + .transition-duration(300ms); + opacity: 0.3; + background-color: transparent; + line-height: 45px; + padding: 0; + &:hover { + color: @white; + opacity: 0.8; + border-color: @white; + } + + i { + display: block; + width: 100%; + padding-left: 3px; + } +} + +/*------------------------------ + Chrome autofill fix +-------------------------------*/ +input:-webkit-autofill { + -webkit-box-shadow:0 0 0 50px 
@dark-gray inset !important; + -webkit-text-fill-color: @white !important; +} \ No newline at end of file diff --git a/browser/app/less/inc/misc.less b/browser/app/less/inc/misc.less new file mode 100644 index 000000000..dba1b43b5 --- /dev/null +++ b/browser/app/less/inc/misc.less @@ -0,0 +1,102 @@ +/*-------------------------- + Close +----------------------------*/ +.close-variant(@color, @bg-color, @color-hover, @bg-color-hover) { + span { + background-color: @bg-color; + color: @color; + } + + &:hover, + &:focus { + span { + background-color: @bg-color-hover; + color: @color-hover; + } + } +} + +.close { + right: 15px; + font-weight: normal; + opacity: 1; + font-size: 18px; + position: absolute; + text-align: center; + top: 16px; + z-index: 1; + padding: 0; + border: 0; + background-color: transparent; + + span { + width: 25px; + height: 25px; + display: block; + border-radius: 50%; + line-height: 24px; + text-shadow: none; + } + + &:not(.close-alt) { + .close-variant(rgba(255, 255, 255, 0.8), rgba(255, 255, 255, 0.1), @white, rgba(255, 255, 255, 0.2)); + } +} + +.close-alt { + .close-variant(#989898, #efefef, #7b7b7b, #e8e8e8); +} + + +/*-------------------------- + Hidden +----------------------------*/ +.hidden { + display: none !important; +} + + +/*-------------------------- + Copy text +----------------------------*/ +.copy-text { + input { + width: 100%; + border-radius: 1px; + border: 1px solid @input-border; + padding: 7px 12px; + font-size: 13px; + line-height: 100%; + cursor: text; + .transition(border-color); + .transition-duration(300ms); + + &:hover { + border-color: darken(@input-border, 5%); + } + } +} + +/*-------------------------- + Sharing +----------------------------*/ +.share-availability { + margin-bottom: 40px; + + &:before, + &:after { + position: absolute; + bottom: -30px; + font-size: 10px; + } + + &:before { + content: '01 Sec'; + left: 0; + } + + &:after { + content: '7 days'; + right: 0; + } +} \ No newline at end of file diff 
--git a/browser/app/less/inc/mixin.less b/browser/app/less/inc/mixin.less new file mode 100644 index 000000000..528f2f26b --- /dev/null +++ b/browser/app/less/inc/mixin.less @@ -0,0 +1,52 @@ +/*-------------------------- + User Select +----------------------------*/ +.user-select(@value) { + -webkit-user-select: @value; + -moz-user-select: @value; + -ms-user-select: @value; + user-select: @value; +} + + +/*---------------------------------------- + CSS Animations based on animate.css +-----------------------------------------*/ +.animated(@name, @duration) { + -webkit-animation-name: @name; + animation-name: @name; + -webkit-animation-duration: @duration; + animation-duration: @duration; + -webkit-animation-fill-mode: both; + animation-fill-mode: both; +} + +/*------------------------------------------------- + For loop mixin for generate custom classes +--------------------------------------------------*/ +.for(@i, @n) {.-each(@i)} +.for(@n) when (isnumber(@n)) {.for(1, @n)} +.for(@i, @n) when not (@i = @n) { + .for((@i + (@n - @i) / abs(@n - @i)), @n); +} + +.for(@array) when (default()) {.for-impl_(length(@array))} +.for-impl_(@i) when (@i > 1) {.for-impl_((@i - 1))} +.for-impl_(@i) when (@i > 0) {.-each(extract(@array, @i))} + +/*---------------------------------------- + List Loader +-----------------------------------------*/ +.list-loader(@width, @height, @borderColor, @borderColorBottom) { + content: ''; + width: @width; + height: @height; + border-radius: 50%; + .animated(zoomIn, 500ms); + border: 2px solid @borderColor; + border-bottom-color: @borderColorBottom; + position: absolute; + z-index: 1; + -webkit-animation: zoomIn 250ms, spin 700ms 250ms infinite linear; + animation: zoomIn 250ms, spin 700ms 250ms infinite linear; +} \ No newline at end of file diff --git a/browser/app/less/inc/modal.less b/browser/app/less/inc/modal.less new file mode 100644 index 000000000..6d66ffa77 --- /dev/null +++ b/browser/app/less/inc/modal.less @@ -0,0 +1,294 @@ 
+/*-------------------------- + Modal +----------------------------*/ +.modal { + @media(min-width: @screen-sm-min) { + text-align: center; + + &:before { + content: ''; + height: 100%; + width: 1px; + display: inline-block; + vertical-align: middle; + } + + .modal-dialog { + text-align: left; + margin: 10px auto; + display: inline-block; + vertical-align: middle; + } + } +} + +.modal-dark { + .modal-header { + color: rgba(255, 255, 255, 0.4); + + small { + color: rgba(255, 255, 255, 0.2); + } + } + + .modal-content { + background-color: @dark-gray; + } +} + +.modal-backdrop { + .animated(fadeIn, 200ms); +} + +.modal-dialog { + .animated(zoomIn, 200ms); +} + +.modal-header { + color: @text-strong-color; + position: relative; + + small { + display: block; + text-transform: none; + font-size: 12px; + margin-top: 5px; + color: #a8a8a8; + } +} + +.modal-content { + border-radius: 3px; + box-shadow: none; +} + +.modal-footer { + padding: 0 30px 30px; + text-align: center; +} + + +/*-------------------------- + Dialog +----------------------------*/ +.modal-confirm { + .modal-dialog { + text-align: center; + } +} + +.mc-icon { + margin: 0 0 10px; + + & > i { + font-size: 60px; + } +} + +.mci-red { + color: #ff8f8f; +} + +.mci-amber { + color: @amber; +} + +.mci-green { + color: #64e096; +} + +.mc-text { + color: @text-strong-color; +} + +.mc-sub { + color: @text-muted-color; + margin-top: 5px; + font-size: 13px; +} +//-------------------------- + + +/*-------------------------- + About +----------------------------*/ +.modal-about { + @media (max-width: @screen-xs-max) { + text-align: center; + + .modal-dialog { + max-width: 400px; + width: 90%; + margin: 20px auto 0; + } + } +} + +.ma-inner { + display: flex; + flex-direction: row; + align-items: center; + min-height: 350px; + position: relative; + + @media (min-width: @screen-sm-min) { + &:before { + content: ''; + width: 150px; + height: 100%; + top: 0; + left: 0; + position: absolute; + border-radius: 3px 0 0px 3px; 
+ background-color: #23282C; + } + } +} + +.mai-item { + &:first-child { + width: 150px; + text-align: center; + } + + &:last-child { + flex: 4; + padding: 30px; + } +} + +.maii-logo { + width: 70px; + position: relative; + +} + +.maii-list { + list-style: none; + padding: 0; + + & > li { + margin-bottom: 15px; + + div { + color: rgba(255, 255, 255, 0.8); + text-transform: uppercase; + font-size: 14px; + } + + small { + font-size: 13px; + color: rgba(255, 255, 255, 0.4); + } + } +} +//-------------------------- + + +/*-------------------------- + Preferences +----------------------------*/ +.toggle-password { + position: absolute; + bottom: 30px; + right: 35px; + width: 30px; + height: 30px; + border: 1px solid #eee; + border-radius: 0; + text-align: center; + cursor: pointer; + z-index: 10; + background-color: @white; + padding-top: 5px; + + &.toggled { + background: #eee; + } +} +//-------------------------- + + +/*-------------------------- + Policy +----------------------------*/ +.pm-body { + padding-bottom: 30px; +} + +.pmb-header { + margin-bottom: 35px; +} + +.pmb-list { + display: flex; + flex-flow: row nowrap; + align-items: center; + justify-content: center; + padding: 10px 35px; + + &:nth-child(even) { + background-color: #F7F7F7; + } + + .form-control { + padding-left: 0; + padding-right: 0; + } +} + +header.pmb-list { + margin: 20px 0 10px; +} + +.pmbl-item { + display: block; + font-size: 13px; + + &:nth-child(1) { + flex: 2; + } + + &:nth-child(2) { + margin: 0 25px; + width: 150px; + } + + &:nth-child(3) { + width: 70px; + } +} + +div.pmb-list { + select { + border: 0; + } + + .pml-item { + &:not(:last-child) { + padding: 0 5px; + } + } +} +//-------------------------- + + +/*-------------------------- + Create Bucket +----------------------------*/ +.modal-create-bucket { + .modal-dialog { + position: fixed; + right: 25px; + bottom: 95px; + margin: 0; + height: 110px; + } + + .modal-content { + width: 100%; + height: 100%; + } +} 
+//-------------------------- + diff --git a/browser/app/less/inc/sidebar.less b/browser/app/less/inc/sidebar.less new file mode 100644 index 000000000..c975472eb --- /dev/null +++ b/browser/app/less/inc/sidebar.less @@ -0,0 +1,187 @@ +/*-------------------------- + Sidebar +----------------------------*/ +.fe-sidebar { + width: @fe-sidebar-width; + background-color: @dark-gray; + position: fixed; + height: 100%; + overflow: hidden; + padding: 35px; + + @media(min-width: @screen-md-min) { + .translate3d(0, 0, 0); + } + + @media(max-width: @screen-sm-max) { + padding-top: 85px; + z-index: 9; + box-shadow: 0 0 10px rgba(0, 0, 0, 0.65); + .transition(all); + .transition-duration(300ms); + .translate3d((-@fe-sidebar-width - 15px), 0, 0); + + &.toggled { + .translate3d(0, 0, 0); + } + } + + a { + color: rgba(255, 255, 255, 0.58); + + &:hover { + color: @white; + } + } +} + +/*-------------------------- + Header +----------------------------*/ +.fes-header { + margin-bottom: 40px; + + img, + h2 { + float: left; + } + + h2 { + margin: 13px 0 0 10px; + font-weight: normal; + } + + img { + width: 32px; + } +} + +/*-------------------------- + List +----------------------------*/ +.fesl-inner { + height: ~"calc(100vh - 260px)"; + overflow: auto; + padding: 0; + margin: 0 -35px; + + & li { + position: relative; + + & > a { + display: block; + padding: 10px 40px 12px 65px; + .text-overflow(); + + &:before { + font-family: FontAwesome; + content: '\f0a0'; + font-size: 17px; + position: absolute; + top: 10px; + left: 35px; + .opacity(0.8); + } + + &.fesli-loading { + &:before { + .list-loader(20px, 20px, rgba(255, 255, 255, 0.1), rgba(255, 255, 255, 0.5)); + left: 32px; + top: 0; + bottom: 0; + margin: auto; + } + } + } + + &.active { + background-color: rgba(0, 0, 0, 0.2); + + & > a { + color: @white; + } + } + + &:not(.active):hover { + background-color: rgba(0, 0, 0, 0.1); + + & > a { + color: @white; + } + } + + &:hover { + .fesli-trigger { + .opacity(0.6); + + &:hover { + 
.opacity(1); + } + } + } + } + + ul { + list-style: none; + padding: 0; + margin: 0; + } + + &:hover .scrollbar-vertical { + opacity: 1; + } +} + +.fesli-trigger { + .opacity(0); + .transition(all); + .transition-duration(200ms); + position: absolute; + top: 0; + right: 0; + width: 40px; + height: 100%; + cursor: pointer; + background: url(../../img/more-h-light.svg) no-repeat left; +} + +/* Scrollbar */ +.scrollbar-vertical { + position: absolute; + right: 5px; + width: 4px; + height: 100%; + opacity: 0; + .transition(opacity); + .transition-duration(300ms); + + div { + border-radius: 1px !important; + background-color: #6a6a6a !important; + } +} + +/*-------------------------- + Host +----------------------------*/ +.fes-host { + position: fixed; + left: 0; + bottom: 0; + z-index: 1; + background: @dark-gray; + color: rgba(255, 255, 255, 0.4); + font-size: 15px; + font-weight: 400; + width: @fe-sidebar-width; + padding: 20px; + .text-overflow(); + + & > i { + margin-right: 10px; + } +} + + + + diff --git a/browser/app/less/inc/variables.less b/browser/app/less/inc/variables.less new file mode 100644 index 000000000..de6589994 --- /dev/null +++ b/browser/app/less/inc/variables.less @@ -0,0 +1,94 @@ +/*-------------------------- + Base +----------------------------*/ +@font-family-sans-serif : 'Lato', sans-serif; +@font-family-icon : 'fontAwesome'; +@body-bg : #edecec; +@text-color : #8e8e8e; +@font-size-base : 15px; +@link-color : #46a5e0; +@link-hover-decoration : none; + + +/*-------------------------- + File Explorer +----------------------------*/ +@fe-sidebar-width : 300px; +@text-muted-color : #BDBDBD; +@text-strong-color : #333; + +/*-------------------------- + Colors +----------------------------*/ +@cyan : #2ED2FF; +@amber : #ffc107; +@red : #ff726f; +@grey : #f5f5f5; +@dark-blue : #0084d3; +@blue : #00a6f7; +@white : #ffffff; +@black : #1b1e25; +@blue : #50b2ff; +@light-blue : #c1d1e8; +@green : #33d46f; +@yellow : #FFC107; +@orange : #ffc155; +@purple 
: #9C27B0; +@teal : #009688; +@brown : #795548; +@blue-gray : #374952; +@dark-gray : #32393F; + + +/*-------------------------- + Dropdown +----------------------------*/ +@dropdown-fallback-border : transparent; +@dropdown-border : transparent; +@dropdown-divider-bg : ''; +@dropdown-link-hover-bg : rgba(0,0,0,0.05); +@dropdown-link-color : @text-color; +@dropdown-link-hover-color : #333; +@dropdown-link-disabled-color : #e4e4e4; +@dropdown-divider-bg : rgba(0,0,0,0.08); +@dropdown-link-active-color : #333; +@dropdown-link-active-bg : rgba(0, 0, 0, 0.075); +@dropdown-shadow : 0 2px 10px rgba(0, 0, 0, 0.2); + + +/*-------------------------- + Modal +----------------------------*/ +@modal-content-fallback-border-color: transparent; +@modal-content-border-color: transparent; +@modal-backdrop-bg: rgba(0,0,0,0.1); +@modal-header-border-color: transparent; +@modal-title-line-height: transparent; +@modal-footer-border-color: transparent; +@modal-inner-padding: 30px 35px; +@modal-title-padding: 30px 35px 0px; +@modal-sm: 400px; + + +/*------------------------- + Buttons +--------------------------*/ +@btn-border-radius-large: 2px; +@btn-border-radius-small: 2px; +@btn-border-radius-base: 2px; + + +/*------------------------- + Colors +--------------------------*/ +@brand-primary: #2196F3; +@brand-success: #4CAF50; +@brand-info: #00BCD4; +@brand-warning: #FF9800; +@brand-danger: #FF5722; + + +/*------------------------- + Form +--------------------------*/ +@input-border: #eee; \ No newline at end of file diff --git a/browser/app/less/main.less b/browser/app/less/main.less new file mode 100644 index 000000000..29aa50772 --- /dev/null +++ b/browser/app/less/main.less @@ -0,0 +1,39 @@ +/*---------------------------- + Bootstrap +-----------------------------*/ +@import "../../node_modules/bootstrap/less/scaffolding.less"; +@import "../../node_modules/bootstrap/less/variables.less"; +@import "../../node_modules/bootstrap/less/grid.less"; +@import 
"../../node_modules/bootstrap/less/mixins.less"; +@import "../../node_modules/bootstrap/less/normalize.less"; +@import "../../node_modules/bootstrap/less/dropdowns.less"; +@import "../../node_modules/bootstrap/less/modals.less"; +@import "../../node_modules/bootstrap/less/tooltip.less"; +@import "../../node_modules/bootstrap/less/responsive-utilities.less"; + + +/*---------------------------- + App +-----------------------------*/ +@import 'inc/mixin'; +@import 'inc/variables'; +@import 'inc/base'; +@import 'inc/animate/animate'; +@import 'inc/generics'; +@import 'inc/font'; +@import 'inc/form'; +@import 'inc/buttons'; +@import 'inc/misc'; +@import 'inc/login'; +@import 'inc/header'; +@import 'inc/sidebar'; +@import 'inc/list'; +@import 'inc/file-explorer'; +@import 'inc/ie-warning'; + +/*---------------------------- + Boostrap +-----------------------------*/ +@import 'inc/dropdown'; +@import 'inc/alert'; +@import 'inc/modal'; diff --git a/browser/build.js b/browser/build.js new file mode 100644 index 000000000..f612b7d02 --- /dev/null +++ b/browser/build.js @@ -0,0 +1,126 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +var moment = require('moment') +var async = require('async') +var exec = require('child_process').exec +var fs = require('fs') + +var isProduction = process.env.NODE_ENV == 'production' ? 
true : false +var assetsFileName = '' +var commitId = '' +var date = moment.utc() +var version = date.format('YYYY-MM-DDTHH:mm:ss') + 'Z' +var releaseTag = date.format('YYYY-MM-DDTHH-mm-ss') + 'Z' +var buildType = 'DEVELOPMENT' +if (process.env.MINIO_UI_BUILD) buildType = process.env.MINIO_UI_BUILD + +rmDir = function(dirPath) { + try { var files = fs.readdirSync(dirPath); } + catch(e) { return; } + if (files.length > 0) + for (var i = 0; i < files.length; i++) { + var filePath = dirPath + '/' + files[i]; + if (fs.statSync(filePath).isFile()) + fs.unlinkSync(filePath); + else + rmDir(filePath); + } + fs.rmdirSync(dirPath); +}; + +async.waterfall([ + function(cb) { + rmDir('production'); + rmDir('dev'); + var cmd = 'webpack -p --config webpack.production.config.js' + if (!isProduction) { + cmd = 'webpack'; + } + console.log('Running', cmd) + exec(cmd, cb) + }, + function(stdout, stderr, cb) { + if (isProduction) { + fs.renameSync('production/index_bundle.js', + 'production/index_bundle-' + releaseTag + '.js') + } else { + fs.renameSync('dev/index_bundle.js', + 'dev/index_bundle-' + releaseTag + '.js') + } + var cmd = 'git log --format="%H" -n1' + console.log('Running', cmd) + exec(cmd, cb) + }, + function(stdout, stderr, cb) { + if (!stdout) throw new Error('commitId is empty') + commitId = stdout.replace('\n', '') + if (commitId.length !== 40) throw new Error('commitId invalid : ' + commitId) + assetsFileName = 'ui-assets.go'; + var cmd = 'go-bindata-assetfs -pkg miniobrowser -nocompress=true production/...' + if (!isProduction) { + cmd = 'go-bindata-assetfs -pkg miniobrowser -nocompress=true dev/...' 
+ } + console.log('Running', cmd) + exec(cmd, cb) + }, + function(stdout, stderr, cb) { + var cmd = 'gofmt -s -w -l bindata_assetfs.go' + console.log('Running', cmd) + exec(cmd, cb) + }, + function(stdout, stderr, cb) { + fs.renameSync('bindata_assetfs.go', assetsFileName) + fs.appendFileSync(assetsFileName, '\n') + fs.appendFileSync(assetsFileName, 'var UIReleaseTag = "' + buildType + '.' + + releaseTag + '"\n') + fs.appendFileSync(assetsFileName, 'var UICommitID = "' + commitId + '"\n') + fs.appendFileSync(assetsFileName, 'var UIVersion = "' + version + '"') + fs.appendFileSync(assetsFileName, '\n') + var contents; + if (isProduction) { + contents = fs.readFileSync(assetsFileName, 'utf8') + .replace(/_productionIndexHtml/g, '_productionIndexHTML') + .replace(/productionIndexHtmlBytes/g, 'productionIndexHTMLBytes') + .replace(/productionIndexHtml/g, 'productionIndexHTML') + .replace(/_productionIndex_bundleJs/g, '_productionIndexBundleJs') + .replace(/productionIndex_bundleJsBytes/g, 'productionIndexBundleJsBytes') + .replace(/productionIndex_bundleJs/g, 'productionIndexBundleJs') + .replace(/_productionJqueryUiMinJs/g, '_productionJqueryUIMinJs') + .replace(/productionJqueryUiMinJsBytes/g, 'productionJqueryUIMinJsBytes') + .replace(/productionJqueryUiMinJs/g, 'productionJqueryUIMinJs'); + } else { + contents = fs.readFileSync(assetsFileName, 'utf8') + .replace(/_devIndexHtml/g, '_devIndexHTML') + .replace(/devIndexHtmlBytes/g, 'devIndexHTMLBytes') + .replace(/devIndexHtml/g, 'devIndexHTML') + .replace(/_devIndex_bundleJs/g, '_devIndexBundleJs') + .replace(/devIndex_bundleJsBytes/g, 'devIndexBundleJsBytes') + .replace(/devIndex_bundleJs/g, 'devIndexBundleJs') + .replace(/_devJqueryUiMinJs/g, '_devJqueryUIMinJs') + .replace(/devJqueryUiMinJsBytes/g, 'devJqueryUIMinJsBytes') + .replace(/devJqueryUiMinJs/g, 'devJqueryUIMinJs'); + } + contents = contents.replace(/MINIO_UI_VERSION/g, version) + contents = contents.replace(/index_bundle.js/g, 'index_bundle-' + 
releaseTag + '.js') + + fs.writeFileSync(assetsFileName, contents, 'utf8') + console.log('UI assets file :', assetsFileName) + cb() + } + ], function(err) { + if (err) return console.log(err) + }) diff --git a/browser/karma.conf.js b/browser/karma.conf.js new file mode 100644 index 000000000..5637a12f5 --- /dev/null +++ b/browser/karma.conf.js @@ -0,0 +1,40 @@ +var webpack = require('webpack'); + +module.exports = function (config) { + config.set({ + browsers: [ process.env.CONTINUOUS_INTEGRATION ? 'Firefox' : 'Chrome' ], + singleRun: true, + frameworks: [ 'mocha' ], + files: [ + 'tests.webpack.js' + ], + preprocessors: { + 'tests.webpack.js': [ 'webpack' ] + }, + reporters: [ 'dots' ], + webpack: { + module: { + loaders: [{ + test: /\.js$/, + exclude: /(node_modules|bower_components)/, + loader: 'babel', + query: { + presets: ['react', 'es2015'] + } + }, { + test: /\.less$/, + loader: 'style!css!less' + }, { + test: /\.css$/, + loader: 'style!css' + }, { + test: /\.(eot|woff|woff2|ttf|svg|png)/, + loader: 'url' + }] + } + }, + webpackServer: { + noInfo: true + } + }); +}; diff --git a/browser/package.json b/browser/package.json new file mode 100644 index 000000000..23e8145c6 --- /dev/null +++ b/browser/package.json @@ -0,0 +1,82 @@ +{ + "name": "minio-browser", + "version": "0.0.1", + "description": "Minio Browser", + "scripts": { + "test": "karma start", + "dev": "NODE_ENV=dev webpack-dev-server --devtool eval --progress --colors --hot --content-base dev", + "build": "NODE_ENV=dev node build.js", + "release": "NODE_ENV=production MINIO_UI_BUILD=RELEASE node build.js", + "format": "esformatter -i 'app/**/*.js'" + }, + "repository": { + "type": "git", + "url": "https://github.com/minio/miniobrowser" + }, + "author": "Minio Inc", + "license": "Apache-2.0", + "bugs": { + "url": "https://github.com/minio/miniobrowser/issues" + }, + "homepage": "https://github.com/minio/miniobrowser", + "devDependencies": { + "async": "^1.5.2", + "babel-cli": "^6.14.0", + "babel-core": 
"^6.14.0", + "babel-loader": "^6.2.5", + "babel-plugin-syntax-object-rest-spread": "^6.13.0", + "babel-plugin-transform-object-rest-spread": "^6.8.0", + "babel-preset-es2015": "^6.14.0", + "babel-preset-react": "^6.11.1", + "babel-register": "^6.14.0", + "copy-webpack-plugin": "^0.3.3", + "css-loader": "^0.23.1", + "esformatter": "^0.10.0", + "esformatter-jsx-ignore": "^1.0.6", + "expect": "^1.20.2", + "history": "^1.17.0", + "html-webpack-plugin": "^2.22.0", + "json-loader": "^0.5.4", + "karma": "^0.13.22", + "karma-chrome-launcher": "^0.2.3", + "karma-cli": "^0.1.2", + "karma-firefox-launcher": "^0.1.7", + "karma-mocha": "^0.2.2", + "karma-webpack": "^1.7.0", + "less": "^2.7.1", + "less-loader": "^2.2.3", + "mocha": "^2.5.3", + "moment": "^2.15.1", + "purifycss-webpack-plugin": "^2.0.3", + "react": "^0.14.8", + "react-addons-test-utils": "^0.14.8", + "react-bootstrap": "^0.28.5", + "react-custom-scrollbars": "^2.3.0", + "react-redux": "^4.4.5", + "react-router": "^2.8.1", + "redux": "^3.6.0", + "redux-thunk": "^1.0.3", + "style-loader": "^0.13.1", + "superagent": "^1.8.4", + "superagent-es6-promise": "^1.0.0", + "url-loader": "^0.5.7", + "webpack": "^1.12.11", + "webpack-dev-server": "^1.14.1" + }, + "dependencies": { + "bootstrap": "^3.3.6", + "classnames": "^2.2.3", + "font-awesome": "^4.7.0", + "humanize": "0.0.9", + "json-loader": "^0.5.4", + "local-storage-fallback": "^1.3.0", + "mime-db": "^1.25.0", + "mime-types": "^2.1.13", + "react": "^0.14.8", + "react-copy-to-clipboard": "^4.2.3", + "react-custom-scrollbars": "^2.2.2", + "react-dom": "^0.14.6", + "react-dropzone": "^3.5.3", + "react-onclickout": "2.0.4" + } +} diff --git a/browser/tests.webpack.js b/browser/tests.webpack.js new file mode 100644 index 000000000..871037f23 --- /dev/null +++ b/browser/tests.webpack.js @@ -0,0 +1,2 @@ +var context = require.context('./app', true, /-test\.js$/); +context.keys().forEach(context); \ No newline at end of file diff --git 
a/vendor/github.com/minio/miniobrowser/ui-assets.go b/browser/ui-assets.go similarity index 100% rename from vendor/github.com/minio/miniobrowser/ui-assets.go rename to browser/ui-assets.go diff --git a/browser/webpack.config.js b/browser/webpack.config.js new file mode 100644 index 000000000..3ccdaba0b --- /dev/null +++ b/browser/webpack.config.js @@ -0,0 +1,105 @@ +/* + * Minio Browser (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +var webpack = require('webpack') +var path = require('path') +var CopyWebpackPlugin = require('copy-webpack-plugin') +var purify = require("purifycss-webpack-plugin") + +var exports = { + context: __dirname, + entry: [ + path.resolve(__dirname, 'app/index.js') + ], + output: { + path: path.resolve(__dirname, 'dev'), + filename: 'index_bundle.js', + publicPath: '/minio/' + }, + module: { + loaders: [{ + test: /\.js$/, + exclude: /(node_modules|bower_components)/, + loader: 'babel', + query: { + presets: ['react', 'es2015'] + } + }, { + test: /\.less$/, + loader: 'style!css!less' + }, { + test: /\.json$/, + loader: 'json-loader' + },{ + test: /\.css$/, + loader: 'style!css' + }, { + test: /\.(eot|woff|woff2|ttf|svg|png)/, + loader: 'url' + }] + }, + node:{ + fs:'empty' + }, + devServer: { + historyApiFallback: { + index: '/minio/' + }, + proxy: { + '/minio/webrpc': { + target: 'http://localhost:9000', + secure: false + }, + '/minio/upload/*': { + target: 'http://localhost:9000', + secure: false + }, 
+ '/minio/download/*': { + target: 'http://localhost:9000', + secure: false + }, + } + }, + plugins: [ + new CopyWebpackPlugin([ + {from: 'app/css/loader.css'}, + {from: 'app/img/favicon.ico'}, + {from: 'app/img/browsers/chrome.png'}, + {from: 'app/img/browsers/firefox.png'}, + {from: 'app/img/browsers/safari.png'}, + {from: 'app/img/logo.svg'}, + {from: 'app/index.html'} + ]), + new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/), + new purify({ + basePath: __dirname, + paths: [ + "app/index.html", + "app/js/*.js" + ] + }) + ] +} + +if (process.env.NODE_ENV === 'dev') { + exports.entry = [ + 'webpack/hot/dev-server', + 'webpack-dev-server/client?http://localhost:8080', + path.resolve(__dirname, 'app/index.js') + ] +} + +module.exports = exports diff --git a/browser/webpack.production.config.js b/browser/webpack.production.config.js new file mode 100644 index 000000000..9c0604dcc --- /dev/null +++ b/browser/webpack.production.config.js @@ -0,0 +1,88 @@ +/* + * Isomorphic Javascript library for Minio Browser JSON-RPC API, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var webpack = require('webpack') +var path = require('path') +var CopyWebpackPlugin = require('copy-webpack-plugin') +var purify = require("purifycss-webpack-plugin") + +var exports = { + context: __dirname, + entry: [ + path.resolve(__dirname, 'app/index.js') + ], + output: { + path: path.resolve(__dirname, 'production'), + filename: 'index_bundle.js' + }, + module: { + loaders: [{ + test: /\.js$/, + exclude: /(node_modules|bower_components)/, + loader: 'babel', + query: { + presets: ['react', 'es2015'] + } + }, { + test: /\.less$/, + loader: 'style!css!less' + }, { + test: /\.json$/, + loader: 'json-loader' + }, { + test: /\.css$/, + loader: 'style!css' + }, { + test: /\.(eot|woff|woff2|ttf|svg|png)/, + loader: 'url' + }] + }, + node:{ + fs:'empty' + }, + plugins: [ + new CopyWebpackPlugin([ + {from: 'app/css/loader.css'}, + {from: 'app/img/favicon.ico'}, + {from: 'app/img/browsers/chrome.png'}, + {from: 'app/img/browsers/firefox.png'}, + {from: 'app/img/browsers/safari.png'}, + {from: 'app/img/logo.svg'}, + {from: 'app/index.html'} + ]), + new webpack.DefinePlugin({ + 'process.env.NODE_ENV': '"production"' + }), + new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/), + new purify({ + basePath: __dirname, + paths: [ + "app/index.html", + "app/js/*.js" + ] + }) + ] +} + +if (process.env.NODE_ENV === 'dev') { + exports.entry = [ + 'webpack/hot/dev-server', + 'webpack-dev-server/client?http://localhost:8080', + path.resolve(__dirname, 'app/index.js') + ] +} + +module.exports = exports diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index d4cb4a498..8346ef944 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -33,7 +33,7 @@ import ( "github.com/gorilla/mux" "github.com/gorilla/rpc/v2/json2" "github.com/minio/minio-go/pkg/policy" - "github.com/minio/miniobrowser" + "github.com/minio/minio/browser" ) // WebGenericArgs - empty struct for calls that don't accept arguments diff --git a/cmd/web-router.go b/cmd/web-router.go 
index b84ea27f8..f478abfb8 100644 --- a/cmd/web-router.go +++ b/cmd/web-router.go @@ -25,7 +25,7 @@ import ( router "github.com/gorilla/mux" jsonrpc "github.com/gorilla/rpc/v2" "github.com/gorilla/rpc/v2/json2" - "github.com/minio/miniobrowser" + "github.com/minio/minio/browser" ) // webAPI container for Web API. diff --git a/vendor/github.com/minio/miniobrowser/LICENSE b/vendor/github.com/minio/miniobrowser/LICENSE deleted file mode 100644 index 8f71f43fe..000000000 --- a/vendor/github.com/minio/miniobrowser/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/vendor.json b/vendor/vendor.json index 2411c44d9..8b55ad8eb 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -199,12 +199,6 @@ "revision": "9e734013294ab153b0bdbe182738bcddd46f1947", "revisionTime": "2016-08-18T00:31:20Z" }, - { - "checksumSHA1": "lkkQ8bAbNRvg9AceSmuAfh3udFg=", - "path": "github.com/minio/miniobrowser", - "revision": "10e951aa618d52796584f9dd233353a52d104c8d", - "revisionTime": "2017-01-23T04:37:46Z" - }, { "checksumSHA1": "GOSe2XEQI4AYwrMoLZu8vtmzkJM=", "path": "github.com/minio/redigo/redis", From fc880f9b23926762b3b3527ee11da183fde4f374 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Tue, 24 Jan 2017 17:08:36 +0100 Subject: [PATCH 096/100] admin: Enhance set credentials test (#3619) Add more test cases and ignore access and secret keys set from the env --- cmd/admin-handlers_test.go | 102 +++++++++++++++++++++++++++++-------- 1 file changed, 82 insertions(+), 20 deletions(-) diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 250c144ca..22c406d1e 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -187,7 +187,7 @@ func getServiceCmdRequest(cmd cmdType, cred credential, body []byte) (*http.Requ // testServicesCmdHandler - parametrizes service subcommand tests on // cmdType value. 
-func testServicesCmdHandler(cmd cmdType, args map[string]interface{}, t *testing.T) { +func testServicesCmdHandler(cmd cmdType, t *testing.T) { adminTestBed, err := prepareAdminXLTestBed() if err != nil { t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") @@ -214,10 +214,6 @@ func testServicesCmdHandler(cmd cmdType, args map[string]interface{}, t *testing credentials := serverConfig.GetCredential() var body []byte - if cmd == setCreds { - body, _ = xml.Marshal(setCredsReq{Username: args["username"].(string), Password: args["password"].(string)}) - } - req, err := getServiceCmdRequest(cmd, credentials, body) if err != nil { t.Fatalf("Failed to build service status request %v", err) @@ -240,18 +236,6 @@ func testServicesCmdHandler(cmd cmdType, args map[string]interface{}, t *testing } } - if cmd == setCreds { - // Check if new credentials are set - cred := serverConfig.GetCredential() - if cred.AccessKey != args["username"].(string) { - t.Errorf("Wrong access key, expected = %s, found = %s", args["username"].(string), cred.AccessKey) - } - if cred.SecretKey != args["password"].(string) { - t.Errorf("Wrong secret key, expected = %s, found = %s", args["password"].(string), cred.SecretKey) - } - - } - if rec.Code != http.StatusOK { resp, _ := ioutil.ReadAll(rec.Body) t.Errorf("Expected to receive %d status code but received %d. Body (%s)", @@ -261,16 +245,94 @@ func testServicesCmdHandler(cmd cmdType, args map[string]interface{}, t *testing // Test for service status management REST API. func TestServiceStatusHandler(t *testing.T) { - testServicesCmdHandler(statusCmd, nil, t) + testServicesCmdHandler(statusCmd, t) } // Test for service restart management REST API. func TestServiceRestartHandler(t *testing.T) { - testServicesCmdHandler(restartCmd, nil, t) + testServicesCmdHandler(restartCmd, t) } +// Test for service set creds management REST API. 
func TestServiceSetCreds(t *testing.T) { - testServicesCmdHandler(setCreds, map[string]interface{}{"username": "minio", "password": "minio123"}, t) + adminTestBed, err := prepareAdminXLTestBed() + if err != nil { + t.Fatal("Failed to initialize a single node XL backend for admin handler tests.") + } + defer adminTestBed.TearDown() + + // Initialize admin peers to make admin RPC calls. Note: In a + // single node setup, this degenerates to a simple function + // call under the hood. + eps, err := parseStorageEndpoints([]string{"http://localhost"}) + if err != nil { + t.Fatalf("Failed to parse storage end point - %v", err) + } + + // Set globalMinioAddr to be able to distinguish local endpoints from remote. + globalMinioAddr = eps[0].Host + initGlobalAdminPeers(eps) + + credentials := serverConfig.GetCredential() + var body []byte + + testCases := []struct { + Username string + Password string + EnvKeysSet bool + ExpectedStatusCode int + }{ + // Bad secret key + {"minio", "minio", false, http.StatusBadRequest}, + // Bad secret key set from the env + {"minio", "minio", true, http.StatusMethodNotAllowed}, + // Good keys set from the env + {"minio", "minio123", true, http.StatusMethodNotAllowed}, + // Successful operation should be the last one to do not change server credentials during tests. 
+ {"minio", "minio123", false, http.StatusOK}, + } + for i, testCase := range testCases { + // Set or unset environement keys + if !testCase.EnvKeysSet { + globalEnvAccessKey = "" + globalEnvSecretKey = "" + } else { + globalEnvAccessKey = testCase.Username + globalEnvSecretKey = testCase.Password + } + + // Construct setCreds request body + body, _ = xml.Marshal(setCredsReq{Username: testCase.Username, Password: testCase.Password}) + // Construct setCreds request + req, err := getServiceCmdRequest(setCreds, credentials, body) + if err != nil { + t.Fatalf("Failed to build service status request %v", err) + } + + rec := httptest.NewRecorder() + + // Execute request + adminTestBed.mux.ServeHTTP(rec, req) + + // Check if the http code response is expected + if rec.Code != testCase.ExpectedStatusCode { + t.Errorf("Test %d: Wrong status code, expected = %d, found = %d", i+1, testCase.ExpectedStatusCode, rec.Code) + resp, _ := ioutil.ReadAll(rec.Body) + t.Errorf("Expected to receive %d status code but received %d. Body (%s)", + http.StatusOK, rec.Code, string(resp)) + } + + // If we got 200 OK, check if new credentials are really set + if rec.Code == http.StatusOK { + cred := serverConfig.GetCredential() + if cred.AccessKey != testCase.Username { + t.Errorf("Test %d: Wrong access key, expected = %s, found = %s", i+1, testCase.Username, cred.AccessKey) + } + if cred.SecretKey != testCase.Password { + t.Errorf("Test %d: Wrong secret key, expected = %s, found = %s", i+1, testCase.Password, cred.SecretKey) + } + } + } } // mkLockQueryVal - helper function to build lock query param. 
From 0e693e0284def47f8a86962fa2d7fa55965e0161 Mon Sep 17 00:00:00 2001 From: Krishnan Parthasarathi Date: Tue, 24 Jan 2017 21:41:05 +0530 Subject: [PATCH 097/100] Add dry-run query param for HealFormat API (#3618) --- cmd/admin-handlers.go | 24 +++++++++++++++++------- pkg/madmin/API.md | 11 +++++++++-- pkg/madmin/examples/heal-format.go | 12 ++++++++++-- pkg/madmin/heal-commands.go | 9 ++++++--- 4 files changed, 42 insertions(+), 14 deletions(-) diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 2639d7c3b..b83f49652 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -398,7 +398,7 @@ func (adminAPI adminAPIHandlers) ListBucketsHealHandler(w http.ResponseWriter, r writeSuccessResponseXML(w, encodeResponse(listResponse)) } -// HealBucketHandler - POST /?heal&bucket=mybucket +// HealBucketHandler - POST /?heal&bucket=mybucket&dry-run // - x-minio-operation = bucket // - bucket is mandatory query parameter // Heal a given bucket, if present. @@ -425,7 +425,7 @@ func (adminAPI adminAPIHandlers) HealBucketHandler(w http.ResponseWriter, r *htt return } - // if dry-run=yes, then only perform validations and return success. + // if dry-run is present in query-params, then only perform validations and return success. if isDryRun(vars) { writeSuccessResponseHeadersOnly(w) return @@ -442,15 +442,16 @@ func (adminAPI adminAPIHandlers) HealBucketHandler(w http.ResponseWriter, r *htt writeSuccessResponseHeadersOnly(w) } -// isDryRun - returns true if dry-run query param was set to yes and false otherwise. +// isDryRun - returns true if dry-run query param was set and false otherwise. +// otherwise. 
func isDryRun(qval url.Values) bool { - if dryRun := qval.Get(string(mgmtDryRun)); dryRun == "yes" { + if _, dryRun := qval[string(mgmtDryRun)]; dryRun { return true } return false } -// HealObjectHandler - POST /?heal&bucket=mybucket&object=myobject +// HealObjectHandler - POST /?heal&bucket=mybucket&object=myobject&dry-run // - x-minio-operation = object // - bucket and object are both mandatory query parameters // Heal a given object, if present. @@ -485,7 +486,8 @@ func (adminAPI adminAPIHandlers) HealObjectHandler(w http.ResponseWriter, r *htt return } - // if dry-run=yes, then only perform validations and return success. + // if dry-run is set in query params then perform validations + // and return success. if isDryRun(vars) { writeSuccessResponseHeadersOnly(w) return @@ -501,7 +503,7 @@ func (adminAPI adminAPIHandlers) HealObjectHandler(w http.ResponseWriter, r *htt writeSuccessResponseHeadersOnly(w) } -// HealFormatHandler - POST /?heal +// HealFormatHandler - POST /?heal&dry-run // - x-minio-operation = format // - bucket and object are both mandatory query parameters // Heal a given object, if present. @@ -528,6 +530,14 @@ func (adminAPI adminAPIHandlers) HealFormatHandler(w http.ResponseWriter, r *htt return } + // if dry-run is set in query-params, return success as + // validations are successful so far. + vars := r.URL.Query() + if isDryRun(vars) { + writeSuccessResponseHeadersOnly(w) + return + } + // Create a new set of storage instances to heal format.json. bootstrapDisks, err := initStorageDisks(globalEndpoints) if err != nil { diff --git a/pkg/madmin/API.md b/pkg/madmin/API.md index 420f20984..5d588db51 100644 --- a/pkg/madmin/API.md +++ b/pkg/madmin/API.md @@ -251,13 +251,20 @@ __Example__ ``` -### HealFormat() error +### HealFormat(isDryRun bool) error Heal storage format on available disks. This is used when disks were replaced or were found with missing format. This is supported only for erasure-coded backend. 
__Example__ ``` go - err := madmClnt.HealFormat() + isDryRun := true + err := madmClnt.HealFormat(isDryRun) + if err != nil { + log.Fatalln(err) + } + + isDryRun = false + err = madmClnt.HealFormat(isDryRun) if err != nil { log.Fatalln(err) } diff --git a/pkg/madmin/examples/heal-format.go b/pkg/madmin/examples/heal-format.go index 4b915747d..fb0484beb 100644 --- a/pkg/madmin/examples/heal-format.go +++ b/pkg/madmin/examples/heal-format.go @@ -39,8 +39,16 @@ func main() { log.Fatalln(err) } - // Heal storage format on available disks. - err = madmClnt.HealFormat() + // Attempt healing format in dry-run mode. + isDryRun := true + err = madmClnt.HealFormat(isDryRun) + if err != nil { + log.Fatalln(err) + } + + // Perform actual healing of format. + isDryRun = false + err = madmClnt.HealFormat(isDryRun) if err != nil { log.Fatalln(err) } diff --git a/pkg/madmin/heal-commands.go b/pkg/madmin/heal-commands.go index ba6d6fa12..34128345f 100644 --- a/pkg/madmin/heal-commands.go +++ b/pkg/madmin/heal-commands.go @@ -344,7 +344,7 @@ func (adm *AdminClient) HealBucket(bucket string, dryrun bool) error { queryVal.Set("heal", "") queryVal.Set(string(healBucket), bucket) if dryrun { - queryVal.Set(string(healDryRun), "yes") + queryVal.Set(string(healDryRun), "") } hdrs := make(http.Header) @@ -378,7 +378,7 @@ func (adm *AdminClient) HealObject(bucket, object string, dryrun bool) error { queryVal.Set(string(healBucket), bucket) queryVal.Set(string(healObject), object) if dryrun { - queryVal.Set(string(healDryRun), "yes") + queryVal.Set(string(healDryRun), "") } hdrs := make(http.Header) @@ -405,9 +405,12 @@ func (adm *AdminClient) HealObject(bucket, object string, dryrun bool) error { } // HealFormat - heal storage format on available disks. 
-func (adm *AdminClient) HealFormat() error { +func (adm *AdminClient) HealFormat(dryrun bool) error { queryVal := url.Values{} queryVal.Set("heal", "") + if dryrun { + queryVal.Set(string(healDryRun), "") + } // Set x-minio-operation to format. hdrs := make(http.Header) From b4343a28b77f39bfe6f929a27426e455c5ae3a6c Mon Sep 17 00:00:00 2001 From: Krishna Srinivas Date: Tue, 24 Jan 2017 11:56:30 -0800 Subject: [PATCH 098/100] browser: add yarn.lock and .gitignore files. (#3621) --- browser/.gitignore | 20 + browser/yarn.lock | 4791 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 4811 insertions(+) create mode 100644 browser/.gitignore create mode 100644 browser/yarn.lock diff --git a/browser/.gitignore b/browser/.gitignore new file mode 100644 index 000000000..6e4ed66d7 --- /dev/null +++ b/browser/.gitignore @@ -0,0 +1,20 @@ +**/*.swp +cover.out +*~ +minio +!*/ +site/ +**/*.test +**/*.sublime-workspace +/.idea/ +/Minio.iml +**/access.log +build +vendor/**/*.js +vendor/**/*.json +release +.DS_Store +*.syso +coverage.txt +node_modules +production diff --git a/browser/yarn.lock b/browser/yarn.lock new file mode 100644 index 000000000..c53fe8b86 --- /dev/null +++ b/browser/yarn.lock @@ -0,0 +1,4791 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +abbrev@1: + version "1.0.9" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135" + +accepts@1.3.3, accepts@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca" + dependencies: + mime-types "~2.1.11" + negotiator "0.6.1" + +acorn-to-esprima@^2.0.6, acorn-to-esprima@^2.0.8: + version "2.0.8" + resolved "https://registry.yarnpkg.com/acorn-to-esprima/-/acorn-to-esprima-2.0.8.tgz#003f0c642eb92132f417d3708f14ada82adf2eb1" + +acorn@^3.0.0, acorn@^3.1.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-3.3.0.tgz#45e37fb39e8da3f25baee3ff5369e2bb5f22017a" + +add-px-to-style@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/add-px-to-style/-/add-px-to-style-1.0.0.tgz#d0c135441fa8014a8137904531096f67f28f263a" + +after@0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/after/-/after-0.8.2.tgz#fedb394f9f0e02aa9768e702bda23b505fae7e1f" + +align-text@^0.1.1, align-text@^0.1.3: + version "0.1.4" + resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117" + dependencies: + kind-of "^3.0.2" + longest "^1.0.1" + repeat-string "^1.5.2" + +alphanum-sort@^1.0.1, alphanum-sort@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/alphanum-sort/-/alphanum-sort-1.0.2.tgz#97a1119649b211ad33691d9f9f486a8ec9fbe0a3" + +amdefine@>=0.0.4: + version "1.0.1" + resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5" + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + +ansi-styles@^2.0.1, ansi-styles@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + 
+anymatch@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507" + dependencies: + arrify "^1.0.0" + micromatch "^2.1.5" + +aproba@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.0.4.tgz#2713680775e7614c8ba186c065d4e2e52d1072c0" + +are-we-there-yet@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.2.tgz#80e470e95a084794fe1899262c5667c6e88de1b3" + dependencies: + delegates "^1.0.0" + readable-stream "^2.0.0 || ^1.1.13" + +argparse@^1.0.7: + version "1.0.9" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86" + dependencies: + sprintf-js "~1.0.2" + +arr-diff@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf" + dependencies: + arr-flatten "^1.0.1" + +arr-flatten@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.1.tgz#e5ffe54d45e19f32f216e91eb99c8ce892bb604b" + +array-flatten@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" + +array-slice@^0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/array-slice/-/array-slice-0.2.3.tgz#dd3cfb80ed7973a75117cdac69b0b99ec86186f5" + +array-unique@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53" + +arraybuffer.slice@0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca" + +arrify@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" + +asap@~2.0.3: + version "2.0.5" + resolved 
"https://registry.yarnpkg.com/asap/-/asap-2.0.5.tgz#522765b50c3510490e52d7dcfe085ef9ba96958f" + +asn1@~0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86" + +assert-plus@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234" + +assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + +assert@^1.1.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/assert/-/assert-1.4.1.tgz#99912d591836b5a6f5b345c0f07eefc08fc65d91" + dependencies: + util "0.10.3" + +ast-types@0.9.4: + version "0.9.4" + resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.4.tgz#410d1f81890aeb8e0a38621558ba5869ae53c91b" + +async-each@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.1.tgz#19d386a1d9edc6e7c1c85d388aedbcc56d33602d" + +async@^0.9.0, async@~0.9.0: + version "0.9.2" + resolved "https://registry.yarnpkg.com/async/-/async-0.9.2.tgz#aea74d5e61c1f899613bf64bda66d4c78f2fd17d" + +async@^1.3.0, async@^1.4.0, async@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" + +async@~0.2.6: + version "0.2.10" + resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1" + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + +atob@~1.1.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/atob/-/atob-1.1.3.tgz#95f13629b12c3a51a5d215abdce2aa9f32f80773" + +attr-accept@^1.0.3: + version "1.1.0" + resolved "https://registry.yarnpkg.com/attr-accept/-/attr-accept-1.1.0.tgz#b5cd35227f163935a8f1de10ed3eba16941f6be6" + +autoprefixer@^6.3.1: + version 
"6.7.0" + resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-6.7.0.tgz#88992cf04df141e7b8293550f2ee716c565d1cae" + dependencies: + browserslist "~1.6.0" + caniuse-db "^1.0.30000613" + normalize-range "^0.1.2" + num2fraction "^1.2.2" + postcss "^5.2.11" + postcss-value-parser "^3.2.3" + +aws-sign2@~0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f" + +aws4@^1.2.1: + version "1.5.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.5.0.tgz#0a29ffb79c31c9e712eeb087e8e7a64b4a56d755" + +babel-cli@^6.14.0: + version "6.22.2" + resolved "https://registry.yarnpkg.com/babel-cli/-/babel-cli-6.22.2.tgz#3f814c8acf52759082b8fedd9627f938936ab559" + dependencies: + babel-core "^6.22.1" + babel-polyfill "^6.22.0" + babel-register "^6.22.0" + babel-runtime "^6.22.0" + commander "^2.8.1" + convert-source-map "^1.1.0" + fs-readdir-recursive "^1.0.0" + glob "^7.0.0" + lodash "^4.2.0" + output-file-sync "^1.1.0" + path-is-absolute "^1.0.0" + slash "^1.0.0" + source-map "^0.5.0" + v8flags "^2.0.10" + optionalDependencies: + chokidar "^1.6.1" + +babel-code-frame@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.22.0.tgz#027620bee567a88c32561574e7fd0801d33118e4" + dependencies: + chalk "^1.1.0" + esutils "^2.0.2" + js-tokens "^3.0.0" + +babel-core@^6.14.0, babel-core@^6.22.0, babel-core@^6.22.1: + version "6.22.1" + resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.22.1.tgz#9c5fd658ba1772d28d721f6d25d968fc7ae21648" + dependencies: + babel-code-frame "^6.22.0" + babel-generator "^6.22.0" + babel-helpers "^6.22.0" + babel-messages "^6.22.0" + babel-register "^6.22.0" + babel-runtime "^6.22.0" + babel-template "^6.22.0" + babel-traverse "^6.22.1" + babel-types "^6.22.0" + babylon "^6.11.0" + convert-source-map "^1.1.0" + debug "^2.1.1" + json5 "^0.5.0" + lodash "^4.2.0" + minimatch "^3.0.2" + path-is-absolute 
"^1.0.0" + private "^0.1.6" + slash "^1.0.0" + source-map "^0.5.0" + +babel-generator@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.22.0.tgz#d642bf4961911a8adc7c692b0c9297f325cda805" + dependencies: + babel-messages "^6.22.0" + babel-runtime "^6.22.0" + babel-types "^6.22.0" + detect-indent "^4.0.0" + jsesc "^1.3.0" + lodash "^4.2.0" + source-map "^0.5.0" + +babel-helper-builder-react-jsx@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helper-builder-react-jsx/-/babel-helper-builder-react-jsx-6.22.0.tgz#aafb31913e47761fd4d0b6987756a144a65fca0d" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.22.0" + esutils "^2.0.0" + lodash "^4.2.0" + +babel-helper-call-delegate@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helper-call-delegate/-/babel-helper-call-delegate-6.22.0.tgz#119921b56120f17e9dae3f74b4f5cc7bcc1b37ef" + dependencies: + babel-helper-hoist-variables "^6.22.0" + babel-runtime "^6.22.0" + babel-traverse "^6.22.0" + babel-types "^6.22.0" + +babel-helper-define-map@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helper-define-map/-/babel-helper-define-map-6.22.0.tgz#9544e9502b2d6dfe7d00ff60e82bd5a7a89e95b7" + dependencies: + babel-helper-function-name "^6.22.0" + babel-runtime "^6.22.0" + babel-types "^6.22.0" + lodash "^4.2.0" + +babel-helper-function-name@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helper-function-name/-/babel-helper-function-name-6.22.0.tgz#51f1bdc4bb89b15f57a9b249f33d742816dcbefc" + dependencies: + babel-helper-get-function-arity "^6.22.0" + babel-runtime "^6.22.0" + babel-template "^6.22.0" + babel-traverse "^6.22.0" + babel-types "^6.22.0" + +babel-helper-get-function-arity@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.22.0.tgz#0beb464ad69dc7347410ac6ade9f03a50634f5ce" 
+ dependencies: + babel-runtime "^6.22.0" + babel-types "^6.22.0" + +babel-helper-hoist-variables@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.22.0.tgz#3eacbf731d80705845dd2e9718f600cfb9b4ba72" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.22.0" + +babel-helper-optimise-call-expression@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.22.0.tgz#f8d5d4b40a6e2605a6a7f9d537b581bea3756d15" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.22.0" + +babel-helper-regex@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helper-regex/-/babel-helper-regex-6.22.0.tgz#79f532be1647b1f0ee3474b5f5c3da58001d247d" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.22.0" + lodash "^4.2.0" + +babel-helper-replace-supers@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helper-replace-supers/-/babel-helper-replace-supers-6.22.0.tgz#1fcee2270657548908c34db16bcc345f9850cf42" + dependencies: + babel-helper-optimise-call-expression "^6.22.0" + babel-messages "^6.22.0" + babel-runtime "^6.22.0" + babel-template "^6.22.0" + babel-traverse "^6.22.0" + babel-types "^6.22.0" + +babel-helpers@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.22.0.tgz#d275f55f2252b8101bff07bc0c556deda657392c" + dependencies: + babel-runtime "^6.22.0" + babel-template "^6.22.0" + +babel-loader@^6.2.5: + version "6.2.10" + resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-6.2.10.tgz#adefc2b242320cd5d15e65b31cea0e8b1b02d4b0" + dependencies: + find-cache-dir "^0.1.1" + loader-utils "^0.2.11" + mkdirp "^0.5.1" + object-assign "^4.0.1" + +babel-messages@^6.22.0: + version "6.22.0" + resolved 
"https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.22.0.tgz#36066a214f1217e4ed4164867669ecb39e3ea575" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-check-es2015-constants@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz#35157b101426fd2ffd3da3f75c7d1e91835bbf8a" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-syntax-flow@^6.18.0, babel-plugin-syntax-flow@^6.3.13: + version "6.18.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-flow/-/babel-plugin-syntax-flow-6.18.0.tgz#4c3ab20a2af26aa20cd25995c398c4eb70310c8d" + +babel-plugin-syntax-jsx@^6.3.13, babel-plugin-syntax-jsx@^6.8.0: + version "6.18.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-jsx/-/babel-plugin-syntax-jsx-6.18.0.tgz#0af32a9a6e13ca7a3fd5069e62d7b0f58d0d8946" + +babel-plugin-syntax-object-rest-spread@^6.13.0, babel-plugin-syntax-object-rest-spread@^6.8.0: + version "6.13.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-object-rest-spread/-/babel-plugin-syntax-object-rest-spread-6.13.0.tgz#fd6536f2bce13836ffa3a5458c4903a597bb3bf5" + +babel-plugin-transform-es2015-arrow-functions@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz#452692cb711d5f79dc7f85e440ce41b9f244d221" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-block-scoped-functions@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz#bbc51b49f964d70cb8d8e0b94e820246ce3a6141" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-block-scoping@^6.22.0: + version "6.22.0" + resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.22.0.tgz#00d6e3a0bebdcfe7536b9d653b44a9141e63e47e" + dependencies: + babel-runtime "^6.22.0" + babel-template "^6.22.0" + babel-traverse "^6.22.0" + babel-types "^6.22.0" + lodash "^4.2.0" + +babel-plugin-transform-es2015-classes@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.22.0.tgz#54d44998fd823d9dca15292324161c331c1b6f14" + dependencies: + babel-helper-define-map "^6.22.0" + babel-helper-function-name "^6.22.0" + babel-helper-optimise-call-expression "^6.22.0" + babel-helper-replace-supers "^6.22.0" + babel-messages "^6.22.0" + babel-runtime "^6.22.0" + babel-template "^6.22.0" + babel-traverse "^6.22.0" + babel-types "^6.22.0" + +babel-plugin-transform-es2015-computed-properties@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.22.0.tgz#7c383e9629bba4820c11b0425bdd6290f7f057e7" + dependencies: + babel-runtime "^6.22.0" + babel-template "^6.22.0" + +babel-plugin-transform-es2015-destructuring@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.22.0.tgz#8e0af2f885a0b2cf999d47c4c1dd23ce88cfa4c6" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-duplicate-keys@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.22.0.tgz#672397031c21610d72dd2bbb0ba9fb6277e1c36b" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.22.0" + +babel-plugin-transform-es2015-for-of@^6.22.0: + version "6.22.0" + resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.22.0.tgz#180467ad63aeea592a1caeee4bf1c8b3e2616265" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-function-name@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.22.0.tgz#f5fcc8b09093f9a23c76ac3d9e392c3ec4b77104" + dependencies: + babel-helper-function-name "^6.22.0" + babel-runtime "^6.22.0" + babel-types "^6.22.0" + +babel-plugin-transform-es2015-literals@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz#4f54a02d6cd66cf915280019a31d31925377ca2e" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-modules-amd@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.22.0.tgz#bf69cd34889a41c33d90dfb740e0091ccff52f21" + dependencies: + babel-plugin-transform-es2015-modules-commonjs "^6.22.0" + babel-runtime "^6.22.0" + babel-template "^6.22.0" + +babel-plugin-transform-es2015-modules-commonjs@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.22.0.tgz#6ca04e22b8e214fb50169730657e7a07dc941145" + dependencies: + babel-plugin-transform-strict-mode "^6.22.0" + babel-runtime "^6.22.0" + babel-template "^6.22.0" + babel-types "^6.22.0" + +babel-plugin-transform-es2015-modules-systemjs@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.22.0.tgz#810cd0cd025a08383b84236b92c6e31f88e644ad" + dependencies: + babel-helper-hoist-variables "^6.22.0" + babel-runtime "^6.22.0" + 
babel-template "^6.22.0" + +babel-plugin-transform-es2015-modules-umd@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.22.0.tgz#60d0ba3bd23258719c64391d9bf492d648dc0fae" + dependencies: + babel-plugin-transform-es2015-modules-amd "^6.22.0" + babel-runtime "^6.22.0" + babel-template "^6.22.0" + +babel-plugin-transform-es2015-object-super@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.22.0.tgz#daa60e114a042ea769dd53fe528fc82311eb98fc" + dependencies: + babel-helper-replace-supers "^6.22.0" + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-parameters@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.22.0.tgz#57076069232019094f27da8c68bb7162fe208dbb" + dependencies: + babel-helper-call-delegate "^6.22.0" + babel-helper-get-function-arity "^6.22.0" + babel-runtime "^6.22.0" + babel-template "^6.22.0" + babel-traverse "^6.22.0" + babel-types "^6.22.0" + +babel-plugin-transform-es2015-shorthand-properties@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.22.0.tgz#8ba776e0affaa60bff21e921403b8a652a2ff723" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.22.0" + +babel-plugin-transform-es2015-spread@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz#d6d68a99f89aedc4536c81a542e8dd9f1746f8d1" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-sticky-regex@^6.22.0: + version "6.22.0" + resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.22.0.tgz#ab316829e866ee3f4b9eb96939757d19a5bc4593" + dependencies: + babel-helper-regex "^6.22.0" + babel-runtime "^6.22.0" + babel-types "^6.22.0" + +babel-plugin-transform-es2015-template-literals@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz#a84b3450f7e9f8f1f6839d6d687da84bb1236d8d" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-typeof-symbol@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.22.0.tgz#87faf2336d3b6a97f68c4d906b0cd0edeae676e1" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-es2015-unicode-regex@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.22.0.tgz#8d9cc27e7ee1decfe65454fb986452a04a613d20" + dependencies: + babel-helper-regex "^6.22.0" + babel-runtime "^6.22.0" + regexpu-core "^2.0.0" + +babel-plugin-transform-flow-strip-types@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-flow-strip-types/-/babel-plugin-transform-flow-strip-types-6.22.0.tgz#84cb672935d43714fdc32bce84568d87441cf7cf" + dependencies: + babel-plugin-syntax-flow "^6.18.0" + babel-runtime "^6.22.0" + +babel-plugin-transform-object-rest-spread@^6.8.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-object-rest-spread/-/babel-plugin-transform-object-rest-spread-6.22.0.tgz#1d419b55e68d2e4f64a5ff3373bd67d73c8e83bc" + dependencies: + babel-plugin-syntax-object-rest-spread "^6.8.0" + babel-runtime "^6.22.0" + +babel-plugin-transform-react-display-name@^6.22.0: + version "6.22.0" + resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-react-display-name/-/babel-plugin-transform-react-display-name-6.22.0.tgz#077197520fa8562b8d3da4c3c4b0b1bdd7853f26" + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-transform-react-jsx-self@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-react-jsx-self/-/babel-plugin-transform-react-jsx-self-6.22.0.tgz#df6d80a9da2612a121e6ddd7558bcbecf06e636e" + dependencies: + babel-plugin-syntax-jsx "^6.8.0" + babel-runtime "^6.22.0" + +babel-plugin-transform-react-jsx-source@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-react-jsx-source/-/babel-plugin-transform-react-jsx-source-6.22.0.tgz#66ac12153f5cd2d17b3c19268f4bf0197f44ecd6" + dependencies: + babel-plugin-syntax-jsx "^6.8.0" + babel-runtime "^6.22.0" + +babel-plugin-transform-react-jsx@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-react-jsx/-/babel-plugin-transform-react-jsx-6.22.0.tgz#48556b7dd4c3fe97d1c943bcd54fc3f2561c1817" + dependencies: + babel-helper-builder-react-jsx "^6.22.0" + babel-plugin-syntax-jsx "^6.8.0" + babel-runtime "^6.22.0" + +babel-plugin-transform-regenerator@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.22.0.tgz#65740593a319c44522157538d690b84094617ea6" + dependencies: + regenerator-transform "0.9.8" + +babel-plugin-transform-strict-mode@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.22.0.tgz#e008df01340fdc87e959da65991b7e05970c8c7c" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.22.0" + +babel-polyfill@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-polyfill/-/babel-polyfill-6.22.0.tgz#1ac99ebdcc6ba4db1e2618c387b2084a82154a3b" + dependencies: + babel-runtime "^6.22.0" + 
core-js "^2.4.0" + regenerator-runtime "^0.10.0" + +babel-preset-es2015@^6.14.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-preset-es2015/-/babel-preset-es2015-6.22.0.tgz#af5a98ecb35eb8af764ad8a5a05eb36dc4386835" + dependencies: + babel-plugin-check-es2015-constants "^6.22.0" + babel-plugin-transform-es2015-arrow-functions "^6.22.0" + babel-plugin-transform-es2015-block-scoped-functions "^6.22.0" + babel-plugin-transform-es2015-block-scoping "^6.22.0" + babel-plugin-transform-es2015-classes "^6.22.0" + babel-plugin-transform-es2015-computed-properties "^6.22.0" + babel-plugin-transform-es2015-destructuring "^6.22.0" + babel-plugin-transform-es2015-duplicate-keys "^6.22.0" + babel-plugin-transform-es2015-for-of "^6.22.0" + babel-plugin-transform-es2015-function-name "^6.22.0" + babel-plugin-transform-es2015-literals "^6.22.0" + babel-plugin-transform-es2015-modules-amd "^6.22.0" + babel-plugin-transform-es2015-modules-commonjs "^6.22.0" + babel-plugin-transform-es2015-modules-systemjs "^6.22.0" + babel-plugin-transform-es2015-modules-umd "^6.22.0" + babel-plugin-transform-es2015-object-super "^6.22.0" + babel-plugin-transform-es2015-parameters "^6.22.0" + babel-plugin-transform-es2015-shorthand-properties "^6.22.0" + babel-plugin-transform-es2015-spread "^6.22.0" + babel-plugin-transform-es2015-sticky-regex "^6.22.0" + babel-plugin-transform-es2015-template-literals "^6.22.0" + babel-plugin-transform-es2015-typeof-symbol "^6.22.0" + babel-plugin-transform-es2015-unicode-regex "^6.22.0" + babel-plugin-transform-regenerator "^6.22.0" + +babel-preset-react@^6.11.1: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-preset-react/-/babel-preset-react-6.22.0.tgz#7bc97e2d73eec4b980fb6b4e4e0884e81ccdc165" + dependencies: + babel-plugin-syntax-flow "^6.3.13" + babel-plugin-syntax-jsx "^6.3.13" + babel-plugin-transform-flow-strip-types "^6.22.0" + babel-plugin-transform-react-display-name "^6.22.0" + babel-plugin-transform-react-jsx 
"^6.22.0" + babel-plugin-transform-react-jsx-self "^6.22.0" + babel-plugin-transform-react-jsx-source "^6.22.0" + +babel-register@^6.14.0, babel-register@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.22.0.tgz#a61dd83975f9ca4a9e7d6eff3059494cd5ea4c63" + dependencies: + babel-core "^6.22.0" + babel-runtime "^6.22.0" + core-js "^2.4.0" + home-or-tmp "^2.0.0" + lodash "^4.2.0" + mkdirp "^0.5.1" + source-map-support "^0.4.2" + +babel-runtime@^5.8.25: + version "5.8.38" + resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-5.8.38.tgz#1c0b02eb63312f5f087ff20450827b425c9d4c19" + dependencies: + core-js "^1.0.0" + +babel-runtime@^6.18.0, babel-runtime@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.22.0.tgz#1cf8b4ac67c77a4ddb0db2ae1f74de52ac4ca611" + dependencies: + core-js "^2.4.0" + regenerator-runtime "^0.10.0" + +babel-template@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.22.0.tgz#403d110905a4626b317a2a1fcb8f3b73204b2edb" + dependencies: + babel-runtime "^6.22.0" + babel-traverse "^6.22.0" + babel-types "^6.22.0" + babylon "^6.11.0" + lodash "^4.2.0" + +babel-traverse@^6.22.0, babel-traverse@^6.22.1, babel-traverse@^6.4.5, babel-traverse@^6.9.0: + version "6.22.1" + resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.22.1.tgz#3b95cd6b7427d6f1f757704908f2fc9748a5f59f" + dependencies: + babel-code-frame "^6.22.0" + babel-messages "^6.22.0" + babel-runtime "^6.22.0" + babel-types "^6.22.0" + babylon "^6.15.0" + debug "^2.2.0" + globals "^9.0.0" + invariant "^2.2.0" + lodash "^4.2.0" + +babel-types@^6.19.0, babel-types@^6.22.0: + version "6.22.0" + resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.22.0.tgz#2a447e8d0ea25d2512409e4175479fd78cc8b1db" + dependencies: + babel-runtime "^6.22.0" + esutils "^2.0.2" + lodash "^4.2.0" + to-fast-properties "^1.0.1" 
+ +babylon@^6.11.0, babylon@^6.15.0, babylon@^6.8.0: + version "6.15.0" + resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.15.0.tgz#ba65cfa1a80e1759b0e89fb562e27dccae70348e" + +backo2@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947" + +balanced-match@^0.4.1, balanced-match@^0.4.2: + version "0.4.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838" + +base62@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/base62/-/base62-1.1.2.tgz#22ced6a49913565bc0b8d9a11563a465c084124c" + +base64-arraybuffer@0.1.5: + version "0.1.5" + resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8" + +base64-js@^1.0.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.2.0.tgz#a39992d723584811982be5e290bb6a53d86700f1" + +base64id@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/base64id/-/base64id-1.0.0.tgz#47688cb99bb6804f0e06d3e763b1c32e57d8e6b6" + +batch@0.5.3, batch@^0.5.3: + version "0.5.3" + resolved "https://registry.yarnpkg.com/batch/-/batch-0.5.3.tgz#3f3414f380321743bfc1042f9a83ff1d5824d464" + +bcrypt-pbkdf@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.0.tgz#3ca76b85241c7170bf7d9703e7b9aa74630040d4" + dependencies: + tweetnacl "^0.14.3" + +better-assert@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522" + dependencies: + callsite "1.0.0" + +big.js@^3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/big.js/-/big.js-3.1.3.tgz#4cada2193652eb3ca9ec8e55c9015669c9806978" + +binary-extensions@^1.0.0: + version "1.8.0" + resolved 
"https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.8.0.tgz#48ec8d16df4377eae5fa5884682480af4d95c774" + +blob@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921" + +block-stream@*: + version "0.0.9" + resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.9.tgz#13ebfe778a03205cfe03751481ebb4b3300c126a" + dependencies: + inherits "~2.0.0" + +bluebird@^2.10.2, bluebird@^2.9.27: + version "2.11.0" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1" + +bluebird@^3.4.7: + version "3.4.7" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.4.7.tgz#f72d760be09b7f76d08ed8fae98b289a8d05fab3" + +body-parser@^1.12.4: + version "1.16.0" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.16.0.tgz#924a5e472c6229fb9d69b85a20d5f2532dec788b" + dependencies: + bytes "2.4.0" + content-type "~1.0.2" + debug "2.6.0" + depd "~1.1.0" + http-errors "~1.5.1" + iconv-lite "0.4.15" + on-finished "~2.3.0" + qs "6.2.1" + raw-body "~2.2.0" + type-is "~1.6.14" + +boolbase@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" + +boom@2.x.x: + version "2.10.1" + resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f" + dependencies: + hoek "2.x.x" + +bootstrap@^3.3.6: + version "3.3.7" + resolved "https://registry.yarnpkg.com/bootstrap/-/bootstrap-3.3.7.tgz#5a389394549f23330875a3b150656574f8a9eb71" + +brace-expansion@^1.0.0: + version "1.1.6" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.6.tgz#7197d7eaa9b87e648390ea61fc66c84427420df9" + dependencies: + balanced-match "^0.4.1" + concat-map "0.0.1" + +braces@^0.1.2: + version "0.1.5" + resolved 
"https://registry.yarnpkg.com/braces/-/braces-0.1.5.tgz#c085711085291d8b75fdd74eab0f8597280711e6" + dependencies: + expand-range "^0.1.0" + +braces@^1.8.2: + version "1.8.5" + resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7" + dependencies: + expand-range "^1.8.1" + preserve "^0.2.0" + repeat-element "^1.1.2" + +browserify-aes@0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-0.4.0.tgz#067149b668df31c4b58533e02d01e806d8608e2c" + dependencies: + inherits "^2.0.1" + +browserify-zlib@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/browserify-zlib/-/browserify-zlib-0.1.4.tgz#bb35f8a519f600e0fa6b8485241c979d0141fb2d" + dependencies: + pako "~0.2.0" + +browserslist@^1.0.1, browserslist@^1.5.2, browserslist@~1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-1.6.0.tgz#85fb7c993540d3fda31c282baf7f5aee698ac9ee" + dependencies: + caniuse-db "^1.0.30000613" + electron-to-chromium "^1.2.0" + +buffer-shims@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51" + +buffer@^4.9.0: + version "4.9.1" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-4.9.1.tgz#6d1bb601b07a4efced97094132093027c95bc298" + dependencies: + base64-js "^1.0.2" + ieee754 "^1.1.4" + isarray "^1.0.0" + +builtin-status-codes@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" + +bytes@2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.3.0.tgz#d5b680a165b6201739acb611542aabc2d8ceb070" + +bytes@2.4.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339" + +callsite@1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20" + +camel-case@3.0.x: + version "3.0.0" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-3.0.0.tgz#ca3c3688a4e9cf3a4cda777dc4dcbc713249cf73" + dependencies: + no-case "^2.2.0" + upper-case "^1.1.1" + +camelcase@^1.0.2: + version "1.2.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39" + +camelcase@^2.0.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f" + +caniuse-api@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-1.5.2.tgz#8f393c682f661c0a997b77bba6e826483fb3600e" + dependencies: + browserslist "^1.0.1" + caniuse-db "^1.0.30000346" + lodash.memoize "^4.1.0" + lodash.uniq "^4.3.0" + shelljs "^0.7.0" + +caniuse-db@^1.0.30000346, caniuse-db@^1.0.30000613: + version "1.0.30000613" + resolved "https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000613.tgz#639133b7a5380c1416f9701d23d54d093dd68299" + +caseless@~0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7" + +center-align@^0.1.1: + version "0.1.3" + resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad" + dependencies: + align-text "^0.1.3" + lazy-cache "^1.0.3" + +chalk@^1.1.0, chalk@^1.1.1, chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chokidar@^1.0.0, chokidar@^1.4.1, chokidar@^1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-1.6.1.tgz#2f4447ab5e96e50fb3d789fd90d4c72e0e4c70c2" + 
dependencies: + anymatch "^1.3.0" + async-each "^1.0.0" + glob-parent "^2.0.0" + inherits "^2.0.1" + is-binary-path "^1.0.0" + is-glob "^2.0.0" + path-is-absolute "^1.0.0" + readdirp "^2.0.0" + optionalDependencies: + fsevents "^1.0.0" + +clap@^1.0.9: + version "1.1.2" + resolved "https://registry.yarnpkg.com/clap/-/clap-1.1.2.tgz#316545bf22229225a2cecaa6824cd2f56a9709ed" + dependencies: + chalk "^1.1.3" + +classnames@^2.1.5, classnames@^2.2.3, classnames@^2.2.5: + version "2.2.5" + resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.5.tgz#fb3801d453467649ef3603c7d61a02bd129bde6d" + +clean-css@3.4.x, clean-css@^3.2.10: + version "3.4.24" + resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-3.4.24.tgz#89f5a5e9da37ae02394fe049a41388abbe72c3b5" + dependencies: + commander "2.8.x" + source-map "0.4.x" + +cliui@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1" + dependencies: + center-align "^0.1.1" + right-align "^0.1.1" + wordwrap "0.0.2" + +cliui@^3.0.3: + version "3.2.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-3.2.0.tgz#120601537a916d29940f934da3b48d585a39213d" + dependencies: + string-width "^1.0.1" + strip-ansi "^3.0.1" + wrap-ansi "^2.0.0" + +clone@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.2.tgz#260b7a99ebb1edfe247538175f783243cb19d149" + +coa@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/coa/-/coa-1.0.1.tgz#7f959346cfc8719e3f7233cd6852854a7c67d8a3" + dependencies: + q "^1.1.2" + +code-point-at@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" + +color-convert@^1.3.0: + version "1.9.0" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.0.tgz#1accf97dd739b983bf994d56fec8f95853641b7a" + dependencies: + color-name "^1.1.1" + +color-name@^1.0.0, 
color-name@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.1.tgz#4b1415304cf50028ea81643643bd82ea05803689" + +color-string@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/color-string/-/color-string-0.3.0.tgz#27d46fb67025c5c2fa25993bfbf579e47841b991" + dependencies: + color-name "^1.0.0" + +color@^0.11.0: + version "0.11.4" + resolved "https://registry.yarnpkg.com/color/-/color-0.11.4.tgz#6d7b5c74fb65e841cd48792ad1ed5e07b904d764" + dependencies: + clone "^1.0.2" + color-convert "^1.3.0" + color-string "^0.3.0" + +colormin@^1.0.5: + version "1.1.2" + resolved "https://registry.yarnpkg.com/colormin/-/colormin-1.1.2.tgz#ea2f7420a72b96881a38aae59ec124a6f7298133" + dependencies: + color "^0.11.0" + css-color-names "0.0.4" + has "^1.0.1" + +colors@^1.1.0, colors@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/colors/-/colors-1.1.2.tgz#168a4701756b6a7f51a12ce0c97bfa28c084ed63" + +combined-stream@^1.0.5, combined-stream@~1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009" + dependencies: + delayed-stream "~1.0.0" + +commander@0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/commander/-/commander-0.6.1.tgz#fa68a14f6a945d54dbbe50d8cdb3320e9e3b1a06" + +commander@2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.3.0.tgz#fd430e889832ec353b9acd1de217c11cb3eef873" + +commander@2.8.x: + version "2.8.1" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.8.1.tgz#06be367febfda0c330aa1e2a072d3dc9762425d4" + dependencies: + graceful-readlink ">= 1.0.0" + +commander@2.9.x, commander@^2.2.0, commander@^2.5.0, commander@^2.8.1, commander@^2.9.0: + version "2.9.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4" + dependencies: + graceful-readlink ">= 1.0.0" + 
+commondir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" + +commoner@^0.10.1: + version "0.10.8" + resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5" + dependencies: + commander "^2.5.0" + detective "^4.3.1" + glob "^5.0.15" + graceful-fs "^4.1.2" + iconv-lite "^0.4.5" + mkdirp "^0.5.0" + private "^0.1.6" + q "^1.1.2" + recast "^0.11.17" + +component-bind@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1" + +component-emitter@1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3" + +component-emitter@1.2.1, component-emitter@~1.2.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6" + +component-inherit@0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143" + +compressible@~2.0.8: + version "2.0.9" + resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.9.tgz#6daab4e2b599c2770dd9e21e7a891b1c5a755425" + dependencies: + mime-db ">= 1.24.0 < 2" + +compression@^1.5.2: + version "1.6.2" + resolved "https://registry.yarnpkg.com/compression/-/compression-1.6.2.tgz#cceb121ecc9d09c52d7ad0c3350ea93ddd402bc3" + dependencies: + accepts "~1.3.3" + bytes "2.3.0" + compressible "~2.0.8" + debug "~2.2.0" + on-headers "~1.0.1" + vary "~1.1.0" + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + +connect-history-api-fallback@^1.3.0: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/connect-history-api-fallback/-/connect-history-api-fallback-1.3.0.tgz#e51d17f8f0ef0db90a64fdb47de3051556e9f169" + +connect@^3.3.5: + version "3.5.0" + resolved "https://registry.yarnpkg.com/connect/-/connect-3.5.0.tgz#b357525a0b4c1f50599cd983e1d9efeea9677198" + dependencies: + debug "~2.2.0" + finalhandler "0.5.0" + parseurl "~1.3.1" + utils-merge "1.0.0" + +console-browserify@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10" + dependencies: + date-now "^0.1.4" + +console-control-strings@^1.0.0, console-control-strings@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" + +constants-browserify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/constants-browserify/-/constants-browserify-1.0.0.tgz#c20b96d8c617748aaf1c16021760cd27fcb8cb75" + +content-disposition@0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.1.tgz#87476c6a67c8daa87e32e87616df883ba7fb071b" + +content-type@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed" + +convert-source-map@^0.3.3: + version "0.3.5" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-0.3.5.tgz#f1d802950af7dd2631a1febe0596550c86ab3190" + +convert-source-map@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.3.0.tgz#e9f3e9c6e2728efc2676696a70eb382f73106a67" + +cookie-signature@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" + +cookie@0.3.1, cookie@^0.3.1: + version "0.3.1" + resolved 
"https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb" + +cookiejar@2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/cookiejar/-/cookiejar-2.0.6.tgz#0abf356ad00d1c5a219d88d44518046dd026acfe" + +copy-to-clipboard@^3: + version "3.0.5" + resolved "https://registry.yarnpkg.com/copy-to-clipboard/-/copy-to-clipboard-3.0.5.tgz#4cd40e7c2ee159bc72d4f06b5bec8f9e0a1a3442" + dependencies: + toggle-selection "^1.0.3" + +copy-webpack-plugin@^0.3.3: + version "0.3.3" + resolved "https://registry.yarnpkg.com/copy-webpack-plugin/-/copy-webpack-plugin-0.3.3.tgz#f33bd4270fe4f8eec5591a373fba02e8de762162" + dependencies: + bluebird "^2.10.2" + lodash "^3.10.1" + minimatch "^3.0.0" + node-dir "^0.1.10" + +core-js@^1.0.0: + version "1.2.7" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636" + +core-js@^2.1.0, core-js@^2.4.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.4.1.tgz#4de911e667b0eae9124e34254b53aea6fc618d3e" + +core-util-is@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + +cryptiles@2.x.x: + version "2.0.5" + resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8" + dependencies: + boom "2.x.x" + +crypto-browserify@3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.3.0.tgz#b9fc75bb4a0ed61dcf1cd5dae96eb30c9c3e506c" + dependencies: + browserify-aes "0.4.0" + pbkdf2-compat "2.0.1" + ripemd160 "0.2.0" + sha.js "2.2.6" + +css-color-names@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/css-color-names/-/css-color-names-0.0.4.tgz#808adc2e79cf84738069b646cb20ec27beb629e0" + +css-loader@^0.23.1: + version "0.23.1" + resolved 
"https://registry.yarnpkg.com/css-loader/-/css-loader-0.23.1.tgz#9fa23f2b5c0965235910ad5ecef3b8a36390fe50" + dependencies: + css-selector-tokenizer "^0.5.1" + cssnano ">=2.6.1 <4" + loader-utils "~0.2.2" + lodash.camelcase "^3.0.1" + object-assign "^4.0.1" + postcss "^5.0.6" + postcss-modules-extract-imports "^1.0.0" + postcss-modules-local-by-default "^1.0.1" + postcss-modules-scope "^1.0.0" + postcss-modules-values "^1.1.0" + source-list-map "^0.1.4" + +css-select@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-1.2.0.tgz#2b3a110539c5355f1cd8d314623e870b121ec858" + dependencies: + boolbase "~1.0.0" + css-what "2.1" + domutils "1.5.1" + nth-check "~1.0.1" + +css-selector-tokenizer@^0.5.1: + version "0.5.4" + resolved "https://registry.yarnpkg.com/css-selector-tokenizer/-/css-selector-tokenizer-0.5.4.tgz#139bafd34a35fd0c1428487049e0699e6f6a2c21" + dependencies: + cssesc "^0.1.0" + fastparse "^1.1.1" + +css-selector-tokenizer@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/css-selector-tokenizer/-/css-selector-tokenizer-0.6.0.tgz#6445f582c7930d241dcc5007a43d6fcb8f073152" + dependencies: + cssesc "^0.1.0" + fastparse "^1.1.1" + regexpu-core "^1.0.0" + +css-what@2.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-2.1.0.tgz#9467d032c38cfaefb9f2d79501253062f87fa1bd" + +css@^2.0.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/css/-/css-2.2.1.tgz#73a4c81de85db664d4ee674f7d47085e3b2d55dc" + dependencies: + inherits "^2.0.1" + source-map "^0.1.38" + source-map-resolve "^0.3.0" + urix "^0.1.0" + +cssesc@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-0.1.0.tgz#c814903e45623371a0477b40109aaafbeeaddbb4" + +"cssnano@>=2.6.1 <4": + version "3.10.0" + resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-3.10.0.tgz#4f38f6cea2b9b17fa01490f23f1dc68ea65c1c38" + dependencies: + autoprefixer "^6.3.1" + decamelize "^1.1.2" + defined "^1.0.0" + 
has "^1.0.1" + object-assign "^4.0.1" + postcss "^5.0.14" + postcss-calc "^5.2.0" + postcss-colormin "^2.1.8" + postcss-convert-values "^2.3.4" + postcss-discard-comments "^2.0.4" + postcss-discard-duplicates "^2.0.1" + postcss-discard-empty "^2.0.1" + postcss-discard-overridden "^0.1.1" + postcss-discard-unused "^2.2.1" + postcss-filter-plugins "^2.0.0" + postcss-merge-idents "^2.1.5" + postcss-merge-longhand "^2.0.1" + postcss-merge-rules "^2.0.3" + postcss-minify-font-values "^1.0.2" + postcss-minify-gradients "^1.0.1" + postcss-minify-params "^1.0.4" + postcss-minify-selectors "^2.0.4" + postcss-normalize-charset "^1.1.0" + postcss-normalize-url "^3.0.7" + postcss-ordered-values "^2.1.0" + postcss-reduce-idents "^2.2.2" + postcss-reduce-initial "^1.0.0" + postcss-reduce-transforms "^1.0.3" + postcss-svgo "^2.1.1" + postcss-unique-selectors "^2.0.2" + postcss-value-parser "^3.2.3" + postcss-zindex "^2.0.1" + +csso@~2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/csso/-/csso-2.2.1.tgz#51fbb5347e50e81e6ed51668a48490ae6fe2afe2" + dependencies: + clap "^1.0.9" + source-map "^0.5.3" + +custom-event@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/custom-event/-/custom-event-1.0.1.tgz#5d02a46850adf1b4a317946a3928fccb5bfd0425" + +dashdash@^1.12.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" + dependencies: + assert-plus "^1.0.0" + +date-now@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b" + +debug@2, debug@2.6.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.0.tgz#bc596bcabe7617f11d9fa15361eded5608b8499b" + dependencies: + ms "0.7.2" + +debug@2.2.0, debug@~2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da" 
+ dependencies: + ms "0.7.1" + +debug@2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c" + dependencies: + ms "0.7.2" + +debug@^0.7.4: + version "0.7.4" + resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39" + +decamelize@^1.0.0, decamelize@^1.1.1, decamelize@^1.1.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + +deep-equal@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.0.1.tgz#f5d260292b660e084eff4cdbc9f08ad3247448b5" + +deep-extend@~0.4.0: + version "0.4.1" + resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.1.tgz#efe4113d08085f4e6f9687759810f807469e2253" + +define-properties@^1.1.2, define-properties@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.2.tgz#83a73f2fea569898fb737193c8f873caf6d45c94" + dependencies: + foreach "^2.0.5" + object-keys "^1.0.8" + +defined@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + +delegates@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" + +depd@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3" + +destroy@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80" + +detect-indent@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/detect-indent/-/detect-indent-4.0.0.tgz#f76d064352cdf43a1cb6ce619c4ee3a9475de208" + dependencies: + repeating "^2.0.0" + +detective@^4.3.1: + version "4.3.2" + resolved "https://registry.yarnpkg.com/detective/-/detective-4.3.2.tgz#77697e2e7947ac3fe7c8e26a6d6f115235afa91c" + dependencies: + acorn "^3.1.0" + defined "^1.0.0" + +di@^0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/di/-/di-0.0.1.tgz#806649326ceaa7caa3306d75d985ea2748ba913c" + +diff@1.4.0, diff@^1.3.2: + version "1.4.0" + resolved "https://registry.yarnpkg.com/diff/-/diff-1.4.0.tgz#7f28d2eb9ee7b15a97efd89ce63dcfdaa3ccbabf" + +disparity@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/disparity/-/disparity-2.0.0.tgz#57ddacb47324ae5f58d2cc0da886db4ce9eeb718" + dependencies: + ansi-styles "^2.0.1" + diff "^1.3.2" + +dom-converter@~0.1: + version "0.1.4" + resolved "https://registry.yarnpkg.com/dom-converter/-/dom-converter-0.1.4.tgz#a45ef5727b890c9bffe6d7c876e7b19cb0e17f3b" + dependencies: + utila "~0.3" + +dom-css@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/dom-css/-/dom-css-2.1.0.tgz#fdbc2d5a015d0a3e1872e11472bbd0e7b9e6a202" + dependencies: + add-px-to-style "1.0.0" + prefix-style "2.0.1" + to-camel-case "1.0.0" + +dom-helpers@^2.4.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/dom-helpers/-/dom-helpers-2.4.0.tgz#9bb4b245f637367b1fa670274272aa28fe06c367" + +dom-serialize@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/dom-serialize/-/dom-serialize-2.2.1.tgz#562ae8999f44be5ea3076f5419dcd59eb43ac95b" + dependencies: + custom-event "~1.0.0" + ent "~2.2.0" + extend "^3.0.0" + void-elements "^2.0.0" + +dom-serializer@0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82" + dependencies: + domelementtype "~1.1.1" + entities "~1.1.1" + +domain-browser@^1.1.1: + version "1.1.7" + resolved 
"https://registry.yarnpkg.com/domain-browser/-/domain-browser-1.1.7.tgz#867aa4b093faa05f1de08c06f4d7b21fdf8698bc" + +domelementtype@1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2" + +domelementtype@~1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b" + +domhandler@2.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.1.0.tgz#d2646f5e57f6c3bab11cf6cb05d3c0acf7412594" + dependencies: + domelementtype "1" + +domutils@1.1: + version "1.1.6" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.1.6.tgz#bddc3de099b9a2efacc51c623f28f416ecc57485" + dependencies: + domelementtype "1" + +domutils@1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf" + dependencies: + dom-serializer "0" + domelementtype "1" + +ecc-jsbn@~0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505" + dependencies: + jsbn "~0.1.0" + +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + +electron-to-chromium@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.2.0.tgz#3bd7761f85bd4163602259ae6c7ed338050b17e7" + +emojis-list@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-2.1.0.tgz#4daa4d9db00f9819880c79fa457ae5b09a1fd389" + +encodeurl@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20" + +engine.io-client@1.8.2: + version "1.8.2" + resolved 
"https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.2.tgz#c38767547f2a7d184f5752f6f0ad501006703766" + dependencies: + component-emitter "1.2.1" + component-inherit "0.0.3" + debug "2.3.3" + engine.io-parser "1.3.2" + has-cors "1.1.0" + indexof "0.0.1" + parsejson "0.0.3" + parseqs "0.0.5" + parseuri "0.0.5" + ws "1.1.1" + xmlhttprequest-ssl "1.5.3" + yeast "0.1.2" + +engine.io-parser@1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.2.tgz#937b079f0007d0893ec56d46cb220b8cb435220a" + dependencies: + after "0.8.2" + arraybuffer.slice "0.0.6" + base64-arraybuffer "0.1.5" + blob "0.0.4" + has-binary "0.1.7" + wtf-8 "1.0.0" + +engine.io@1.8.2: + version "1.8.2" + resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.2.tgz#6b59be730b348c0125b0a4589de1c355abcf7a7e" + dependencies: + accepts "1.3.3" + base64id "1.0.0" + cookie "0.3.1" + debug "2.3.3" + engine.io-parser "1.3.2" + ws "1.1.1" + +enhanced-resolve@~0.9.0: + version "0.9.1" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-0.9.1.tgz#4d6e689b3725f86090927ccc86cd9f1635b89e2e" + dependencies: + graceful-fs "^4.1.2" + memory-fs "^0.2.0" + tapable "^0.1.8" + +ent@~2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/ent/-/ent-2.2.0.tgz#e964219325a21d05f44466a2f686ed6ce5f5dd1d" + +entities@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0" + +envify@^3.0.0: + version "3.4.1" + resolved "https://registry.yarnpkg.com/envify/-/envify-3.4.1.tgz#d7122329e8df1688ba771b12501917c9ce5cbce8" + dependencies: + jstransform "^11.0.3" + through "~2.3.4" + +errno@^0.1.1, errno@^0.1.3: + version "0.1.4" + resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.4.tgz#b896e23a9e5e8ba33871fc996abd3635fc9a1c7d" + dependencies: + prr "~0.0.0" + +es-abstract@^1.6.1: + version "1.7.0" + resolved 
"https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.7.0.tgz#dfade774e01bfcd97f96180298c449c8623fb94c" + dependencies: + es-to-primitive "^1.1.1" + function-bind "^1.1.0" + is-callable "^1.1.3" + is-regex "^1.0.3" + +es-to-primitive@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.1.1.tgz#45355248a88979034b6792e19bb81f2b7975dd0d" + dependencies: + is-callable "^1.1.1" + is-date-object "^1.0.1" + is-symbol "^1.0.1" + +escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + +escape-string-regexp@1.0.2, escape-string-regexp@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.2.tgz#4dbc2fe674e71949caf3fb2695ce7f2dc1d9a8d1" + +esformatter-jsx-ignore@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/esformatter-jsx-ignore/-/esformatter-jsx-ignore-1.0.6.tgz#e594f6b77db6f85d8c1179ae6dc465756d422489" + dependencies: + esprima-fb "^12001.1.0-dev-harmony-fb" + fresh-falafel "^0.2.6" + +esformatter-parser@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/esformatter-parser/-/esformatter-parser-1.0.0.tgz#0854072d0487539ed39cae38d8a5432c17ec11d3" + dependencies: + acorn-to-esprima "^2.0.8" + babel-traverse "^6.9.0" + babylon "^6.8.0" + rocambole "^0.7.0" + +esformatter@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/esformatter/-/esformatter-0.10.0.tgz#e321ecc3d94083372cdfcf5c6f942cef6fec59d3" + dependencies: + acorn-to-esprima "^2.0.6" + babel-traverse "^6.4.5" + debug "^0.7.4" + disparity "^2.0.0" + esformatter-parser "^1.0.0" + glob "^7.0.5" + minimatch "^3.0.2" + minimist "^1.1.1" + mout ">=0.9 <2.0" + npm-run "^3.0.0" + resolve "^1.1.5" + rocambole ">=0.7 <2.0" + rocambole-indent "^2.0.4" + rocambole-linebreak "^1.0.2" + rocambole-node "~1.0" + rocambole-token "^1.1.2" + rocambole-whitespace 
"^1.0.0" + stdin "*" + strip-json-comments "~0.1.1" + supports-color "^1.3.1" + user-home "^2.0.0" + +esprima-fb@^12001.1.0-dev-harmony-fb: + version "12001.1.0-dev-harmony-fb" + resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-12001.1.0-dev-harmony-fb.tgz#d84400384ba95ce2678c617ad24a7f40808da915" + +esprima-fb@^15001.1.0-dev-harmony-fb: + version "15001.1.0-dev-harmony-fb" + resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1.0-dev-harmony-fb.tgz#30a947303c6b8d5e955bee2b99b1d233206a6901" + +esprima@^2.0.0, esprima@^2.1, esprima@^2.6.0: + version "2.7.3" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581" + +esprima@~3.1.0: + version "3.1.3" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633" + +esutils@^2.0.0, esutils@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b" + +etag@~1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.7.0.tgz#03d30b5f67dd6e632d2945d30d6652731a34d5d8" + +eventemitter3@1.x.x: + version "1.2.0" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508" + +events@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/events/-/events-1.1.1.tgz#9ebdb7635ad099c70dcc4c2a1f5004288e8bd924" + +eventsource@0.1.6: + version "0.1.6" + resolved "https://registry.yarnpkg.com/eventsource/-/eventsource-0.1.6.tgz#0acede849ed7dd1ccc32c811bb11b944d4f29232" + dependencies: + original ">=0.0.5" + +expand-braces@^0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/expand-braces/-/expand-braces-0.1.2.tgz#488b1d1d2451cb3d3a6b192cfc030f44c5855fea" + dependencies: + array-slice "^0.2.3" + array-unique "^0.2.1" + braces "^0.1.2" + +expand-brackets@^0.1.4: + version "0.1.5" + resolved 
"https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b" + dependencies: + is-posix-bracket "^0.1.0" + +expand-range@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-0.1.1.tgz#4cb8eda0993ca56fa4f41fc42f3cbb4ccadff044" + dependencies: + is-number "^0.1.1" + repeat-string "^0.2.2" + +expand-range@^1.8.1: + version "1.8.2" + resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337" + dependencies: + fill-range "^2.1.0" + +expect@^1.20.2: + version "1.20.2" + resolved "https://registry.yarnpkg.com/expect/-/expect-1.20.2.tgz#d458fe4c56004036bae3232416a3f6361f04f965" + dependencies: + define-properties "~1.1.2" + has "^1.0.1" + is-equal "^1.5.1" + is-regex "^1.0.3" + object-inspect "^1.1.0" + object-keys "^1.0.9" + tmatch "^2.0.1" + +express@^4.13.3: + version "4.14.0" + resolved "https://registry.yarnpkg.com/express/-/express-4.14.0.tgz#c1ee3f42cdc891fb3dc650a8922d51ec847d0d66" + dependencies: + accepts "~1.3.3" + array-flatten "1.1.1" + content-disposition "0.5.1" + content-type "~1.0.2" + cookie "0.3.1" + cookie-signature "1.0.6" + debug "~2.2.0" + depd "~1.1.0" + encodeurl "~1.0.1" + escape-html "~1.0.3" + etag "~1.7.0" + finalhandler "0.5.0" + fresh "0.3.0" + merge-descriptors "1.0.1" + methods "~1.1.2" + on-finished "~2.3.0" + parseurl "~1.3.1" + path-to-regexp "0.1.7" + proxy-addr "~1.1.2" + qs "6.2.0" + range-parser "~1.2.0" + send "0.14.1" + serve-static "~1.11.1" + type-is "~1.6.13" + utils-merge "1.0.0" + vary "~1.1.0" + +extend@3.0.0, extend@^3.0.0, extend@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.0.tgz#5a474353b9f3353ddd8176dfd37b91c83a46f1d4" + +extglob@^0.3.1: + version "0.3.2" + resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1" + dependencies: + is-extglob "^1.0.0" + +extsprintf@1.0.2: + 
version "1.0.2" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550" + +fastparse@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/fastparse/-/fastparse-1.1.1.tgz#d1e2643b38a94d7583b479060e6c4affc94071f8" + +faye-websocket@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4" + dependencies: + websocket-driver ">=0.5.1" + +faye-websocket@~0.11.0: + version "0.11.1" + resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.11.1.tgz#f0efe18c4f56e4f40afc7e06c719fd5ee6188f38" + dependencies: + websocket-driver ">=0.5.1" + +fbjs@^0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/fbjs/-/fbjs-0.6.1.tgz#9636b7705f5ba9684d44b72f78321254afc860f7" + dependencies: + core-js "^1.0.0" + loose-envify "^1.0.0" + promise "^7.0.3" + ua-parser-js "^0.7.9" + whatwg-fetch "^0.9.0" + +filename-regex@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.0.tgz#996e3e80479b98b9897f15a8a58b3d084e926775" + +fill-range@^2.1.0: + version "2.2.3" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723" + dependencies: + is-number "^2.1.0" + isobject "^2.0.0" + randomatic "^1.1.3" + repeat-element "^1.1.2" + repeat-string "^1.5.2" + +finalhandler@0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-0.5.0.tgz#e9508abece9b6dba871a6942a1d7911b91911ac7" + dependencies: + debug "~2.2.0" + escape-html "~1.0.3" + on-finished "~2.3.0" + statuses "~1.3.0" + unpipe "~1.0.0" + +find-cache-dir@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-0.1.1.tgz#c8defae57c8a52a8a784f9e31c57c742e993a0b9" + dependencies: + commondir "^1.0.1" + mkdirp "^0.5.1" + pkg-dir "^1.0.0" + +find-up@^1.0.0: + version 
"1.1.2" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f" + dependencies: + path-exists "^2.0.0" + pinkie-promise "^2.0.0" + +flatten@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/flatten/-/flatten-1.0.2.tgz#dae46a9d78fbe25292258cc1e780a41d95c03782" + +font-awesome@^4.7.0: + version "4.7.0" + resolved "https://registry.yarnpkg.com/font-awesome/-/font-awesome-4.7.0.tgz#8fa8cf0411a1a31afd07b06d2902bb9fc815a133" + +for-in@^0.1.5: + version "0.1.6" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-0.1.6.tgz#c9f96e89bfad18a545af5ec3ed352a1d9e5b4dc8" + +for-own@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.4.tgz#0149b41a39088c7515f51ebe1c1386d45f935072" + dependencies: + for-in "^0.1.5" + +foreach@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.5.tgz#0bee005018aeb260d0a3af3ae658dd0136ec1b99" + +forever-agent@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" + +form-data@1.0.0-rc3: + version "1.0.0-rc3" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.0-rc3.tgz#d35bc62e7fbc2937ae78f948aaa0d38d90607577" + dependencies: + async "^1.4.0" + combined-stream "^1.0.5" + mime-types "^2.1.3" + +form-data@~2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.2.tgz#89c3534008b97eada4cbb157d58f6f5df025eae4" + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.5" + mime-types "^2.1.12" + +formidable@~1.0.14: + version "1.0.17" + resolved "https://registry.yarnpkg.com/formidable/-/formidable-1.0.17.tgz#ef5491490f9433b705faa77249c99029ae348559" + +forwarded@~0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.0.tgz#19ef9874c4ae1c297bcf078fde63a09b66a84363" + +fresh-falafel@^0.2.6: + version "0.2.6" + resolved 
"https://registry.yarnpkg.com/fresh-falafel/-/fresh-falafel-0.2.6.tgz#3775b982055fdffb1f48cb17755164f8fc522339" + dependencies: + esprima "^2.0.0" + +fresh@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.3.0.tgz#651f838e22424e7566de161d8358caa199f83d4f" + +fs-access@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/fs-access/-/fs-access-1.0.1.tgz#d6a87f262271cefebec30c553407fb995da8777a" + dependencies: + null-check "^1.0.0" + +fs-readdir-recursive@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-1.0.0.tgz#8cd1745c8b4f8a29c8caec392476921ba195f560" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + +fsevents@^1.0.0: + version "1.0.17" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.0.17.tgz#8537f3f12272678765b4fd6528c0f1f66f8f4558" + dependencies: + nan "^2.3.0" + node-pre-gyp "^0.6.29" + +fstream-ignore@~1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105" + dependencies: + fstream "^1.0.0" + inherits "2" + minimatch "^3.0.0" + +fstream@^1.0.0, fstream@^1.0.2, fstream@~1.0.10: + version "1.0.10" + resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.10.tgz#604e8a92fe26ffd9f6fae30399d4984e1ab22822" + dependencies: + graceful-fs "^4.1.2" + inherits "~2.0.0" + mkdirp ">=0.5 0" + rimraf "2" + +function-bind@^1.0.2, function-bind@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.0.tgz#16176714c801798e4e8f2cf7f7529467bb4a5771" + +gauge@~2.7.1: + version "2.7.2" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.2.tgz#15cecc31b02d05345a5d6b0e171cdb3ad2307774" + dependencies: + aproba "^1.0.3" + console-control-strings "^1.0.0" + has-unicode "^2.0.0" + object-assign "^4.1.0" + 
signal-exit "^3.0.0" + string-width "^1.0.1" + strip-ansi "^3.0.1" + supports-color "^0.2.0" + wide-align "^1.1.0" + +generate-function@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74" + +generate-object-property@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0" + dependencies: + is-property "^1.0.0" + +getpass@^0.1.1: + version "0.1.6" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.6.tgz#283ffd9fc1256840875311c1b60e8c40187110e6" + dependencies: + assert-plus "^1.0.0" + +glob-base@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4" + dependencies: + glob-parent "^2.0.0" + is-glob "^2.0.0" + +glob-parent@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28" + dependencies: + is-glob "^2.0.0" + +glob@3.2.11: + version "3.2.11" + resolved "https://registry.yarnpkg.com/glob/-/glob-3.2.11.tgz#4a973f635b9190f715d10987d5c00fd2815ebe3d" + dependencies: + inherits "2" + minimatch "0.3" + +glob@^5.0.15: + version "5.0.15" + resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" + dependencies: + inflight "^1.0.4" + inherits "2" + minimatch "2 || 3" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^6.0.4: + version "6.0.4" + resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22" + dependencies: + inflight "^1.0.4" + inherits "2" + minimatch "2 || 3" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^7.0.0, glob@^7.0.5: + version "7.1.1" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8" + dependencies: + 
fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.2" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globals@^9.0.0: + version "9.14.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-9.14.0.tgz#8859936af0038741263053b39d0e76ca241e4034" + +graceful-fs@^4.1.2, graceful-fs@^4.1.4: + version "4.1.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658" + +"graceful-readlink@>= 1.0.0": + version "1.0.1" + resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725" + +growl@1.9.2: + version "1.9.2" + resolved "https://registry.yarnpkg.com/growl/-/growl-1.9.2.tgz#0ea7743715db8d8de2c5ede1775e1b45ac85c02f" + +har-validator@~2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d" + dependencies: + chalk "^1.1.1" + commander "^2.9.0" + is-my-json-valid "^2.12.4" + pinkie-promise "^2.0.0" + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + dependencies: + ansi-regex "^2.0.0" + +has-binary@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c" + dependencies: + isarray "0.0.1" + +has-cors@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39" + +has-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa" + +has-unicode@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" + +has@^1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/has/-/has-1.0.1.tgz#8461733f538b0837c9361e39a9ab9e9704dc2f28" + dependencies: + function-bind "^1.0.2" + +hawk@~3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4" + dependencies: + boom "2.x.x" + cryptiles "2.x.x" + hoek "2.x.x" + sntp "1.x.x" + +he@1.1.x: + version "1.1.1" + resolved "https://registry.yarnpkg.com/he/-/he-1.1.1.tgz#93410fd21b009735151f8868c2f271f3427e23fd" + +history@^1.17.0: + version "1.17.0" + resolved "https://registry.yarnpkg.com/history/-/history-1.17.0.tgz#c5483caa5a1d1fea00a1a7d8d19b874016711d29" + dependencies: + deep-equal "^1.0.0" + invariant "^2.0.0" + query-string "^3.0.0" + warning "^2.0.0" + +history@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/history/-/history-2.1.2.tgz#4aa2de897a0e4867e4539843be6ecdb2986bfdec" + dependencies: + deep-equal "^1.0.0" + invariant "^2.0.0" + query-string "^3.0.0" + warning "^2.0.0" + +hoek@2.x.x: + version "2.16.3" + resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed" + +hoist-non-react-statics@^1.0.3, hoist-non-react-statics@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-1.2.0.tgz#aa448cf0986d55cc40773b17174b7dd066cb7cfb" + +home-or-tmp@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-2.0.0.tgz#e36c3f2d2cae7d746a857e38d18d5f32a7882db8" + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.1" + +html-comment-regex@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/html-comment-regex/-/html-comment-regex-1.1.1.tgz#668b93776eaae55ebde8f3ad464b307a4963625e" + +html-minifier@^3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/html-minifier/-/html-minifier-3.2.3.tgz#d2ff536e24d95726c332493d8f77d84dbed85372" + dependencies: + camel-case "3.0.x" + clean-css "3.4.x" + commander "2.9.x" 
+ he "1.1.x" + ncname "1.0.x" + param-case "2.1.x" + relateurl "0.2.x" + uglify-js "2.7.x" + +html-webpack-plugin@^2.22.0: + version "2.26.0" + resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-2.26.0.tgz#ba97c8a66f912b85df80d2aeea65966c8bd9249e" + dependencies: + bluebird "^3.4.7" + html-minifier "^3.2.3" + loader-utils "^0.2.16" + lodash "^4.17.3" + pretty-error "^2.0.2" + toposort "^1.0.0" + +htmlparser2@~3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.3.0.tgz#cc70d05a59f6542e43f0e685c982e14c924a9efe" + dependencies: + domelementtype "1" + domhandler "2.1" + domutils "1.1" + readable-stream "1.0" + +http-errors@~1.5.0, http-errors@~1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.5.1.tgz#788c0d2c1de2c81b9e6e8c01843b6b97eb920750" + dependencies: + inherits "2.0.3" + setprototypeof "1.0.2" + statuses ">= 1.3.1 < 2" + +http-proxy-middleware@~0.17.1: + version "0.17.3" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-0.17.3.tgz#940382147149b856084f5534752d5b5a8168cd1d" + dependencies: + http-proxy "^1.16.2" + is-glob "^3.1.0" + lodash "^4.17.2" + micromatch "^2.3.11" + +http-proxy@^1.13.0, http-proxy@^1.16.2: + version "1.16.2" + resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742" + dependencies: + eventemitter3 "1.x.x" + requires-port "1.x.x" + +http-signature@~1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf" + dependencies: + assert-plus "^0.2.0" + jsprim "^1.2.2" + sshpk "^1.7.0" + +https-browserify@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/https-browserify/-/https-browserify-0.0.1.tgz#3f91365cabe60b77ed0ebba24b454e3e09d95a82" + +humanize@0.0.9: + version "0.0.9" + resolved 
"https://registry.yarnpkg.com/humanize/-/humanize-0.0.9.tgz#1994ffaecdfe9c441ed2bdac7452b7bb4c9e41a4" + +iconv-lite@0.4.15, iconv-lite@^0.4.5: + version "0.4.15" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.15.tgz#fe265a218ac6a57cfe854927e9d04c19825eddeb" + +icss-replace-symbols@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/icss-replace-symbols/-/icss-replace-symbols-1.0.2.tgz#cb0b6054eb3af6edc9ab1d62d01933e2d4c8bfa5" + +ieee754@^1.1.4: + version "1.1.8" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.1.8.tgz#be33d40ac10ef1926701f6f08a2d86fbfd1ad3e4" + +image-size@~0.5.0: + version "0.5.1" + resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.1.tgz#28eea8548a4b1443480ddddc1e083ae54652439f" + +indexes-of@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/indexes-of/-/indexes-of-1.0.1.tgz#f30f716c8e2bd346c7b67d3df3915566a7c05607" + +indexof@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d" + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@~2.0.0, inherits@~2.0.1: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + +inherits@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.1.tgz#b17d08d326b4423e568eff719f91b0b1cbdf69f1" + +ini@~1.3.0: + version "1.3.4" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e" + +interpret@^0.6.4: + version "0.6.6" + resolved "https://registry.yarnpkg.com/interpret/-/interpret-0.6.6.tgz#fecd7a18e7ce5ca6abfb953e1f86213a49f1625b" + +interpret@^1.0.0: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/interpret/-/interpret-1.0.1.tgz#d579fb7f693b858004947af39fa0db49f795602c" + +invariant@^2.0.0, invariant@^2.1.0, invariant@^2.1.2, invariant@^2.2.0, invariant@^2.2.1: + version "2.2.2" + resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.2.tgz#9e1f56ac0acdb6bf303306f338be3b204ae60360" + dependencies: + loose-envify "^1.0.0" + +invert-kv@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6" + +ipaddr.js@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.2.0.tgz#8aba49c9192799585bdd643e0ccb50e8ae777ba4" + +is-absolute-url@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6" + +is-arrow-function@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-arrow-function/-/is-arrow-function-2.0.3.tgz#29be2c2d8d9450852b8bbafb635ba7b8d8e87ec2" + dependencies: + is-callable "^1.0.4" + +is-binary-path@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898" + dependencies: + binary-extensions "^1.0.0" + +is-boolean-object@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.0.0.tgz#98f8b28030684219a95f375cfbd88ce3405dff93" + +is-buffer@^1.0.2: + version "1.1.4" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.4.tgz#cfc86ccd5dc5a52fa80489111c6920c457e2d98b" + +is-callable@^1.0.4, is-callable@^1.1.1, is-callable@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.1.3.tgz#86eb75392805ddc33af71c92a0eedf74ee7604b2" + +is-date-object@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.1.tgz#9aa20eb6aeebbff77fbd33e74ca01b33581d3a16" + 
+is-dotfile@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d" + +is-equal-shallow@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534" + dependencies: + is-primitive "^2.0.0" + +is-equal@^1.5.1: + version "1.5.3" + resolved "https://registry.yarnpkg.com/is-equal/-/is-equal-1.5.3.tgz#05b7fa3a1122cbc71c1ef41ce0142d5532013b29" + dependencies: + has "^1.0.1" + is-arrow-function "^2.0.3" + is-boolean-object "^1.0.0" + is-callable "^1.1.3" + is-date-object "^1.0.1" + is-generator-function "^1.0.3" + is-number-object "^1.0.3" + is-regex "^1.0.3" + is-string "^1.0.4" + is-symbol "^1.0.1" + object.entries "^1.0.3" + +is-extendable@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" + +is-extglob@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0" + +is-extglob@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + +is-finite@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa" + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" + dependencies: + number-is-nan "^1.0.0" + +is-generator-function@^1.0.3: + version "1.0.6" + resolved "https://registry.yarnpkg.com/is-generator-function/-/is-generator-function-1.0.6.tgz#9e71653cd15fff341c79c4151460a131d31e9fc4" + +is-glob@^2.0.0, is-glob@^2.0.1: + version "2.0.1" + resolved 
"https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863" + dependencies: + is-extglob "^1.0.0" + +is-glob@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-3.1.0.tgz#7ba5ae24217804ac70707b96922567486cc3e84a" + dependencies: + is-extglob "^2.1.0" + +is-my-json-valid@^2.12.4: + version "2.15.0" + resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.15.0.tgz#936edda3ca3c211fd98f3b2d3e08da43f7b2915b" + dependencies: + generate-function "^2.0.0" + generate-object-property "^1.1.0" + jsonpointer "^4.0.0" + xtend "^4.0.0" + +is-number-object@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.3.tgz#f265ab89a9f445034ef6aff15a8f00b00f551799" + +is-number@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-0.1.1.tgz#69a7af116963d47206ec9bd9b48a14216f1e3806" + +is-number@^2.0.2, is-number@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f" + dependencies: + kind-of "^3.0.2" + +is-plain-obj@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + +is-posix-bracket@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4" + +is-primitive@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575" + +is-property@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84" + +is-regex@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.0.3.tgz#0d55182bddf9f2fde278220aec3a75642c908637" + +is-string@^1.0.4: + 
version "1.0.4" + resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.4.tgz#cc3a9b69857d621e963725a24caeec873b826e64" + +is-svg@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-svg/-/is-svg-2.1.0.tgz#cf61090da0d9efbcab8722deba6f032208dbb0e9" + dependencies: + html-comment-regex "^1.1.0" + +is-symbol@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.1.tgz#3cc59f00025194b6ab2e38dbae6689256b660572" + +is-typedarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + +isarray@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" + +isarray@1.0.0, isarray@^1.0.0, isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + +isbinaryfile@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-3.0.2.tgz#4a3e974ec0cba9004d3fc6cde7209ea69368a621" + +isexe@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-1.1.2.tgz#36f3e22e60750920f5e7241a476a8c6a42275ad0" + +isobject@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" + dependencies: + isarray "1.0.0" + +isstream@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" + +jade@0.26.3: + version "0.26.3" + resolved "https://registry.yarnpkg.com/jade/-/jade-0.26.3.tgz#8f10d7977d8d79f2f6ff862a81b0513ccb25686c" + dependencies: + commander "0.6.1" + mkdirp "0.3.0" + +jodid25519@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967" + dependencies: + jsbn "~0.1.0" + 
+js-base64@^2.1.9: + version "2.1.9" + resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.1.9.tgz#f0e80ae039a4bd654b5f281fc93f04a914a7fcce" + +js-tokens@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.0.tgz#a2f2a969caae142fb3cd56228358c89366957bd1" + +js-yaml@~3.6.1: + version "3.6.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.6.1.tgz#6e5fe67d8b205ce4d22fad05b7781e8dadcc4b30" + dependencies: + argparse "^1.0.7" + esprima "^2.6.0" + +jsbn@~0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.0.tgz#650987da0dd74f4ebf5a11377a2aa2d273e97dfd" + +jsesc@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-1.3.0.tgz#46c3fec8c1892b12b0833db9bc7622176dbab34b" + +jsesc@~0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" + +json-loader@^0.5.4: + version "0.5.4" + resolved "https://registry.yarnpkg.com/json-loader/-/json-loader-0.5.4.tgz#8baa1365a632f58a3c46d20175fc6002c96e37de" + +json-schema@0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" + +json-stringify-safe@~5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + +json3@3.3.2, json3@^3.3.2: + version "3.3.2" + resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1" + +json5@^0.5.0: + version "0.5.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" + +jsonpointer@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9" + +jsprim@^1.2.2: + version "1.3.1" + resolved 
"https://registry.yarnpkg.com/jsprim/-/jsprim-1.3.1.tgz#2a7256f70412a29ee3670aaca625994c4dcff252" + dependencies: + extsprintf "1.0.2" + json-schema "0.2.3" + verror "1.3.6" + +jstransform@^11.0.3: + version "11.0.3" + resolved "https://registry.yarnpkg.com/jstransform/-/jstransform-11.0.3.tgz#09a78993e0ae4d4ef4487f6155a91f6190cb4223" + dependencies: + base62 "^1.1.0" + commoner "^0.10.1" + esprima-fb "^15001.1.0-dev-harmony-fb" + object-assign "^2.0.0" + source-map "^0.4.2" + +karma-chrome-launcher@^0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/karma-chrome-launcher/-/karma-chrome-launcher-0.2.3.tgz#4c6d700d163a9d34c618efd87918be49e7a4a8c9" + dependencies: + fs-access "^1.0.0" + which "^1.2.1" + +karma-cli@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/karma-cli/-/karma-cli-0.1.2.tgz#cacea84371ece19876265c8fa102ebbb9fee4a8c" + dependencies: + resolve "^1.1.6" + +karma-firefox-launcher@^0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/karma-firefox-launcher/-/karma-firefox-launcher-0.1.7.tgz#c05dd86533691e62f31952595098e8bd357d39f3" + +karma-mocha@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/karma-mocha/-/karma-mocha-0.2.2.tgz#388ed917da15dcb196d1b915c1934ef803193f8e" + +karma-webpack@^1.7.0: + version "1.8.1" + resolved "https://registry.yarnpkg.com/karma-webpack/-/karma-webpack-1.8.1.tgz#39d5fd2edeea3cc3ef5b405989b37d5b0e6a3b4e" + dependencies: + async "~0.9.0" + loader-utils "^0.2.5" + lodash "^3.8.0" + source-map "^0.1.41" + webpack-dev-middleware "^1.0.11" + +karma@^0.13.22: + version "0.13.22" + resolved "https://registry.yarnpkg.com/karma/-/karma-0.13.22.tgz#07750b1bd063d7e7e7b91bcd2e6354d8f2aa8744" + dependencies: + batch "^0.5.3" + bluebird "^2.9.27" + body-parser "^1.12.4" + chokidar "^1.4.1" + colors "^1.1.0" + connect "^3.3.5" + core-js "^2.1.0" + di "^0.0.1" + dom-serialize "^2.2.0" + expand-braces "^0.1.1" + glob "^7.0.0" + graceful-fs "^4.1.2" + http-proxy "^1.13.0" + 
isbinaryfile "^3.0.0" + lodash "^3.8.0" + log4js "^0.6.31" + mime "^1.3.4" + minimatch "^3.0.0" + optimist "^0.6.1" + rimraf "^2.3.3" + socket.io "^1.4.5" + source-map "^0.5.3" + useragent "^2.1.6" + +keycode@^2.1.0: + version "2.1.8" + resolved "https://registry.yarnpkg.com/keycode/-/keycode-2.1.8.tgz#94d2b7098215eff0e8f9a8931d5a59076c4532fb" + +kind-of@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.1.0.tgz#475d698a5e49ff5e53d14e3e732429dc8bf4cf47" + dependencies: + is-buffer "^1.0.2" + +lazy-cache@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e" + +lcid@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835" + dependencies: + invert-kv "^1.0.0" + +less-loader@^2.2.3: + version "2.2.3" + resolved "https://registry.yarnpkg.com/less-loader/-/less-loader-2.2.3.tgz#b6d8f8139c8493df09d992a93a00734b08f84528" + dependencies: + loader-utils "^0.2.5" + +less@^2.7.1: + version "2.7.2" + resolved "https://registry.yarnpkg.com/less/-/less-2.7.2.tgz#368d6cc73e1fb03981183280918743c5dcf9b3df" + optionalDependencies: + errno "^0.1.1" + graceful-fs "^4.1.2" + image-size "~0.5.0" + mime "^1.2.11" + mkdirp "^0.5.0" + promise "^7.1.1" + request "^2.72.0" + source-map "^0.5.3" + +loader-utils@0.2.x, loader-utils@^0.2.11, loader-utils@^0.2.16, loader-utils@^0.2.5, loader-utils@^0.2.7, loader-utils@~0.2.2: + version "0.2.16" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-0.2.16.tgz#f08632066ed8282835dff88dfb52704765adee6d" + dependencies: + big.js "^3.1.3" + emojis-list "^2.0.0" + json5 "^0.5.0" + object-assign "^4.0.1" + +local-storage-fallback@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/local-storage-fallback/-/local-storage-fallback-1.3.0.tgz#d53c2455391ae2a1fa32db02031224cca9a8c8da" + dependencies: + cookie "^0.3.1" + 
+lodash-compat@^3.10.1: + version "3.10.2" + resolved "https://registry.yarnpkg.com/lodash-compat/-/lodash-compat-3.10.2.tgz#c6940128a9d30f8e902cd2cf99fd0cba4ecfc183" + +lodash-es@^4.2.1: + version "4.17.4" + resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.4.tgz#dcc1d7552e150a0640073ba9cb31d70f032950e7" + +lodash._createcompounder@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lodash._createcompounder/-/lodash._createcompounder-3.0.0.tgz#5dd2cb55372d6e70e0e2392fb2304d6631091075" + dependencies: + lodash.deburr "^3.0.0" + lodash.words "^3.0.0" + +lodash._root@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/lodash._root/-/lodash._root-3.0.1.tgz#fba1c4524c19ee9a5f8136b4609f017cf4ded692" + +lodash.camelcase@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/lodash.camelcase/-/lodash.camelcase-3.0.1.tgz#932c8b87f8a4377897c67197533282f97aeac298" + dependencies: + lodash._createcompounder "^3.0.0" + +lodash.deburr@^3.0.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/lodash.deburr/-/lodash.deburr-3.2.0.tgz#6da8f54334a366a7cf4c4c76ef8d80aa1b365ed5" + dependencies: + lodash._root "^3.0.0" + +lodash.indexof@^4.0.5: + version "4.0.5" + resolved "https://registry.yarnpkg.com/lodash.indexof/-/lodash.indexof-4.0.5.tgz#53714adc2cddd6ed87638f893aa9b6c24e31ef3c" + +lodash.memoize@^4.1.0: + version "4.1.2" + resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" + +lodash.uniq@^4.3.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" + +lodash.words@^3.0.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/lodash.words/-/lodash.words-3.2.0.tgz#4e2a8649bc08745b17c695b1a3ce8fee596623b3" + dependencies: + lodash._root "^3.0.0" + +lodash@^3.10.1, lodash@^3.8.0: + version "3.10.1" + resolved 
"https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6" + +lodash@^4.17.2, lodash@^4.17.3, lodash@^4.2.0, lodash@^4.2.1: + version "4.17.4" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae" + +log4js@^0.6.31: + version "0.6.38" + resolved "https://registry.yarnpkg.com/log4js/-/log4js-0.6.38.tgz#2c494116695d6fb25480943d3fc872e662a522fd" + dependencies: + readable-stream "~1.0.2" + semver "~4.3.3" + +longest@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097" + +loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.3.1.tgz#d1a8ad33fa9ce0e713d65fdd0ac8b748d478c848" + dependencies: + js-tokens "^3.0.0" + +lower-case@^1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.3.tgz#c92393d976793eee5ba4edb583cf8eae35bd9bfb" + +lru-cache@2: + version "2.7.3" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952" + +lru-cache@2.2.x: + version "2.2.4" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.2.4.tgz#6c658619becf14031d0d0b594b16042ce4dc063d" + +macaddress@^0.2.8: + version "0.2.8" + resolved "https://registry.yarnpkg.com/macaddress/-/macaddress-0.2.8.tgz#5904dc537c39ec6dbefeae902327135fa8511f12" + +math-expression-evaluator@^1.2.14: + version "1.2.15" + resolved "https://registry.yarnpkg.com/math-expression-evaluator/-/math-expression-evaluator-1.2.15.tgz#38dc5f0194c5bf5ff1c690ad4c4b64df71ac0187" + dependencies: + lodash.indexof "^4.0.5" + +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + +memory-fs@^0.2.0: + version "0.2.0" + resolved 
"https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.2.0.tgz#f2bb25368bc121e391c2520de92969caee0a0290" + +memory-fs@~0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.3.0.tgz#7bcc6b629e3a43e871d7e29aca6ae8a7f15cbb20" + dependencies: + errno "^0.1.3" + readable-stream "^2.0.1" + +memory-fs@~0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.4.1.tgz#3a9a20b8462523e447cfbc7e8bb80ed667bfc552" + dependencies: + errno "^0.1.3" + readable-stream "^2.0.1" + +merge-descriptors@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" + +methods@~1.1.1, methods@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" + +micromatch@^2.1.5, micromatch@^2.3.11: + version "2.3.11" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565" + dependencies: + arr-diff "^2.0.0" + array-unique "^0.2.1" + braces "^1.8.2" + expand-brackets "^0.1.4" + extglob "^0.3.1" + filename-regex "^2.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.1" + kind-of "^3.0.2" + normalize-path "^2.0.1" + object.omit "^2.0.0" + parse-glob "^3.0.4" + regex-cache "^0.4.2" + +"mime-db@>= 1.24.0 < 2", mime-db@^1.25.0, mime-db@~1.26.0: + version "1.26.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.26.0.tgz#eaffcd0e4fc6935cf8134da246e2e6c35305adff" + +mime-types@^2.1.12, mime-types@^2.1.13, mime-types@^2.1.3, mime-types@~2.1.11, mime-types@~2.1.13, mime-types@~2.1.7: + version "2.1.14" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.14.tgz#f7ef7d97583fcaf3b7d282b6f8b5679dab1e94ee" + dependencies: + mime-db "~1.26.0" + +mime@1.2.x: + version "1.2.11" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.2.11.tgz#58203eed86e3a5ef17aed2b7d9ebd47f0a60dd10" + +mime@1.3.4, 
mime@^1.2.11, mime@^1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53" + +minimatch@0.3: + version "0.3.0" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.3.0.tgz#275d8edaac4f1bb3326472089e7949c8394699dd" + dependencies: + lru-cache "2" + sigmund "~1.0.0" + +"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774" + dependencies: + brace-expansion "^1.0.0" + +minimist@0.0.8, minimist@~0.0.1: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + +minimist@^1.1.1, minimist@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" + +mkdirp@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.0.tgz#1bbf5ab1ba827af23575143490426455f481fe1e" + +mkdirp@0.5.1, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + dependencies: + minimist "0.0.8" + +mocha@^2.5.3: + version "2.5.3" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-2.5.3.tgz#161be5bdeb496771eb9b35745050b622b5aefc58" + dependencies: + commander "2.3.0" + debug "2.2.0" + diff "1.4.0" + escape-string-regexp "1.0.2" + glob "3.2.11" + growl "1.9.2" + jade "0.26.3" + mkdirp "0.5.1" + supports-color "1.2.0" + to-iso-string "0.0.2" + +moment@^2.15.1: + version "2.17.1" + resolved "https://registry.yarnpkg.com/moment/-/moment-2.17.1.tgz#fed9506063f36b10f066c8b59a144d7faebe1d82" + +"mout@>=0.9 <2.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/mout/-/mout-1.0.0.tgz#9bdf1d4af57d66d47cb353a6335a3281098e1501" + +mout@^0.11.0: + version 
"0.11.1" + resolved "https://registry.yarnpkg.com/mout/-/mout-0.11.1.tgz#ba3611df5f0e5b1ffbfd01166b8f02d1f5fa2b99" + +ms@0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098" + +ms@0.7.2: + version "0.7.2" + resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765" + +nan@^2.3.0: + version "2.5.1" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.5.1.tgz#d5b01691253326a97a2bbee9e61c55d8d60351e2" + +ncname@1.0.x: + version "1.0.0" + resolved "https://registry.yarnpkg.com/ncname/-/ncname-1.0.0.tgz#5b57ad18b1ca092864ef62b0b1ed8194f383b71c" + dependencies: + xml-char-classes "^1.0.0" + +negotiator@0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9" + +no-case@^2.2.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/no-case/-/no-case-2.3.1.tgz#7aeba1c73a52184265554b7dc03baf720df80081" + dependencies: + lower-case "^1.1.1" + +node-dir@^0.1.10: + version "0.1.16" + resolved "https://registry.yarnpkg.com/node-dir/-/node-dir-0.1.16.tgz#d2ef583aa50b90d93db8cdd26fcea58353957fe4" + dependencies: + minimatch "^3.0.2" + +node-libs-browser@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/node-libs-browser/-/node-libs-browser-0.7.0.tgz#3e272c0819e308935e26674408d7af0e1491b83b" + dependencies: + assert "^1.1.1" + browserify-zlib "^0.1.4" + buffer "^4.9.0" + console-browserify "^1.1.0" + constants-browserify "^1.0.0" + crypto-browserify "3.3.0" + domain-browser "^1.1.1" + events "^1.0.0" + https-browserify "0.0.1" + os-browserify "^0.2.0" + path-browserify "0.0.0" + process "^0.11.0" + punycode "^1.2.4" + querystring-es3 "^0.2.0" + readable-stream "^2.0.5" + stream-browserify "^2.0.1" + stream-http "^2.3.1" + string_decoder "^0.10.25" + timers-browserify "^2.0.2" + tty-browserify "0.0.0" + url "^0.11.0" + util "^0.10.3" + vm-browserify "0.0.4" + 
+node-pre-gyp@^0.6.29: + version "0.6.32" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.6.32.tgz#fc452b376e7319b3d255f5f34853ef6fd8fe1fd5" + dependencies: + mkdirp "~0.5.1" + nopt "~3.0.6" + npmlog "^4.0.1" + rc "~1.1.6" + request "^2.79.0" + rimraf "~2.5.4" + semver "~5.3.0" + tar "~2.2.1" + tar-pack "~3.3.0" + +nopt@~3.0.6: + version "3.0.6" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9" + dependencies: + abbrev "1" + +normalize-path@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.0.1.tgz#47886ac1662760d4261b7d979d241709d3ce3f7a" + +normalize-range@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" + +normalize-url@^1.4.0: + version "1.9.0" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-1.9.0.tgz#c2bb50035edee62cd81edb2d45da68dc25e3423e" + dependencies: + object-assign "^4.0.1" + prepend-http "^1.0.0" + query-string "^4.1.0" + sort-keys "^1.0.0" + +npm-path@^1.0.0, npm-path@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/npm-path/-/npm-path-1.1.0.tgz#0474ae00419c327d54701b7cf2cd05dc88be1140" + dependencies: + which "^1.2.4" + +npm-run@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/npm-run/-/npm-run-3.0.0.tgz#568920f840a98fd8e2299db66b2616e2476caf69" + dependencies: + minimist "^1.1.1" + npm-path "^1.0.1" + npm-which "^2.0.0" + serializerr "^1.0.1" + sync-exec "^0.6.2" + +npm-which@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/npm-which/-/npm-which-2.0.0.tgz#0c46982160b783093661d1d01bd4496d2feabbac" + dependencies: + commander "^2.2.0" + npm-path "^1.0.0" + which "^1.0.5" + +npmlog@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.0.2.tgz#d03950e0e78ce1527ba26d2a7592e9348ac3e75f" + dependencies: + are-we-there-yet 
"~1.1.2" + console-control-strings "~1.1.0" + gauge "~2.7.1" + set-blocking "~2.0.0" + +nth-check@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-1.0.1.tgz#9929acdf628fc2c41098deab82ac580cf149aae4" + dependencies: + boolbase "~1.0.0" + +null-check@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/null-check/-/null-check-1.0.0.tgz#977dffd7176012b9ec30d2a39db5cf72a0439edd" + +num2fraction@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/num2fraction/-/num2fraction-1.2.2.tgz#6f682b6a027a4e9ddfa4564cd2589d1d4e669ede" + +number-is-nan@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + +oauth-sign@~0.8.1: + version "0.8.2" + resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43" + +object-assign@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0" + +object-assign@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa" + +object-assign@^4.0.1, object-assign@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + +object-component@0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291" + +object-inspect@^1.1.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.2.1.tgz#3b62226eb8f6d441751c7d8f22a20ff80ac9dc3f" + +object-keys@^1.0.8, object-keys@^1.0.9: + version "1.0.11" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.11.tgz#c54601778ad560f1142ce0e01bcca8b56d13426d" + +object.entries@^1.0.3: + version 
"1.0.4" + resolved "https://registry.yarnpkg.com/object.entries/-/object.entries-1.0.4.tgz#1bf9a4dd2288f5b33f3a993d257661f05d161a5f" + dependencies: + define-properties "^1.1.2" + es-abstract "^1.6.1" + function-bind "^1.1.0" + has "^1.0.1" + +object.omit@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa" + dependencies: + for-own "^0.1.4" + is-extendable "^0.1.1" + +on-finished@~2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" + dependencies: + ee-first "1.1.1" + +on-headers@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.1.tgz#928f5d0f470d49342651ea6794b0857c100693f7" + +once@^1.3.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + dependencies: + wrappy "1" + +once@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/once/-/once-1.3.3.tgz#b2e261557ce4c314ec8304f3fa82663e4297ca20" + dependencies: + wrappy "1" + +open@0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/open/-/open-0.0.5.tgz#42c3e18ec95466b6bf0dc42f3a2945c3f0cad8fc" + +optimist@^0.6.1, optimist@~0.6.0, optimist@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686" + dependencies: + minimist "~0.0.1" + wordwrap "~0.0.2" + +options@>=0.0.5: + version "0.0.6" + resolved "https://registry.yarnpkg.com/options/-/options-0.0.6.tgz#ec22d312806bb53e731773e7cdaefcf1c643128f" + +original@>=0.0.5: + version "1.0.0" + resolved "https://registry.yarnpkg.com/original/-/original-1.0.0.tgz#9147f93fa1696d04be61e01bd50baeaca656bd3b" + dependencies: + url-parse "1.0.x" + +os-browserify@^0.2.0: + version "0.2.1" + resolved 
"https://registry.yarnpkg.com/os-browserify/-/os-browserify-0.2.1.tgz#63fc4ccee5d2d7763d26bbf8601078e6c2e0044f" + +os-homedir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" + +os-locale@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9" + dependencies: + lcid "^1.0.0" + +os-tmpdir@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + +output-file-sync@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-1.1.2.tgz#d0a33eefe61a205facb90092e826598d5245ce76" + dependencies: + graceful-fs "^4.1.4" + mkdirp "^0.5.1" + object-assign "^4.1.0" + +pako@~0.2.0: + version "0.2.9" + resolved "https://registry.yarnpkg.com/pako/-/pako-0.2.9.tgz#f3f7522f4ef782348da8161bad9ecfd51bf83a75" + +param-case@2.1.x: + version "2.1.0" + resolved "https://registry.yarnpkg.com/param-case/-/param-case-2.1.0.tgz#2619f90fd6c829ed0b958f1c84ed03a745a6d70a" + dependencies: + no-case "^2.2.0" + +parse-glob@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c" + dependencies: + glob-base "^0.3.0" + is-dotfile "^1.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.0" + +parsejson@0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab" + dependencies: + better-assert "~1.0.0" + +parseqs@0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.5.tgz#d5208a3738e46766e291ba2ea173684921a8b89d" + dependencies: + better-assert "~1.0.0" + +parseuri@0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.5.tgz#80204a50d4dbb779bfdc6ebe2778d90e4bce320a" + 
dependencies: + better-assert "~1.0.0" + +parseurl@~1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.1.tgz#c8ab8c9223ba34888aa64a297b28853bec18da56" + +path-browserify@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-0.0.0.tgz#a0b870729aae214005b7d5032ec2cbbb0fb4451a" + +path-exists@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b" + dependencies: + pinkie-promise "^2.0.0" + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + +path-to-regexp@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" + +pbkdf2-compat@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pbkdf2-compat/-/pbkdf2-compat-2.0.1.tgz#b6e0c8fa99494d94e0511575802a59a5c142f288" + +performance-now@~0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5" + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + +pkg-dir@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-1.0.0.tgz#7a4b508a8d5bb2d629d447056ff4e9c9314cf3d4" + dependencies: + find-up "^1.0.0" + +postcss-calc@^5.2.0: + version "5.3.1" + resolved "https://registry.yarnpkg.com/postcss-calc/-/postcss-calc-5.3.1.tgz#77bae7ca928ad85716e2fda42f261bf7c1d65b5e" + dependencies: + postcss "^5.0.2" + postcss-message-helpers 
"^2.0.0" + reduce-css-calc "^1.2.6" + +postcss-colormin@^2.1.8: + version "2.2.1" + resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-2.2.1.tgz#dc5421b6ae6f779ef6bfd47352b94abe59d0316b" + dependencies: + colormin "^1.0.5" + postcss "^5.0.13" + postcss-value-parser "^3.2.3" + +postcss-convert-values@^2.3.4: + version "2.6.0" + resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-2.6.0.tgz#08c6d06130fe58a91a21ff50829e1aad6a3a1acc" + dependencies: + postcss "^5.0.11" + postcss-value-parser "^3.1.2" + +postcss-discard-comments@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/postcss-discard-comments/-/postcss-discard-comments-2.0.4.tgz#befe89fafd5b3dace5ccce51b76b81514be00e3d" + dependencies: + postcss "^5.0.14" + +postcss-discard-duplicates@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-2.0.2.tgz#02be520e91571ffb10738766a981d5770989bb32" + dependencies: + postcss "^5.0.4" + +postcss-discard-empty@^2.0.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/postcss-discard-empty/-/postcss-discard-empty-2.1.0.tgz#d2b4bd9d5ced5ebd8dcade7640c7d7cd7f4f92b5" + dependencies: + postcss "^5.0.14" + +postcss-discard-overridden@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/postcss-discard-overridden/-/postcss-discard-overridden-0.1.1.tgz#8b1eaf554f686fb288cd874c55667b0aa3668d58" + dependencies: + postcss "^5.0.16" + +postcss-discard-unused@^2.2.1: + version "2.2.3" + resolved "https://registry.yarnpkg.com/postcss-discard-unused/-/postcss-discard-unused-2.2.3.tgz#bce30b2cc591ffc634322b5fb3464b6d934f4433" + dependencies: + postcss "^5.0.14" + uniqs "^2.0.0" + +postcss-filter-plugins@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-filter-plugins/-/postcss-filter-plugins-2.0.2.tgz#6d85862534d735ac420e4a85806e1f5d4286d84c" + dependencies: + postcss "^5.0.4" + uniqid "^4.0.0" + 
+postcss-merge-idents@^2.1.5: + version "2.1.7" + resolved "https://registry.yarnpkg.com/postcss-merge-idents/-/postcss-merge-idents-2.1.7.tgz#4c5530313c08e1d5b3bbf3d2bbc747e278eea270" + dependencies: + has "^1.0.1" + postcss "^5.0.10" + postcss-value-parser "^3.1.1" + +postcss-merge-longhand@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-merge-longhand/-/postcss-merge-longhand-2.0.2.tgz#23d90cd127b0a77994915332739034a1a4f3d658" + dependencies: + postcss "^5.0.4" + +postcss-merge-rules@^2.0.3: + version "2.1.1" + resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-2.1.1.tgz#5e5640020ce43cddd343c73bba91c9a358d1fe0f" + dependencies: + browserslist "^1.5.2" + caniuse-api "^1.5.2" + postcss "^5.0.4" + postcss-selector-parser "^2.2.2" + vendors "^1.0.0" + +postcss-message-helpers@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-message-helpers/-/postcss-message-helpers-2.0.0.tgz#a4f2f4fab6e4fe002f0aed000478cdf52f9ba60e" + +postcss-minify-font-values@^1.0.2: + version "1.0.5" + resolved "https://registry.yarnpkg.com/postcss-minify-font-values/-/postcss-minify-font-values-1.0.5.tgz#4b58edb56641eba7c8474ab3526cafd7bbdecb69" + dependencies: + object-assign "^4.0.1" + postcss "^5.0.4" + postcss-value-parser "^3.0.2" + +postcss-minify-gradients@^1.0.1: + version "1.0.5" + resolved "https://registry.yarnpkg.com/postcss-minify-gradients/-/postcss-minify-gradients-1.0.5.tgz#5dbda11373703f83cfb4a3ea3881d8d75ff5e6e1" + dependencies: + postcss "^5.0.12" + postcss-value-parser "^3.3.0" + +postcss-minify-params@^1.0.4: + version "1.2.2" + resolved "https://registry.yarnpkg.com/postcss-minify-params/-/postcss-minify-params-1.2.2.tgz#ad2ce071373b943b3d930a3fa59a358c28d6f1f3" + dependencies: + alphanum-sort "^1.0.1" + postcss "^5.0.2" + postcss-value-parser "^3.0.2" + uniqs "^2.0.0" + +postcss-minify-selectors@^2.0.4: + version "2.1.1" + resolved 
"https://registry.yarnpkg.com/postcss-minify-selectors/-/postcss-minify-selectors-2.1.1.tgz#b2c6a98c0072cf91b932d1a496508114311735bf" + dependencies: + alphanum-sort "^1.0.2" + has "^1.0.1" + postcss "^5.0.14" + postcss-selector-parser "^2.0.0" + +postcss-modules-extract-imports@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-1.0.1.tgz#8fb3fef9a6dd0420d3f6d4353cf1ff73f2b2a341" + dependencies: + postcss "^5.0.4" + +postcss-modules-local-by-default@^1.0.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-1.1.1.tgz#29a10673fa37d19251265ca2ba3150d9040eb4ce" + dependencies: + css-selector-tokenizer "^0.6.0" + postcss "^5.0.4" + +postcss-modules-scope@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-1.0.2.tgz#ff977395e5e06202d7362290b88b1e8cd049de29" + dependencies: + css-selector-tokenizer "^0.6.0" + postcss "^5.0.4" + +postcss-modules-values@^1.1.0: + version "1.2.2" + resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-1.2.2.tgz#f0e7d476fe1ed88c5e4c7f97533a3e772ad94ca1" + dependencies: + icss-replace-symbols "^1.0.2" + postcss "^5.0.14" + +postcss-normalize-charset@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/postcss-normalize-charset/-/postcss-normalize-charset-1.1.1.tgz#ef9ee71212d7fe759c78ed162f61ed62b5cb93f1" + dependencies: + postcss "^5.0.5" + +postcss-normalize-url@^3.0.7: + version "3.0.8" + resolved "https://registry.yarnpkg.com/postcss-normalize-url/-/postcss-normalize-url-3.0.8.tgz#108f74b3f2fcdaf891a2ffa3ea4592279fc78222" + dependencies: + is-absolute-url "^2.0.0" + normalize-url "^1.4.0" + postcss "^5.0.14" + postcss-value-parser "^3.2.3" + +postcss-ordered-values@^2.1.0: + version "2.2.3" + resolved 
"https://registry.yarnpkg.com/postcss-ordered-values/-/postcss-ordered-values-2.2.3.tgz#eec6c2a67b6c412a8db2042e77fe8da43f95c11d" + dependencies: + postcss "^5.0.4" + postcss-value-parser "^3.0.1" + +postcss-reduce-idents@^2.2.2: + version "2.4.0" + resolved "https://registry.yarnpkg.com/postcss-reduce-idents/-/postcss-reduce-idents-2.4.0.tgz#c2c6d20cc958284f6abfbe63f7609bf409059ad3" + dependencies: + postcss "^5.0.4" + postcss-value-parser "^3.0.2" + +postcss-reduce-initial@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/postcss-reduce-initial/-/postcss-reduce-initial-1.0.1.tgz#68f80695f045d08263a879ad240df8dd64f644ea" + dependencies: + postcss "^5.0.4" + +postcss-reduce-transforms@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/postcss-reduce-transforms/-/postcss-reduce-transforms-1.0.4.tgz#ff76f4d8212437b31c298a42d2e1444025771ae1" + dependencies: + has "^1.0.1" + postcss "^5.0.8" + postcss-value-parser "^3.0.1" + +postcss-selector-parser@^2.0.0, postcss-selector-parser@^2.2.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-2.2.2.tgz#3d70f5adda130da51c7c0c2fc023f56b1374fe08" + dependencies: + flatten "^1.0.2" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-svgo@^2.1.1: + version "2.1.6" + resolved "https://registry.yarnpkg.com/postcss-svgo/-/postcss-svgo-2.1.6.tgz#b6df18aa613b666e133f08adb5219c2684ac108d" + dependencies: + is-svg "^2.0.0" + postcss "^5.0.14" + postcss-value-parser "^3.2.3" + svgo "^0.7.0" + +postcss-unique-selectors@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-unique-selectors/-/postcss-unique-selectors-2.0.2.tgz#981d57d29ddcb33e7b1dfe1fd43b8649f933ca1d" + dependencies: + alphanum-sort "^1.0.1" + postcss "^5.0.4" + uniqs "^2.0.0" + +postcss-value-parser@^3.0.1, postcss-value-parser@^3.0.2, postcss-value-parser@^3.1.1, postcss-value-parser@^3.1.2, postcss-value-parser@^3.2.3, postcss-value-parser@^3.3.0: + version 
"3.3.0" + resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-3.3.0.tgz#87f38f9f18f774a4ab4c8a232f5c5ce8872a9d15" + +postcss-zindex@^2.0.1: + version "2.2.0" + resolved "https://registry.yarnpkg.com/postcss-zindex/-/postcss-zindex-2.2.0.tgz#d2109ddc055b91af67fc4cb3b025946639d2af22" + dependencies: + has "^1.0.1" + postcss "^5.0.4" + uniqs "^2.0.0" + +postcss@^5.0.10, postcss@^5.0.11, postcss@^5.0.12, postcss@^5.0.13, postcss@^5.0.14, postcss@^5.0.16, postcss@^5.0.2, postcss@^5.0.4, postcss@^5.0.5, postcss@^5.0.6, postcss@^5.0.8, postcss@^5.2.11: + version "5.2.11" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-5.2.11.tgz#ff29bcd6d2efb98bfe08a022055ec599bbe7b761" + dependencies: + chalk "^1.1.3" + js-base64 "^2.1.9" + source-map "^0.5.6" + supports-color "^3.2.3" + +prefix-style@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/prefix-style/-/prefix-style-2.0.1.tgz#66bba9a870cfda308a5dc20e85e9120932c95a06" + +prepend-http@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc" + +preserve@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b" + +pretty-error@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-2.0.2.tgz#a7db19cbb529ca9f0af3d3a2f77d5caf8e5dec23" + dependencies: + renderkid "~2.0.0" + utila "~0.4" + +private@^0.1.6, private@~0.1.5: + version "0.1.6" + resolved "https://registry.yarnpkg.com/private/-/private-0.1.6.tgz#55c6a976d0f9bafb9924851350fe47b9b5fbb7c1" + +process-nextick-args@~1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3" + +process@^0.11.0: + version "0.11.9" + resolved 
"https://registry.yarnpkg.com/process/-/process-0.11.9.tgz#7bd5ad21aa6253e7da8682264f1e11d11c0318c1" + +promise@^7.0.3, promise@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/promise/-/promise-7.1.1.tgz#489654c692616b8aa55b0724fa809bb7db49c5bf" + dependencies: + asap "~2.0.3" + +protochain@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/protochain/-/protochain-1.0.5.tgz#991c407e99de264aadf8f81504b5e7faf7bfa260" + +proxy-addr@~1.1.2: + version "1.1.3" + resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-1.1.3.tgz#dc97502f5722e888467b3fa2297a7b1ff47df074" + dependencies: + forwarded "~0.1.0" + ipaddr.js "1.2.0" + +prr@~0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/prr/-/prr-0.0.0.tgz#1a84b85908325501411853d0081ee3fa86e2926a" + +punycode@1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.3.2.tgz#9653a036fb7c1ee42342f2325cceefea3926c48d" + +punycode@^1.2.4, punycode@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" + +purify-css@^1.1.1: + version "1.1.9" + resolved "https://registry.yarnpkg.com/purify-css/-/purify-css-1.1.9.tgz#46c9acd8940f3076c0c346c027e286f996168357" + dependencies: + clean-css "^3.2.10" + glob "^6.0.4" + rework "^1.0.1" + uglifyjs "^2.4.10" + yargs "^3.10.0" + +purifycss-webpack-plugin@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/purifycss-webpack-plugin/-/purifycss-webpack-plugin-2.0.3.tgz#ef34175d21390232db06009c066b92243b3921e7" + dependencies: + glob "^5.0.15" + purify-css "^1.1.1" + webpack-sources "^0.1.0" + +q@^1.1.2: + version "1.4.1" + resolved "https://registry.yarnpkg.com/q/-/q-1.4.1.tgz#55705bcd93c5f3673530c2c2cbc0c2b3addc286e" + +qs@2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/qs/-/qs-2.3.3.tgz#e9e85adbe75da0bbe4c8e0476a086290f863b404" + +qs@6.2.0: + version "6.2.0" + resolved 
"https://registry.yarnpkg.com/qs/-/qs-6.2.0.tgz#3b7848c03c2dece69a9522b0fae8c4126d745f3b" + +qs@6.2.1: + version "6.2.1" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.2.1.tgz#ce03c5ff0935bc1d9d69a9f14cbd18e568d67625" + +qs@~6.3.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.3.0.tgz#f403b264f23bc01228c74131b407f18d5ea5d442" + +query-string@^3.0.0: + version "3.0.3" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-3.0.3.tgz#ae2e14b4d05071d4e9b9eb4873c35b0dcd42e638" + dependencies: + strict-uri-encode "^1.0.0" + +query-string@^4.1.0: + version "4.3.1" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-4.3.1.tgz#54baada6713eafc92be75c47a731f2ebd09cd11d" + dependencies: + object-assign "^4.1.0" + strict-uri-encode "^1.0.0" + +querystring-es3@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/querystring-es3/-/querystring-es3-0.2.1.tgz#9ec61f79049875707d69414596fd907a4d711e73" + +querystring@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.0.tgz#b209849203bb25df820da756e747005878521620" + +querystringify@0.0.x: + version "0.0.4" + resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-0.0.4.tgz#0cf7f84f9463ff0ae51c4c4b142d95be37724d9c" + +raf@^3.1.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/raf/-/raf-3.3.0.tgz#93845eeffc773f8129039f677f80a36044eee2c3" + dependencies: + performance-now "~0.2.0" + +randomatic@^1.1.3: + version "1.1.6" + resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb" + dependencies: + is-number "^2.0.2" + kind-of "^3.0.2" + +range-parser@^1.0.3, range-parser@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e" + +raw-body@~2.2.0: + version "2.2.0" + resolved 
"https://registry.yarnpkg.com/raw-body/-/raw-body-2.2.0.tgz#994976cf6a5096a41162840492f0bdc5d6e7fb96" + dependencies: + bytes "2.4.0" + iconv-lite "0.4.15" + unpipe "1.0.0" + +rc@~1.1.6: + version "1.1.6" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.1.6.tgz#43651b76b6ae53b5c802f1151fa3fc3b059969c9" + dependencies: + deep-extend "~0.4.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~1.0.4" + +react-addons-test-utils@^0.14.8: + version "0.14.8" + resolved "https://registry.yarnpkg.com/react-addons-test-utils/-/react-addons-test-utils-0.14.8.tgz#dcddc039e71fc3c81d80338e53a3714f14d41e1f" + +react-bootstrap@^0.28.5: + version "0.28.5" + resolved "https://registry.yarnpkg.com/react-bootstrap/-/react-bootstrap-0.28.5.tgz#393ab59ad66071a6a7bae0fda7518cdd228f2baf" + dependencies: + babel-runtime "^5.8.25" + classnames "^2.1.5" + dom-helpers "^2.4.0" + invariant "^2.1.2" + keycode "^2.1.0" + lodash-compat "^3.10.1" + react-overlays "^0.6.0" + react-prop-types "^0.3.0" + uncontrollable "^3.1.3" + warning "^2.1.0" + +react-copy-to-clipboard@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/react-copy-to-clipboard/-/react-copy-to-clipboard-4.2.3.tgz#268c5a0fbde9a95d96145014e7f85110b0e7fb8e" + dependencies: + copy-to-clipboard "^3" + +react-custom-scrollbars@^2.2.2: + version "2.3.0" + resolved "https://registry.yarnpkg.com/react-custom-scrollbars/-/react-custom-scrollbars-2.3.0.tgz#53c9b72be6933e7a853c6ece7493dda7998752b3" + dependencies: + dom-css "^2.0.0" + raf "^3.1.0" + +react-dom@^0.14.6: + version "0.14.8" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-0.14.8.tgz#0f1c547514263f771bd31814a739e5306575069e" + +react-dropzone@^3.5.3: + version "3.9.1" + resolved "https://registry.yarnpkg.com/react-dropzone/-/react-dropzone-3.9.1.tgz#f90e0c7d603be3fb2253ee315063d424371a57bd" + dependencies: + attr-accept "^1.0.3" + +react-onclickout@2.0.4: + version "2.0.4" + resolved 
"https://registry.yarnpkg.com/react-onclickout/-/react-onclickout-2.0.4.tgz#2c7539a647e1dcdcab0b28e2f4eae3c3e00f0c64" + +react-overlays@^0.6.0: + version "0.6.10" + resolved "https://registry.yarnpkg.com/react-overlays/-/react-overlays-0.6.10.tgz#e7e52dad47f00a0fc784eb044428c3a9e874bfa3" + dependencies: + classnames "^2.2.5" + dom-helpers "^2.4.0" + react-prop-types "^0.4.0" + warning "^3.0.0" + +react-prop-types@^0.3.0: + version "0.3.2" + resolved "https://registry.yarnpkg.com/react-prop-types/-/react-prop-types-0.3.2.tgz#e2763ac6f3a80199d8981c3647c44b0554c97b7f" + dependencies: + warning "^2.0.0" + +react-prop-types@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/react-prop-types/-/react-prop-types-0.4.0.tgz#f99b0bfb4006929c9af2051e7c1414a5c75b93d0" + dependencies: + warning "^3.0.0" + +react-redux@^4.4.5: + version "4.4.6" + resolved "https://registry.yarnpkg.com/react-redux/-/react-redux-4.4.6.tgz#4b9d32985307a11096a2dd61561980044fcc6209" + dependencies: + hoist-non-react-statics "^1.0.3" + invariant "^2.0.0" + lodash "^4.2.0" + loose-envify "^1.1.0" + +react-router@^2.8.1: + version "2.8.1" + resolved "https://registry.yarnpkg.com/react-router/-/react-router-2.8.1.tgz#73e9491f6ceb316d0f779829081863e378ee4ed7" + dependencies: + history "^2.1.2" + hoist-non-react-statics "^1.2.0" + invariant "^2.2.1" + loose-envify "^1.2.0" + warning "^3.0.0" + +react@^0.14.8: + version "0.14.8" + resolved "https://registry.yarnpkg.com/react/-/react-0.14.8.tgz#078dfa454d4745bcc54a9726311c2bf272c23684" + dependencies: + envify "^3.0.0" + fbjs "^0.6.1" + +readable-stream@1.0, readable-stream@~1.0.2: + version "1.0.34" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "0.0.1" + string_decoder "~0.10.x" + +readable-stream@1.0.27-1: + version "1.0.27-1" + resolved 
"https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.27-1.tgz#6b67983c20357cefd07f0165001a16d710d91078" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "0.0.1" + string_decoder "~0.10.x" + +"readable-stream@^2.0.0 || ^1.1.13", readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.5, readable-stream@^2.1.0: + version "2.2.2" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.2.tgz#a9e6fec3c7dda85f8bb1b3ba7028604556fc825e" + dependencies: + buffer-shims "^1.0.0" + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "~1.0.0" + process-nextick-args "~1.0.6" + string_decoder "~0.10.x" + util-deprecate "~1.0.1" + +readable-stream@~2.1.4: + version "2.1.5" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.1.5.tgz#66fa8b720e1438b364681f2ad1a63c618448c9d0" + dependencies: + buffer-shims "^1.0.0" + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "~1.0.0" + process-nextick-args "~1.0.6" + string_decoder "~0.10.x" + util-deprecate "~1.0.1" + +readdirp@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.1.0.tgz#4ed0ad060df3073300c48440373f72d1cc642d78" + dependencies: + graceful-fs "^4.1.2" + minimatch "^3.0.2" + readable-stream "^2.0.2" + set-immediate-shim "^1.0.1" + +recast@^0.11.17: + version "0.11.20" + resolved "https://registry.yarnpkg.com/recast/-/recast-0.11.20.tgz#2cb9bec269c03b36d0598118a936cd0a293ca3f3" + dependencies: + ast-types "0.9.4" + esprima "~3.1.0" + private "~0.1.5" + source-map "~0.5.0" + +rechoir@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.6.2.tgz#85204b54dba82d5742e28c96756ef43af50e3384" + dependencies: + resolve "^1.1.6" + +reduce-component@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/reduce-component/-/reduce-component-1.0.1.tgz#e0c93542c574521bea13df0f9488ed82ab77c5da" + +reduce-css-calc@^1.2.6: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/reduce-css-calc/-/reduce-css-calc-1.3.0.tgz#747c914e049614a4c9cfbba629871ad1d2927716" + dependencies: + balanced-match "^0.4.2" + math-expression-evaluator "^1.2.14" + reduce-function-call "^1.0.1" + +reduce-function-call@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/reduce-function-call/-/reduce-function-call-1.0.2.tgz#5a200bf92e0e37751752fe45b0ab330fd4b6be99" + dependencies: + balanced-match "^0.4.2" + +redux-thunk@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/redux-thunk/-/redux-thunk-1.0.3.tgz#778aa0099eea0595031ab6b39165f6670d8d26bd" + +redux@^3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/redux/-/redux-3.6.0.tgz#887c2b3d0b9bd86eca2be70571c27654c19e188d" + dependencies: + lodash "^4.2.1" + lodash-es "^4.2.1" + loose-envify "^1.1.0" + symbol-observable "^1.0.2" + +regenerate@^1.2.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260" + +regenerator-runtime@^0.10.0: + version "0.10.1" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.1.tgz#257f41961ce44558b18f7814af48c17559f9faeb" + +regenerator-transform@0.9.8: + version "0.9.8" + resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.9.8.tgz#0f88bb2bc03932ddb7b6b7312e68078f01026d6c" + dependencies: + babel-runtime "^6.18.0" + babel-types "^6.19.0" + private "^0.1.6" + +regex-cache@^0.4.2: + version "0.4.3" + resolved "http://registry.npmjs.org/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145" + dependencies: + is-equal-shallow "^0.1.3" + is-primitive "^2.0.0" + +regexpu-core@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-1.0.0.tgz#86a763f58ee4d7c2f6b102e4764050de7ed90c6b" + dependencies: + regenerate "^1.2.1" + regjsgen "^0.2.0" + regjsparser "^0.1.4" + +regexpu-core@^2.0.0: + version "2.0.0" + 
resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-2.0.0.tgz#49d038837b8dcf8bfa5b9a42139938e6ea2ae240" + dependencies: + regenerate "^1.2.1" + regjsgen "^0.2.0" + regjsparser "^0.1.4" + +regjsgen@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7" + +regjsparser@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c" + dependencies: + jsesc "~0.5.0" + +relateurl@0.2.x: + version "0.2.7" + resolved "https://registry.yarnpkg.com/relateurl/-/relateurl-0.2.7.tgz#54dbf377e51440aca90a4cd274600d3ff2d888a9" + +renderkid@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/renderkid/-/renderkid-2.0.0.tgz#1859753e7a5adbf35443aba0d4e4579e78abee85" + dependencies: + css-select "^1.1.0" + dom-converter "~0.1" + htmlparser2 "~3.3.0" + strip-ansi "^3.0.0" + utila "~0.3" + +repeat-element@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a" + +repeat-string@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-0.2.2.tgz#c7a8d3236068362059a7e4651fc6884e8b1fb4ae" + +repeat-string@^1.5.0, repeat-string@^1.5.2: + version "1.6.1" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" + +repeating@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda" + dependencies: + is-finite "^1.0.0" + +request@^2.72.0, request@^2.79.0: + version "2.79.0" + resolved "https://registry.yarnpkg.com/request/-/request-2.79.0.tgz#4dfe5bf6be8b8cdc37fcf93e04b65577722710de" + dependencies: + aws-sign2 "~0.6.0" + aws4 "^1.2.1" + caseless "~0.11.0" + combined-stream "~1.0.5" + extend "~3.0.0" + forever-agent "~0.6.1" + 
form-data "~2.1.1" + har-validator "~2.0.6" + hawk "~3.1.3" + http-signature "~1.1.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.7" + oauth-sign "~0.8.1" + qs "~6.3.0" + stringstream "~0.0.4" + tough-cookie "~2.3.0" + tunnel-agent "~0.4.1" + uuid "^3.0.0" + +requires-port@1.0.x, requires-port@1.x.x: + version "1.0.0" + resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + +resolve-url@~0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" + +resolve@^1.1.5, resolve@^1.1.6: + version "1.2.0" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.2.0.tgz#9589c3f2f6149d1417a40becc1663db6ec6bc26c" + +rework@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/rework/-/rework-1.0.1.tgz#30806a841342b54510aa4110850cd48534144aa7" + dependencies: + convert-source-map "^0.3.3" + css "^2.0.0" + +right-align@^0.1.1: + version "0.1.3" + resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef" + dependencies: + align-text "^0.1.1" + +rimraf@2, rimraf@^2.3.3, rimraf@~2.5.1, rimraf@~2.5.4: + version "2.5.4" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.5.4.tgz#96800093cbf1a0c86bd95b4625467535c29dfa04" + dependencies: + glob "^7.0.5" + +ripemd160@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-0.2.0.tgz#2bf198bde167cacfa51c0a928e84b68bbe171fce" + +rocambole-indent@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/rocambole-indent/-/rocambole-indent-2.0.4.tgz#a18a24977ca0400b861daa4631e861dcb52d085c" + dependencies: + debug "^2.1.3" + mout "^0.11.0" + rocambole-token "^1.2.1" + +rocambole-linebreak@^1.0.2: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/rocambole-linebreak/-/rocambole-linebreak-1.0.2.tgz#03621515b43b4721c97e5a1c1bca5a0366368f2f" + dependencies: + debug "^2.1.3" + rocambole-token "^1.2.1" + semver "^4.3.1" + +rocambole-node@~1.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/rocambole-node/-/rocambole-node-1.0.0.tgz#db5b49de7407b0080dd514872f28e393d0f7ff3f" + +rocambole-token@^1.1.2, rocambole-token@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/rocambole-token/-/rocambole-token-1.2.1.tgz#c785df7428dc3cb27ad7897047bd5238cc070d35" + +rocambole-whitespace@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/rocambole-whitespace/-/rocambole-whitespace-1.0.0.tgz#63330949256b29941f59b190459f999c6b1d3bf9" + dependencies: + debug "^2.1.3" + repeat-string "^1.5.0" + rocambole-token "^1.2.1" + +"rocambole@>=0.7 <2.0", rocambole@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/rocambole/-/rocambole-0.7.0.tgz#f6c79505517dc42b6fb840842b8b953b0f968585" + dependencies: + esprima "^2.1" + +sax@~1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.1.tgz#7b8e656190b228e81a66aea748480d828cd2d37a" + +semver@^4.3.1, semver@~4.3.3: + version "4.3.6" + resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da" + +semver@~5.3.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f" + +send@0.14.1: + version "0.14.1" + resolved "https://registry.yarnpkg.com/send/-/send-0.14.1.tgz#a954984325392f51532a7760760e459598c89f7a" + dependencies: + debug "~2.2.0" + depd "~1.1.0" + destroy "~1.0.4" + encodeurl "~1.0.1" + escape-html "~1.0.3" + etag "~1.7.0" + fresh "0.3.0" + http-errors "~1.5.0" + mime "1.3.4" + ms "0.7.1" + on-finished "~2.3.0" + range-parser "~1.2.0" + statuses "~1.3.0" + +send@0.14.2: + version "0.14.2" + resolved 
"https://registry.yarnpkg.com/send/-/send-0.14.2.tgz#39b0438b3f510be5dc6f667a11f71689368cdeef" + dependencies: + debug "~2.2.0" + depd "~1.1.0" + destroy "~1.0.4" + encodeurl "~1.0.1" + escape-html "~1.0.3" + etag "~1.7.0" + fresh "0.3.0" + http-errors "~1.5.1" + mime "1.3.4" + ms "0.7.2" + on-finished "~2.3.0" + range-parser "~1.2.0" + statuses "~1.3.1" + +serializerr@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/serializerr/-/serializerr-1.0.3.tgz#12d4c5aa1c3ffb8f6d1dc5f395aa9455569c3f91" + dependencies: + protochain "^1.0.5" + +serve-index@^1.7.2: + version "1.8.0" + resolved "https://registry.yarnpkg.com/serve-index/-/serve-index-1.8.0.tgz#7c5d96c13fb131101f93c1c5774f8516a1e78d3b" + dependencies: + accepts "~1.3.3" + batch "0.5.3" + debug "~2.2.0" + escape-html "~1.0.3" + http-errors "~1.5.0" + mime-types "~2.1.11" + parseurl "~1.3.1" + +serve-static@~1.11.1: + version "1.11.2" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.11.2.tgz#2cf9889bd4435a320cc36895c9aa57bd662e6ac7" + dependencies: + encodeurl "~1.0.1" + escape-html "~1.0.3" + parseurl "~1.3.1" + send "0.14.2" + +set-blocking@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + +set-immediate-shim@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz#4b2b1b27eb808a9f8dcc481a58e5e56f599f3f61" + +setimmediate@^1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" + +setprototypeof@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.2.tgz#81a552141ec104b88e89ce383103ad5c66564d08" + +sha.js@2.2.6: + version "2.2.6" + resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.2.6.tgz#17ddeddc5f722fb66501658895461977867315ba" + +shelljs@^0.7.0: + version "0.7.6" + 
resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.7.6.tgz#379cccfb56b91c8601e4793356eb5382924de9ad" + dependencies: + glob "^7.0.0" + interpret "^1.0.0" + rechoir "^0.6.2" + +sigmund@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590" + +signal-exit@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" + +slash@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55" + +sntp@1.x.x: + version "1.0.9" + resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198" + dependencies: + hoek "2.x.x" + +socket.io-adapter@0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-0.5.0.tgz#cb6d4bb8bec81e1078b99677f9ced0046066bb8b" + dependencies: + debug "2.3.3" + socket.io-parser "2.3.1" + +socket.io-client@1.7.2: + version "1.7.2" + resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.7.2.tgz#39fdb0c3dd450e321b7e40cfd83612ec533dd644" + dependencies: + backo2 "1.0.2" + component-bind "1.0.0" + component-emitter "1.2.1" + debug "2.3.3" + engine.io-client "1.8.2" + has-binary "0.1.7" + indexof "0.0.1" + object-component "0.0.3" + parseuri "0.0.5" + socket.io-parser "2.3.1" + to-array "0.1.4" + +socket.io-parser@2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-2.3.1.tgz#dd532025103ce429697326befd64005fcfe5b4a0" + dependencies: + component-emitter "1.1.2" + debug "2.2.0" + isarray "0.0.1" + json3 "3.3.2" + +socket.io@^1.4.5: + version "1.7.2" + resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.7.2.tgz#83bbbdf2e79263b378900da403e7843e05dc3b71" + dependencies: + debug "2.3.3" + engine.io "1.8.2" + has-binary "0.1.7" + object-assign "4.1.0" + 
socket.io-adapter "0.5.0" + socket.io-client "1.7.2" + socket.io-parser "2.3.1" + +sockjs-client@^1.0.3: + version "1.1.2" + resolved "https://registry.yarnpkg.com/sockjs-client/-/sockjs-client-1.1.2.tgz#f0212a8550e4c9468c8cceaeefd2e3493c033ad5" + dependencies: + debug "^2.2.0" + eventsource "0.1.6" + faye-websocket "~0.11.0" + inherits "^2.0.1" + json3 "^3.3.2" + url-parse "^1.1.1" + +sockjs@^0.3.15: + version "0.3.18" + resolved "https://registry.yarnpkg.com/sockjs/-/sockjs-0.3.18.tgz#d9b289316ca7df77595ef299e075f0f937eb4207" + dependencies: + faye-websocket "^0.10.0" + uuid "^2.0.2" + +sort-keys@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/sort-keys/-/sort-keys-1.1.2.tgz#441b6d4d346798f1b4e49e8920adfba0e543f9ad" + dependencies: + is-plain-obj "^1.0.0" + +source-list-map@^0.1.4, source-list-map@~0.1.7: + version "0.1.8" + resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-0.1.8.tgz#c550b2ab5427f6b3f21f5afead88c4f5587b2106" + +source-map-resolve@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.3.1.tgz#610f6122a445b8dd51535a2a71b783dfc1248761" + dependencies: + atob "~1.1.0" + resolve-url "~0.2.1" + source-map-url "~0.3.0" + urix "~0.1.0" + +source-map-support@^0.4.2: + version "0.4.10" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.10.tgz#d7b19038040a14c0837a18e630a196453952b378" + dependencies: + source-map "^0.5.3" + +source-map-url@~0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.3.0.tgz#7ecaf13b57bcd09da8a40c5d269db33799d4aaf9" + +source-map@0.1.34: + version "0.1.34" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.34.tgz#a7cfe89aec7b1682c3b198d0acfb47d7d090566b" + dependencies: + amdefine ">=0.0.4" + +source-map@0.4.x, source-map@^0.4.2, source-map@~0.4.1: + version "0.4.4" + resolved 
"https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b" + dependencies: + amdefine ">=0.0.4" + +source-map@^0.1.38, source-map@^0.1.41: + version "0.1.43" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346" + dependencies: + amdefine ">=0.0.4" + +source-map@^0.5.0, source-map@^0.5.3, source-map@^0.5.6, source-map@~0.5.0, source-map@~0.5.1, source-map@~0.5.3: + version "0.5.6" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + +sshpk@^1.7.0: + version "1.10.2" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.10.2.tgz#d5a804ce22695515638e798dbe23273de070a5fa" + dependencies: + asn1 "~0.2.3" + assert-plus "^1.0.0" + dashdash "^1.12.0" + getpass "^0.1.1" + optionalDependencies: + bcrypt-pbkdf "^1.0.0" + ecc-jsbn "~0.1.1" + jodid25519 "^1.0.0" + jsbn "~0.1.0" + tweetnacl "~0.14.0" + +"statuses@>= 1.3.1 < 2", statuses@~1.3.0, statuses@~1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e" + +stdin@*: + version "0.0.1" + resolved "https://registry.yarnpkg.com/stdin/-/stdin-0.0.1.tgz#d3041981aaec3dfdbc77a1b38d6372e38f5fb71e" + +stream-browserify@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/stream-browserify/-/stream-browserify-2.0.1.tgz#66266ee5f9bdb9940a4e4514cafb43bb71e5c9db" + dependencies: + inherits "~2.0.1" + readable-stream "^2.0.2" + +stream-cache@~0.0.1: + version "0.0.2" + resolved "https://registry.yarnpkg.com/stream-cache/-/stream-cache-0.0.2.tgz#1ac5ad6832428ca55667dbdee395dad4e6db118f" + +stream-http@^2.3.1: + version "2.6.3" + resolved 
"https://registry.yarnpkg.com/stream-http/-/stream-http-2.6.3.tgz#4c3ddbf9635968ea2cfd4e48d43de5def2625ac3" + dependencies: + builtin-status-codes "^3.0.0" + inherits "^2.0.1" + readable-stream "^2.1.0" + to-arraybuffer "^1.0.0" + xtend "^4.0.0" + +strict-uri-encode@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" + +string-width@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" + dependencies: + code-point-at "^1.0.0" + is-fullwidth-code-point "^1.0.0" + strip-ansi "^3.0.0" + +string_decoder@^0.10.25, string_decoder@~0.10.x: + version "0.10.31" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" + +stringstream@~0.0.4: + version "0.0.5" + resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878" + +strip-ansi@^3.0.0, strip-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + dependencies: + ansi-regex "^2.0.0" + +strip-json-comments@~0.1.1: + version "0.1.3" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-0.1.3.tgz#164c64e370a8a3cc00c9e01b539e569823f0ee54" + +strip-json-comments@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-1.0.4.tgz#1e15fbcac97d3ee99bf2d73b4c656b082bbafb91" + +style-loader@^0.13.1: + version "0.13.1" + resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-0.13.1.tgz#468280efbc0473023cd3a6cd56e33b5a1d7fc3a9" + dependencies: + loader-utils "^0.2.7" + +superagent-es6-promise@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/superagent-es6-promise/-/superagent-es6-promise-1.0.0.tgz#f50730843d4698a84b1d6125e7ad57f18cc2fda4" + +superagent@^1.8.4: + version "1.8.5" + resolved "https://registry.yarnpkg.com/superagent/-/superagent-1.8.5.tgz#1c0ddc3af30e80eb84ebc05cb2122da8fe940b55" + dependencies: + component-emitter "~1.2.0" + cookiejar "2.0.6" + debug "2" + extend "3.0.0" + form-data "1.0.0-rc3" + formidable "~1.0.14" + methods "~1.1.1" + mime "1.3.4" + qs "2.3.3" + readable-stream "1.0.27-1" + reduce-component "1.0.1" + +supports-color@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-1.2.0.tgz#ff1ed1e61169d06b3cf2d588e188b18d8847e17e" + +supports-color@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a" + +supports-color@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-1.3.1.tgz#15758df09d8ff3b4acc307539fabe27095e1042d" + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + +supports-color@^3.1.0, supports-color@^3.1.1, supports-color@^3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-3.2.3.tgz#65ac0504b3954171d8a64946b2ae3cbb8a5f54f6" + dependencies: + has-flag "^1.0.0" + +svgo@^0.7.0: + version "0.7.1" + resolved "https://registry.yarnpkg.com/svgo/-/svgo-0.7.1.tgz#287320fed972cb097e72c2bb1685f96fe08f8034" + dependencies: + coa "~1.0.1" + colors "~1.1.2" + csso "~2.2.1" + js-yaml "~3.6.1" + mkdirp "~0.5.1" + sax "~1.2.1" + whet.extend "~0.9.9" + +symbol-observable@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/symbol-observable/-/symbol-observable-1.0.4.tgz#29bf615d4aa7121bdd898b22d4b3f9bc4e2aa03d" + +sync-exec@^0.6.2: + version "0.6.2" + resolved 
"https://registry.yarnpkg.com/sync-exec/-/sync-exec-0.6.2.tgz#717d22cc53f0ce1def5594362f3a89a2ebb91105" + +tapable@^0.1.8, tapable@~0.1.8: + version "0.1.10" + resolved "https://registry.yarnpkg.com/tapable/-/tapable-0.1.10.tgz#29c35707c2b70e50d07482b5d202e8ed446dafd4" + +tar-pack@~3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/tar-pack/-/tar-pack-3.3.0.tgz#30931816418f55afc4d21775afdd6720cee45dae" + dependencies: + debug "~2.2.0" + fstream "~1.0.10" + fstream-ignore "~1.0.5" + once "~1.3.3" + readable-stream "~2.1.4" + rimraf "~2.5.1" + tar "~2.2.1" + uid-number "~0.0.6" + +tar@~2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1" + dependencies: + block-stream "*" + fstream "^1.0.2" + inherits "2" + +through@~2.3.4: + version "2.3.8" + resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + +timers-browserify@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/timers-browserify/-/timers-browserify-2.0.2.tgz#ab4883cf597dcd50af211349a00fbca56ac86b86" + dependencies: + setimmediate "^1.0.4" + +tmatch@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/tmatch/-/tmatch-2.0.1.tgz#0c56246f33f30da1b8d3d72895abaf16660f38cf" + +to-array@0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890" + +to-arraybuffer@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz#7d229b1fcc637e466ca081180836a7aabff83f43" + +to-camel-case@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/to-camel-case/-/to-camel-case-1.0.0.tgz#1a56054b2f9d696298ce66a60897322b6f423e46" + dependencies: + to-space-case "^1.0.0" + +to-fast-properties@^1.0.1: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.2.tgz#f3f5c0c3ba7299a7ef99427e44633257ade43320" + +to-iso-string@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/to-iso-string/-/to-iso-string-0.0.2.tgz#4dc19e664dfccbe25bd8db508b00c6da158255d1" + +to-no-case@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/to-no-case/-/to-no-case-1.0.2.tgz#c722907164ef6b178132c8e69930212d1b4aa16a" + +to-space-case@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/to-space-case/-/to-space-case-1.0.0.tgz#b052daafb1b2b29dc770cea0163e5ec0ebc9fc17" + dependencies: + to-no-case "^1.0.0" + +toggle-selection@^1.0.3: + version "1.0.5" + resolved "https://registry.yarnpkg.com/toggle-selection/-/toggle-selection-1.0.5.tgz#726c703de607193a73c32c7df49cd24950fc574f" + +toposort@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/toposort/-/toposort-1.0.0.tgz#b66cf385a1a8a8e68e45b8259e7f55875e8b06ef" + +tough-cookie@~2.3.0: + version "2.3.2" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a" + dependencies: + punycode "^1.4.1" + +tty-browserify@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/tty-browserify/-/tty-browserify-0.0.0.tgz#a157ba402da24e9bf957f9aa69d524eed42901a6" + +tunnel-agent@~0.4.1: + version "0.4.3" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb" + +tweetnacl@^0.14.3, tweetnacl@~0.14.0: + version "0.14.5" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" + +type-is@~1.6.13, type-is@~1.6.14: + version "1.6.14" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.14.tgz#e219639c17ded1ca0789092dd54a03826b817cb2" + dependencies: + media-typer "0.3.0" + mime-types "~2.1.13" + +ua-parser-js@^0.7.9: + version "0.7.12" + resolved 
"https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.12.tgz#04c81a99bdd5dc52263ea29d24c6bf8d4818a4bb" + +uglify-js@2.7.x, uglify-js@~2.7.3: + version "2.7.5" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.7.5.tgz#4612c0c7baaee2ba7c487de4904ae122079f2ca8" + dependencies: + async "~0.2.6" + source-map "~0.5.1" + uglify-to-browserify "~1.0.0" + yargs "~3.10.0" + +uglify-to-browserify@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7" + +uglifyjs@^2.4.10: + version "2.4.10" + resolved "https://registry.yarnpkg.com/uglifyjs/-/uglifyjs-2.4.10.tgz#632927319fa6a3da3fc91f9773ac27bfe6c3ee92" + dependencies: + async "~0.2.6" + source-map "0.1.34" + uglify-to-browserify "~1.0.0" + yargs "~1.3.3" + +uid-number@~0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81" + +ultron@1.0.x: + version "1.0.2" + resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.0.2.tgz#ace116ab557cd197386a4e88f4685378c8b2e4fa" + +uncontrollable@^3.1.3: + version "3.3.1" + resolved "https://registry.yarnpkg.com/uncontrollable/-/uncontrollable-3.3.1.tgz#e23b402e7a4c69b1853fb4b43ce34b6480c65b6f" + dependencies: + invariant "^2.1.0" + +uniq@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/uniq/-/uniq-1.0.1.tgz#b31c5ae8254844a3a8281541ce2b04b865a734ff" + +uniqid@^4.0.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/uniqid/-/uniqid-4.1.1.tgz#89220ddf6b751ae52b5f72484863528596bb84c1" + dependencies: + macaddress "^0.2.8" + +uniqs@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/uniqs/-/uniqs-2.0.0.tgz#ffede4b36b25290696e6e165d4a59edb998e6b02" + +unpipe@1.0.0, unpipe@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + +upper-case@^1.1.1: + version 
"1.1.3" + resolved "https://registry.yarnpkg.com/upper-case/-/upper-case-1.1.3.tgz#f6b4501c2ec4cdd26ba78be7222961de77621598" + +urix@^0.1.0, urix@~0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72" + +url-loader@^0.5.7: + version "0.5.7" + resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-0.5.7.tgz#67e8779759f8000da74994906680c943a9b0925d" + dependencies: + loader-utils "0.2.x" + mime "1.2.x" + +url-parse@1.0.x: + version "1.0.5" + resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.0.5.tgz#0854860422afdcfefeb6c965c662d4800169927b" + dependencies: + querystringify "0.0.x" + requires-port "1.0.x" + +url-parse@^1.1.1: + version "1.1.7" + resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.1.7.tgz#025cff999653a459ab34232147d89514cc87d74a" + dependencies: + querystringify "0.0.x" + requires-port "1.0.x" + +url@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/url/-/url-0.11.0.tgz#3838e97cfc60521eb73c525a8e55bfdd9e2e28f1" + dependencies: + punycode "1.3.2" + querystring "0.2.0" + +user-home@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/user-home/-/user-home-1.1.1.tgz#2b5be23a32b63a7c9deb8d0f28d485724a3df190" + +user-home@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/user-home/-/user-home-2.0.0.tgz#9c70bfd8169bc1dcbf48604e0f04b8b49cde9e9f" + dependencies: + os-homedir "^1.0.0" + +useragent@^2.1.6: + version "2.1.11" + resolved "https://registry.yarnpkg.com/useragent/-/useragent-2.1.11.tgz#6a026e6a6c619b46ca7a0b2fdef6c1ac3da8ca29" + dependencies: + lru-cache "2.2.x" + +util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + +util@0.10.3, util@^0.10.3: + version "0.10.3" + resolved "https://registry.yarnpkg.com/util/-/util-0.10.3.tgz#7afb1afe50805246489e3db7fe0ed379336ac0f9" + dependencies: 
+ inherits "2.0.1" + +utila@~0.3: + version "0.3.3" + resolved "https://registry.yarnpkg.com/utila/-/utila-0.3.3.tgz#d7e8e7d7e309107092b05f8d9688824d633a4226" + +utila@~0.4: + version "0.4.0" + resolved "https://registry.yarnpkg.com/utila/-/utila-0.4.0.tgz#8a16a05d445657a3aea5eecc5b12a4fa5379772c" + +utils-merge@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.0.tgz#0294fb922bb9375153541c4f7096231f287c8af8" + +uuid@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a" + +uuid@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.0.1.tgz#6544bba2dfda8c1cf17e629a3a305e2bb1fee6c1" + +v8flags@^2.0.10: + version "2.0.11" + resolved "https://registry.yarnpkg.com/v8flags/-/v8flags-2.0.11.tgz#bca8f30f0d6d60612cc2c00641e6962d42ae6881" + dependencies: + user-home "^1.1.1" + +vary@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.0.tgz#e1e5affbbd16ae768dd2674394b9ad3022653140" + +vendors@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/vendors/-/vendors-1.0.1.tgz#37ad73c8ee417fb3d580e785312307d274847f22" + +verror@1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c" + dependencies: + extsprintf "1.0.2" + +vm-browserify@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/vm-browserify/-/vm-browserify-0.0.4.tgz#5d7ea45bbef9e4a6ff65f95438e0a87c357d5a73" + dependencies: + indexof "0.0.1" + +void-elements@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/void-elements/-/void-elements-2.0.1.tgz#c066afb582bb1cb4128d60ea92392e94d5e9dbec" + +warning@^2.0.0, warning@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/warning/-/warning-2.1.0.tgz#21220d9c63afc77a8c92111e011af705ce0c6901" + dependencies: + loose-envify "^1.0.0" + +warning@^3.0.0: + version "3.0.0" + 
resolved "https://registry.yarnpkg.com/warning/-/warning-3.0.0.tgz#32e5377cb572de4ab04753bdf8821c01ed605b7c" + dependencies: + loose-envify "^1.0.0" + +watchpack@^0.2.1: + version "0.2.9" + resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-0.2.9.tgz#62eaa4ab5e5ba35fdfc018275626e3c0f5e3fb0b" + dependencies: + async "^0.9.0" + chokidar "^1.0.0" + graceful-fs "^4.1.2" + +webpack-core@~0.6.9: + version "0.6.9" + resolved "https://registry.yarnpkg.com/webpack-core/-/webpack-core-0.6.9.tgz#fc571588c8558da77be9efb6debdc5a3b172bdc2" + dependencies: + source-list-map "~0.1.7" + source-map "~0.4.1" + +webpack-dev-middleware@^1.0.11, webpack-dev-middleware@^1.4.0: + version "1.9.0" + resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-1.9.0.tgz#a1c67a3dfd8a5c5d62740aa0babe61758b4c84aa" + dependencies: + memory-fs "~0.4.1" + mime "^1.3.4" + path-is-absolute "^1.0.0" + range-parser "^1.0.3" + +webpack-dev-server@^1.14.1: + version "1.16.2" + resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-1.16.2.tgz#8bebc2c4ce1c45a15c72dd769d9ba08db306a793" + dependencies: + compression "^1.5.2" + connect-history-api-fallback "^1.3.0" + express "^4.13.3" + http-proxy-middleware "~0.17.1" + open "0.0.5" + optimist "~0.6.1" + serve-index "^1.7.2" + sockjs "^0.3.15" + sockjs-client "^1.0.3" + stream-cache "~0.0.1" + strip-ansi "^3.0.0" + supports-color "^3.1.1" + webpack-dev-middleware "^1.4.0" + +webpack-sources@^0.1.0: + version "0.1.4" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-0.1.4.tgz#ccc2c817e08e5fa393239412690bb481821393cd" + dependencies: + source-list-map "~0.1.7" + source-map "~0.5.3" + +webpack@^1.12.11: + version "1.14.0" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-1.14.0.tgz#54f1ffb92051a328a5b2057d6ae33c289462c823" + dependencies: + acorn "^3.0.0" + async "^1.3.0" + clone "^1.0.2" + enhanced-resolve "~0.9.0" + interpret "^0.6.4" + loader-utils "^0.2.11" + 
memory-fs "~0.3.0" + mkdirp "~0.5.0" + node-libs-browser "^0.7.0" + optimist "~0.6.0" + supports-color "^3.1.0" + tapable "~0.1.8" + uglify-js "~2.7.3" + watchpack "^0.2.1" + webpack-core "~0.6.9" + +websocket-driver@>=0.5.1: + version "0.6.5" + resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.6.5.tgz#5cb2556ceb85f4373c6d8238aa691c8454e13a36" + dependencies: + websocket-extensions ">=0.1.1" + +websocket-extensions@>=0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.1.tgz#76899499c184b6ef754377c2dbb0cd6cb55d29e7" + +whatwg-fetch@^0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-0.9.0.tgz#0e3684c6cb9995b43efc9df03e4c365d95fd9cc0" + +whet.extend@~0.9.9: + version "0.9.9" + resolved "https://registry.yarnpkg.com/whet.extend/-/whet.extend-0.9.9.tgz#f877d5bf648c97e5aa542fadc16d6a259b9c11a1" + +which@^1.0.5, which@^1.2.1, which@^1.2.4: + version "1.2.12" + resolved "https://registry.yarnpkg.com/which/-/which-1.2.12.tgz#de67b5e450269f194909ef23ece4ebe416fa1192" + dependencies: + isexe "^1.1.1" + +wide-align@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.0.tgz#40edde802a71fea1f070da3e62dcda2e7add96ad" + dependencies: + string-width "^1.0.1" + +window-size@0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d" + +window-size@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876" + +wordwrap@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f" + +wordwrap@~0.0.2: + version "0.0.3" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107" + +wrap-ansi@^2.0.0: + version "2.1.0" + 
resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85" + dependencies: + string-width "^1.0.1" + strip-ansi "^3.0.1" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + +ws@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.1.tgz#082ddb6c641e85d4bb451f03d52f06eabdb1f018" + dependencies: + options ">=0.0.5" + ultron "1.0.x" + +wtf-8@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wtf-8/-/wtf-8-1.0.0.tgz#392d8ba2d0f1c34d1ee2d630f15d0efb68e1048a" + +xml-char-classes@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/xml-char-classes/-/xml-char-classes-1.0.0.tgz#64657848a20ffc5df583a42ad8a277b4512bbc4d" + +xmlhttprequest-ssl@1.5.3: + version "1.5.3" + resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.3.tgz#185a888c04eca46c3e4070d99f7b49de3528992d" + +xtend@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af" + +y18n@^3.2.0: + version "3.2.1" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41" + +yargs@^3.10.0: + version "3.32.0" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.32.0.tgz#03088e9ebf9e756b69751611d2a5ef591482c995" + dependencies: + camelcase "^2.0.1" + cliui "^3.0.3" + decamelize "^1.1.1" + os-locale "^1.4.0" + string-width "^1.0.1" + window-size "^0.1.4" + y18n "^3.2.0" + +yargs@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-1.3.3.tgz#054de8b61f22eefdb7207059eaef9d6b83fb931a" + +yargs@~3.10.0: + version "3.10.0" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1" + dependencies: + camelcase "^1.0.2" + cliui "^2.1.0" + decamelize "^1.0.0" + window-size "0.1.0" + +yeast@0.1.2: + version 
"0.1.2" + resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419" From 659d5aabd14c670741e3b4ea9af48d425ba23177 Mon Sep 17 00:00:00 2001 From: Krishna Srinivas Date: Tue, 24 Jan 2017 12:08:00 -0800 Subject: [PATCH 099/100] browser: Access to private paths redirects to login. (#3622) --- browser/app/js/actions.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/browser/app/js/actions.js b/browser/app/js/actions.js index 598124b84..979f1ad97 100644 --- a/browser/app/js/actions.js +++ b/browser/app/js/actions.js @@ -16,10 +16,13 @@ import url from 'url' import Moment from 'moment' +import browserHistory from 'react-router/lib/browserHistory' import web from './web' import * as utils from './utils' import storage from 'local-storage-fallback' +import { minioBrowserPrefix } from './constants' + export const SET_WEB = 'SET_WEB' export const SET_CURRENT_BUCKET = 'SET_CURRENT_BUCKET' export const SET_CURRENT_PATH = 'SET_CURRENT_PATH' @@ -311,6 +314,8 @@ export const selectPrefix = prefix => { })) dispatch(setLoadBucket('')) dispatch(setLoadPath('')) + // Use browserHistory.replace instead of push so that browser back button works fine. 
+ browserHistory.replace(`${minioBrowserPrefix}/login`) }) } } From d41dcb784b4a40639525c27841b9250c123dcb56 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 24 Jan 2017 18:07:31 -0800 Subject: [PATCH 100/100] Move to blake2b-simd due to perf problems in golang.org/x/crypto Ref https://github.com/golang/go/issues/18563 --- cmd/erasure-utils.go | 6 +- .../github.com/minio/blake2b-simd/blake2b.go | 301 ++++ .../minio/blake2b-simd/compressAvx2_amd64.go | 47 + .../minio/blake2b-simd/compressAvx2_amd64.s | 671 ++++++++ .../minio/blake2b-simd/compressAvx_amd64.go | 41 + .../minio/blake2b-simd/compressAvx_amd64.s | 682 ++++++++ .../minio/blake2b-simd/compressSse_amd64.go | 41 + .../minio/blake2b-simd/compressSse_amd64.s | 770 +++++++++ .../minio/blake2b-simd/compress_amd64.go | 30 + .../minio/blake2b-simd/compress_generic.go | 1419 +++++++++++++++++ .../minio/blake2b-simd/compress_noasm.go | 23 + vendor/github.com/minio/blake2b-simd/cpuid.go | 60 + .../github.com/minio/blake2b-simd/cpuid_386.s | 33 + .../minio/blake2b-simd/cpuid_amd64.s | 34 + vendor/golang.org/x/crypto/blake2b/blake2b.go | 194 --- .../x/crypto/blake2b/blake2bAVX2_amd64.go | 43 - .../x/crypto/blake2b/blake2bAVX2_amd64.s | 502 ------ .../x/crypto/blake2b/blake2b_amd64.go | 25 - .../x/crypto/blake2b/blake2b_amd64.s | 290 ---- .../x/crypto/blake2b/blake2b_generic.go | 179 --- .../x/crypto/blake2b/blake2b_ref.go | 11 - .../x/crypto/blake2b/blake2b_test.go | 448 ------ vendor/vendor.json | 6 + 23 files changed, 4161 insertions(+), 1695 deletions(-) create mode 100644 vendor/github.com/minio/blake2b-simd/blake2b.go create mode 100644 vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go create mode 100644 vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.s create mode 100644 vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go create mode 100644 vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s create mode 100644 vendor/github.com/minio/blake2b-simd/compressSse_amd64.go create 
mode 100644 vendor/github.com/minio/blake2b-simd/compressSse_amd64.s create mode 100644 vendor/github.com/minio/blake2b-simd/compress_amd64.go create mode 100644 vendor/github.com/minio/blake2b-simd/compress_generic.go create mode 100644 vendor/github.com/minio/blake2b-simd/compress_noasm.go create mode 100644 vendor/github.com/minio/blake2b-simd/cpuid.go create mode 100644 vendor/github.com/minio/blake2b-simd/cpuid_386.s create mode 100644 vendor/github.com/minio/blake2b-simd/cpuid_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_test.go diff --git a/cmd/erasure-utils.go b/cmd/erasure-utils.go index 814df1c47..d01ea731a 100644 --- a/cmd/erasure-utils.go +++ b/cmd/erasure-utils.go @@ -24,8 +24,8 @@ import ( "sync" "github.com/klauspost/reedsolomon" + "github.com/minio/blake2b-simd" "github.com/minio/sha256-simd" - "golang.org/x/crypto/blake2b" ) // newHashWriters - inititialize a slice of hashes for the disk count. @@ -48,14 +48,14 @@ func newHash(algo string) (h hash.Hash) { // ignore the error, because New512 without a key never fails // New512 only returns a non-nil error, if the length of the passed // key > 64 bytes - but we use blake2b as hash function (no key) - h, _ = blake2b.New512(nil) + h = blake2b.New512() // Add new hashes here. default: // Default to blake2b. 
// ignore the error, because New512 without a key never fails // New512 only returns a non-nil error, if the length of the passed // key > 64 bytes - but we use blake2b as hash function (no key) - h, _ = blake2b.New512(nil) + h = blake2b.New512() } return h } diff --git a/vendor/github.com/minio/blake2b-simd/blake2b.go b/vendor/github.com/minio/blake2b-simd/blake2b.go new file mode 100644 index 000000000..538466a1a --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/blake2b.go @@ -0,0 +1,301 @@ +// Written in 2012 by Dmitry Chestnykh. +// +// To the extent possible under law, the author have dedicated all copyright +// and related and neighboring rights to this software to the public domain +// worldwide. This software is distributed without any warranty. +// http://creativecommons.org/publicdomain/zero/1.0/ + +// Package blake2b implements BLAKE2b cryptographic hash function. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + BlockSize = 128 // block size of algorithm + Size = 64 // maximum digest size + SaltSize = 16 // maximum salt size + PersonSize = 16 // maximum personalization string size + KeySize = 64 // maximum size of key +) + +type digest struct { + h [8]uint64 // current chain value + t [2]uint64 // message bytes counter + f [2]uint64 // finalization flags + x [BlockSize]byte // buffer for data not yet compressed + nx int // number of bytes in buffer + + ih [8]uint64 // initial chain value (after config) + paddedKey [BlockSize]byte // copy of key, padded with zeros + isKeyed bool // indicates whether hash was keyed + size uint8 // digest size in bytes + isLastNode bool // indicates processing of the last node in tree hashing +} + +// Initialization values. +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, + 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, + 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Config is used to configure hash function parameters and keying. 
+// All parameters are optional. +type Config struct { + Size uint8 // digest size (if zero, default size of 64 bytes is used) + Key []byte // key for prefix-MAC + Salt []byte // salt (if < 16 bytes, padded with zeros) + Person []byte // personalization (if < 16 bytes, padded with zeros) + Tree *Tree // parameters for tree hashing +} + +// Tree represents parameters for tree hashing. +type Tree struct { + Fanout uint8 // fanout + MaxDepth uint8 // maximal depth + LeafSize uint32 // leaf maximal byte length (0 for unlimited) + NodeOffset uint64 // node offset (0 for first, leftmost or leaf) + NodeDepth uint8 // node depth (0 for leaves) + InnerHashSize uint8 // inner hash byte length + IsLastNode bool // indicates processing of the last node of layer +} + +var ( + defaultConfig = &Config{Size: Size} + config256 = &Config{Size: 32} +) + +func verifyConfig(c *Config) error { + if c.Size > Size { + return errors.New("digest size is too large") + } + if len(c.Key) > KeySize { + return errors.New("key is too large") + } + if len(c.Salt) > SaltSize { + // Smaller salt is okay: it will be padded with zeros. + return errors.New("salt is too large") + } + if len(c.Person) > PersonSize { + // Smaller personalization is okay: it will be padded with zeros. + return errors.New("personalization is too large") + } + if c.Tree != nil { + if c.Tree.Fanout == 1 { + return errors.New("fanout of 1 is not allowed in tree mode") + } + if c.Tree.MaxDepth < 2 { + return errors.New("incorrect tree depth") + } + if c.Tree.InnerHashSize < 1 || c.Tree.InnerHashSize > Size { + return errors.New("incorrect tree inner hash size") + } + } + return nil +} + +// New returns a new hash.Hash configured with the given Config. +// Config can be nil, in which case the default one is used, calculating 64-byte digest. +// Returns non-nil error if Config contains invalid parameters. 
+func New(c *Config) (hash.Hash, error) { + if c == nil { + c = defaultConfig + } else { + if c.Size == 0 { + // Set default size if it's zero. + c.Size = Size + } + if err := verifyConfig(c); err != nil { + return nil, err + } + } + d := new(digest) + d.initialize(c) + return d, nil +} + +// initialize initializes digest with the given +// config, which must be non-nil and verified. +func (d *digest) initialize(c *Config) { + // Create parameter block. + var p [BlockSize]byte + p[0] = c.Size + p[1] = uint8(len(c.Key)) + if c.Salt != nil { + copy(p[32:], c.Salt) + } + if c.Person != nil { + copy(p[48:], c.Person) + } + if c.Tree != nil { + p[2] = c.Tree.Fanout + p[3] = c.Tree.MaxDepth + binary.LittleEndian.PutUint32(p[4:], c.Tree.LeafSize) + binary.LittleEndian.PutUint64(p[8:], c.Tree.NodeOffset) + p[16] = c.Tree.NodeDepth + p[17] = c.Tree.InnerHashSize + } else { + p[2] = 1 + p[3] = 1 + } + + // Initialize. + d.size = c.Size + for i := 0; i < 8; i++ { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(p[i*8:]) + } + if c.Tree != nil && c.Tree.IsLastNode { + d.isLastNode = true + } + + // Process key. + if c.Key != nil { + copy(d.paddedKey[:], c.Key) + d.Write(d.paddedKey[:]) + d.isKeyed = true + } + // Save a copy of initialized state. + copy(d.ih[:], d.h[:]) +} + +// New512 returns a new hash.Hash computing the BLAKE2b 64-byte checksum. +func New512() hash.Hash { + d := new(digest) + d.initialize(defaultConfig) + return d +} + +// New256 returns a new hash.Hash computing the BLAKE2b 32-byte checksum. +func New256() hash.Hash { + d := new(digest) + d.initialize(config256) + return d +} + +// NewMAC returns a new hash.Hash computing BLAKE2b prefix- +// Message Authentication Code of the given size in bytes +// (up to 64) with the given key (up to 64 bytes in length). 
+func NewMAC(outBytes uint8, key []byte) hash.Hash { + d, err := New(&Config{Size: outBytes, Key: key}) + if err != nil { + panic(err.Error()) + } + return d +} + +// Reset resets the state of digest to the initial state +// after configuration and keying. +func (d *digest) Reset() { + copy(d.h[:], d.ih[:]) + d.t[0] = 0 + d.t[1] = 0 + d.f[0] = 0 + d.f[1] = 0 + d.nx = 0 + if d.isKeyed { + d.Write(d.paddedKey[:]) + } +} + +// Size returns the digest size in bytes. +func (d *digest) Size() int { return int(d.size) } + +// BlockSize returns the algorithm block size in bytes. +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + left := BlockSize - d.nx + if len(p) > left { + // Process buffer. + copy(d.x[d.nx:], p[:left]) + p = p[left:] + compress(d, d.x[:]) + d.nx = 0 + } + // Process full blocks except for the last one. + if len(p) > BlockSize { + n := len(p) &^ (BlockSize - 1) + if n == len(p) { + n -= BlockSize + } + compress(d, p[:n]) + p = p[n:] + } + // Fill buffer. + d.nx += copy(d.x[d.nx:], p) + return +} + +// Sum returns the calculated checksum. +func (d *digest) Sum(in []byte) []byte { + // Make a copy of d so that caller can keep writing and summing. + d0 := *d + hash := d0.checkSum() + return append(in, hash[:d0.size]...) +} + +func (d *digest) checkSum() [Size]byte { + // Do not create unnecessary copies of the key. + if d.isKeyed { + for i := 0; i < len(d.paddedKey); i++ { + d.paddedKey[i] = 0 + } + } + + dec := BlockSize - uint64(d.nx) + if d.t[0] < dec { + d.t[1]-- + } + d.t[0] -= dec + + // Pad buffer with zeros. + for i := d.nx; i < len(d.x); i++ { + d.x[i] = 0 + } + // Set last block flag. + d.f[0] = 0xffffffffffffffff + if d.isLastNode { + d.f[1] = 0xffffffffffffffff + } + // Compress last block. 
+ compress(d, d.x[:]) + + var out [Size]byte + j := 0 + for _, s := range d.h[:(d.size-1)/8+1] { + out[j+0] = byte(s >> 0) + out[j+1] = byte(s >> 8) + out[j+2] = byte(s >> 16) + out[j+3] = byte(s >> 24) + out[j+4] = byte(s >> 32) + out[j+5] = byte(s >> 40) + out[j+6] = byte(s >> 48) + out[j+7] = byte(s >> 56) + j += 8 + } + return out +} + +// Sum512 returns a 64-byte BLAKE2b hash of data. +func Sum512(data []byte) [64]byte { + var d digest + d.initialize(defaultConfig) + d.Write(data) + return d.checkSum() +} + +// Sum256 returns a 32-byte BLAKE2b hash of data. +func Sum256(data []byte) (out [32]byte) { + var d digest + d.initialize(config256) + d.Write(data) + sum := d.checkSum() + copy(out[:], sum[:32]) + return +} diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go b/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go new file mode 100644 index 000000000..ec53599f8 --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.go @@ -0,0 +1,47 @@ +//+build !noasm +//+build !appengine + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package blake2b + +//go:noescape +func compressAVX2Loop(p []uint8, in, iv, t, f, shffle, out []uint64) + +func compressAVX2(d *digest, p []uint8) { + var ( + in [8]uint64 + out [8]uint64 + shffle [8]uint64 + ) + + // vector for PSHUFB instruction + shffle[0] = 0x0201000706050403 + shffle[1] = 0x0a09080f0e0d0c0b + shffle[2] = 0x0201000706050403 + shffle[3] = 0x0a09080f0e0d0c0b + shffle[4] = 0x0100070605040302 + shffle[5] = 0x09080f0e0d0c0b0a + shffle[6] = 0x0100070605040302 + shffle[7] = 0x09080f0e0d0c0b0a + + in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7] = d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] + + compressAVX2Loop(p, in[:], iv[:], d.t[:], d.f[:], shffle[:], out[:]) + + d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7] +} diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.s b/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.s new file mode 100644 index 000000000..24df234b5 --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/compressAvx2_amd64.s @@ -0,0 +1,671 @@ +//+build !noasm !appengine + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// +// Based on AVX2 implementation from https://github.com/sneves/blake2-avx2/blob/master/blake2b-common.h +// +// Use github.com/fwessels/asm2plan9s on this file to assemble instructions to their Plan9 equivalent +// +// Assembly code below essentially follows the ROUND macro (see blake2b-round.h) which is defined as: +// #define ROUND(r) \ +// LOAD_MSG_ ##r ##_1(b0, b1); \ +// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// LOAD_MSG_ ##r ##_2(b0, b1); \ +// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ +// LOAD_MSG_ ##r ##_3(b0, b1); \ +// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// LOAD_MSG_ ##r ##_4(b0, b1); \ +// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); +// +// as well as the go equivalent in https://github.com/dchest/blake2b/blob/master/block.go +// +// As in the macro, G1/G2 in the 1st and 2nd half are identical (so literal copy of assembly) +// +// Rounds are also the same, except for the loading of the message (and rounds 1 & 11 and +// rounds 2 & 12 are identical) +// + +#define G1 \ + \ // G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); + BYTE $0xc5; BYTE $0xfd; BYTE $0xd4; BYTE $0xc4 \ // VPADDQ YMM0,YMM0,YMM4 /* v0 += m[0], v1 += m[2], v2 += m[4], v3 += m[6] */ + BYTE $0xc5; BYTE $0xfd; BYTE $0xd4; BYTE $0xc1 \ // VPADDQ YMM0,YMM0,YMM1 /* v0 += v4, v1 += v5, v2 += v6, v3 += v7 */ + BYTE $0xc5; BYTE $0xe5; BYTE $0xef; BYTE $0xd8 \ // VPXOR YMM3,YMM3,YMM0 /* v12 ^= v0, v13 ^= v1, v14 ^= v2, v15 ^= v3 */ + BYTE $0xc5; BYTE $0xfd; BYTE $0x70; BYTE $0xdb; BYTE $0xb1 \ // VPSHUFD YMM3,YMM3,0xb1 /* v12 = v12<<(64-32) | v12>>32, v13 = */ + BYTE $0xc5; BYTE $0xed; BYTE $0xd4; BYTE $0xd3 \ // VPADDQ YMM2,YMM2,YMM3 /* v8 += v12, v9 += v13, v10 += v14, v11 += v15 */ + BYTE $0xc5; BYTE $0xf5; BYTE $0xef; BYTE $0xca \ // VPXOR YMM1,YMM1,YMM2 
/* v4 ^= v8, v5 ^= v9, v6 ^= v10, v7 ^= v11 */ + BYTE $0xc4; BYTE $0xe2; BYTE $0x75; BYTE $0x00; BYTE $0xce // VPSHUFB YMM1,YMM1,YMM6 /* v4 = v4<<(64-24) | v4>>24, ..., ..., v7 = v7<<(64-24) | v7>>24 */ + +#define G2 \ + BYTE $0xc5; BYTE $0xfd; BYTE $0xd4; BYTE $0xc5 \ // VPADDQ YMM0,YMM0,YMM5 /* v0 += m[1], v1 += m[3], v2 += m[5], v3 += m[7] */ + BYTE $0xc5; BYTE $0xfd; BYTE $0xd4; BYTE $0xc1 \ // VPADDQ YMM0,YMM0,YMM1 /* v0 += v4, v1 += v5, v2 += v6, v3 += v7 */ + BYTE $0xc5; BYTE $0xe5; BYTE $0xef; BYTE $0xd8 \ // VPXOR YMM3,YMM3,YMM0 /* v12 ^= v0, v13 ^= v1, v14 ^= v2, v15 ^= v3 */ + BYTE $0xc4; BYTE $0xe2; BYTE $0x65; BYTE $0x00; BYTE $0xdf \ // VPSHUFB YMM3,YMM3,YMM7 /* v12 = v12<<(64-16) | v12>>16, ..., ..., v15 = v15<<(64-16) | v15>>16 */ + BYTE $0xc5; BYTE $0xed; BYTE $0xd4; BYTE $0xd3 \ // VPADDQ YMM2,YMM2,YMM3 /* v8 += v12, v9 += v13, v10 += v14, v11 += v15 */ + BYTE $0xc5; BYTE $0xf5; BYTE $0xef; BYTE $0xca \ // VPXOR YMM1,YMM1,YMM2 /* v4 ^= v8, v5 ^= v9, v6 ^= v10, v7 ^= v11 */ + BYTE $0xc5; BYTE $0x75; BYTE $0xd4; BYTE $0xf9 \ // VPADDQ YMM15,YMM1,YMM1 /* temp reg = reg*2 */ + BYTE $0xc5; BYTE $0xf5; BYTE $0x73; BYTE $0xd1; BYTE $0x3f \ // VPSRLQ YMM1,YMM1,0x3f /* reg = reg>>63 */ + BYTE $0xc4; BYTE $0xc1; BYTE $0x75; BYTE $0xef; BYTE $0xcf // VPXOR YMM1,YMM1,YMM15 /* ORed together: v4 = v4<<(64-63) | v4>>63, v5 = v5<<(64-63) | v5>>63 */ + +#define DIAGONALIZE \ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb \ // VPERMQ YMM3, YMM3, 0x93 + BYTE $0x93 \ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2 \ // VPERMQ YMM2, YMM2, 0x4e + BYTE $0x4e \ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 \ // VPERMQ YMM1, YMM1, 0x39 + BYTE $0x39 \ + // DO NOT DELETE -- macro delimiter (previous line extended) + +#define UNDIAGONALIZE \ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb \ // VPERMQ YMM3, YMM3, 0x39 + BYTE $0x39 \ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2 \ // VPERMQ YMM2, YMM2, 0x4e 
+ BYTE $0x4e \ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 \ // VPERMQ YMM1, YMM1, 0x93 + BYTE $0x93 \ + // DO NOT DELETE -- macro delimiter (previous line extended) + +#define LOAD_SHUFFLE \ + MOVQ shffle+120(FP), SI \ // SI: &shuffle + BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x36 \ // VMOVDQU YMM6, [rsi] + BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x7e; BYTE $0x20 // VMOVDQU YMM7, 32[rsi] + +// func compressAVX2Loop(compressSSE(p []uint8, in, iv, t, f, shffle, out []uint64) +TEXT ·compressAVX2Loop(SB), 7, $0 + + // REGISTER USE + // Y0 - Y3: v0 - v15 + // Y4 - Y5: m[0] - m[7] + // Y6 - Y7: shuffle value + // Y8 - Y9: temp registers + // Y10 -Y13: copy of full message + // Y15: temp register + + // Load digest + MOVQ in+24(FP), SI // SI: &in + BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x06 // VMOVDQU YMM0, [rsi] + BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x4e; BYTE $0x20 // VMOVDQU YMM1, 32[rsi] + + // Already store digest into &out (so we can reload it later generically) + MOVQ out+144(FP), SI // SI: &out + BYTE $0xc5; BYTE $0xfe; BYTE $0x7f; BYTE $0x06 // VMOVDQU [rsi], YMM0 + BYTE $0xc5; BYTE $0xfe; BYTE $0x7f; BYTE $0x4e; BYTE $0x20 // VMOVDQU 32[rsi], YMM1 + + // Initialize message pointer and loop counter + MOVQ message+0(FP), DX // DX: &p (message) + MOVQ message_len+8(FP), R8 // R8: len(message) + SHRQ $7, R8 // len(message) / 128 + CMPQ R8, $0 + JEQ complete + +loop: + // Increment counter + MOVQ t+72(FP), SI // SI: &t + MOVQ 0(SI), R9 // + ADDQ $128, R9 // /* d.t[0] += BlockSize */ + MOVQ R9, 0(SI) // + CMPQ R9, $128 // /* if d.t[0] < BlockSize { */ + JGE noincr // + MOVQ 8(SI), R9 // + ADDQ $1, R9 // /* d.t[1]++ */ + MOVQ R9, 8(SI) // +noincr: // /* } */ + + // Load initialization vector + MOVQ iv+48(FP), SI // SI: &iv + BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x16 // VMOVDQU YMM2, [rsi] + BYTE $0xc5; BYTE $0xfe; BYTE $0x6f; BYTE $0x5e; BYTE $0x20 // VMOVDQU YMM3, 32[rsi] + MOVQ t+72(FP), SI // SI: &t + BYTE $0xc4; BYTE $0x63; BYTE 
$0x3d; BYTE $0x38; BYTE $0x06 // VINSERTI128 YMM8, YMM8, [rsi], 0 /* Y8 = t[0]+t[1] */ + BYTE $0x00 + MOVQ t+96(FP), SI // SI: &f + BYTE $0xc4; BYTE $0x63; BYTE $0x3d; BYTE $0x38; BYTE $0x06 // VINSERTI128 YMM8, YMM8, [rsi], 1 /* Y8 = t[0]+t[1]+f[0]+f[1] */ + BYTE $0x01 + BYTE $0xc4; BYTE $0xc1; BYTE $0x65; BYTE $0xef; BYTE $0xd8 // VPXOR YMM3,YMM3,YMM8 /* Y3 = Y3 ^ Y8 */ + + BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x12 // VMOVDQU YMM10, [rdx] /* Y10 = m[0]+ m[1]+ m[2]+ m[3] */ + BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x5a; BYTE $0x20 // VMOVDQU YMM11, 32[rdx] /* Y11 = m[4]+ m[5]+ m[6]+ m[7] */ + BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x62; BYTE $0x40 // VMOVDQU YMM12, 64[rdx] /* Y12 = m[8]+ m[9]+m[10]+m[11] */ + BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x6a; BYTE $0x60 // VMOVDQU YMM13, 96[rdx] /* Y13 = m[12]+m[13]+m[14]+m[15] */ + + LOAD_SHUFFLE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0xc1; BYTE $0x2d; BYTE $0x6c; BYTE $0xe3 // VPUNPCKLQDQ YMM4, YMM10, YMM11 /* m[0], m[4], m[2], m[6] */ + BYTE $0xc4; BYTE $0xc1; BYTE $0x2d; BYTE $0x6d; BYTE $0xeb // VPUNPCKHQDQ YMM5, YMM10, YMM11 /* m[1], m[5], m[3], m[7] */ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xe4 // VPERMQ YMM4, YMM4, 0xd8 /* 0x1101 1000 = 0xd8 */ + BYTE $0xd8 + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xed // VPERMQ YMM5, YMM5, 0xd8 /* 0x1101 1000 = 0xd8 */ + BYTE $0xd8 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0xc1; BYTE $0x1d; BYTE $0x6c; BYTE $0xe5 // VPUNPCKLQDQ YMM4, YMM12, YMM13 /* m[8], m[12], m[10], m[14] */ + BYTE $0xc4; BYTE $0xc1; BYTE $0x1d; BYTE $0x6d; BYTE $0xed // VPUNPCKHQDQ YMM5, YMM12, YMM13 /* m[9], m[13], m[11], m[15] */ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xe4 // VPERMQ YMM4, YMM4, 0xd8 /* 0x1101 1000 = 0xd8 */ + BYTE $0xd8 + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE 
$0x00; BYTE $0xed // VPERMQ YMM5, YMM5, 0xd8 /* 0x1101 1000 = 0xd8 */ + BYTE $0xd8 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 2 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ YMM8, YMM11, YMM13 /* m[4], ____, ____, m[14] */ + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x03 /* m[14], m[4], ____, ____ */ /* xxxx 0011 = 0x03 */ + BYTE $0x03 + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xcd // VPUNPCKHQDQ YMM9, YMM12, YMM13 /* m[9], m[13], ____, ____ */ + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 /* m[9], m[13], ____, ____ */ /* 0010 0000 = 0x20 */ + BYTE $0x20 + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc4 // VPERMQ YMM8, YMM12, 0x02 /* m[10], m[8], ____, ____ */ /* xxxx 0010 = 0x02 */ + BYTE $0x02 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0x30 /* ____, ____, m[15], ____ */ /* xx11 xxxx = 0x30 */ + BYTE $0x30 + BYTE $0xc4; BYTE $0x41; BYTE $0x35; BYTE $0x6c; BYTE $0xcb // VPUNPCKLQDQ YMM9, YMM9, YMM11 /* ____, ____, m[15], m[6] */ + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 /* m[9], m[13], m[15], m[6] */ /* 0011 0000 = 0x30 */ + BYTE $0x30 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc2 // VPERMQ YMM8, YMM10, 0x01 /* m[1], m[0], ____, ____ */ /* xxxx 0001 = 0x01 */ + BYTE $0x01 + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ YMM9, YMM11, YMM12 /* m[5], ____, ____, m[11] */ + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x03 /* m[11], m[5], ____, ____ */ /* xxxx 0011 = 0x03 */ + BYTE $0x03 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 
YMM4, YMM8, YMM9, 0x20 /* m[1], m[0], m[11], m[5] */ /* 0010 0000 = 0x20 */ + BYTE $0x20 + + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ YMM8, YMM10, YMM13 /* ___, m[12], m[2], ____ */ + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x09 /* m[12], m[2], ____, ____ */ /* xxxx 1001 = 0x09 */ + BYTE $0x09 + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xca // VPUNPCKHQDQ YMM9, YMM11, YMM10 /* ____, ____, m[7], m[3] */ + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 /* m[9], m[13], m[15], m[6] */ /* 0011 0000 = 0x30 */ + BYTE $0x30 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 3 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc5 // VPERMQ YMM8, YMM13, 0x00 + BYTE $0x00 + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc0 // VPUNPCKHQDQ YMM8, YMM12, YMM8 + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xcd // VPUNPCKHQDQ YMM9, YMM11, YMM13 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x0c + BYTE $0x0c + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x21 + BYTE $0x21 + + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6c; BYTE $0xc2 // VPUNPCKLQDQ YMM8, YMM12, YMM10 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0x55 + BYTE $0x55 + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM10, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 + BYTE $0x30 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc2 // VPERMQ YMM8, YMM10, 0xff + BYTE $0xff + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM12, YMM8 + 
BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ YMM9, YMM11, YMM12 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x60 + BYTE $0x60 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x31 + BYTE $0x31 + + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc3 // VPUNPCKLQDQ YMM8, YMM13, YMM11 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcb // VPERMQ YMM9, YMM11, 0x00 + BYTE $0x00 + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xc9 // VPUNPCKHQDQ YMM9, YMM10, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x21 + BYTE $0x21 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 4 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xc2 // VPUNPCKHQDQ YMM8, YMM11, YMM10 + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ YMM9, YMM13, YMM12 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x0c + BYTE $0x0c + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x21 + BYTE $0x21 + + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc2 // VPUNPCKHQDQ YMM8, YMM12, YMM10 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0x08 + BYTE $0x08 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x20 + BYTE $0x20 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc3 // VPERMQ YMM8, YMM11, 0x55 + BYTE $0x55 + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM10, YMM8 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0xff + BYTE $0xff + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE 
$0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM11, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x21 + BYTE $0x21 + + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc4 // VPUNPCKLQDQ YMM8, YMM11, YMM12 + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xcc // VPUNPCKLQDQ YMM9, YMM10, YMM12 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x21 + BYTE $0x21 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 5 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc3 // VPUNPCKHQDQ YMM8, YMM12, YMM11 + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xcc // VPUNPCKLQDQ YMM9, YMM10, YMM12 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x30 + BYTE $0x30 + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc3 // VPERMQ YMM8, YMM11, 0xff + BYTE $0xff + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM10, YMM8 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0xff + BYTE $0xff + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM11, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x20 + BYTE $0x20 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc4 // VPERMQ YMM8, YMM12, 0xff + BYTE $0xff + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM13, YMM8 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xca // VPERMQ YMM9, YMM10, 0xff + BYTE $0xff + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM11, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x31 + 
BYTE $0x31 + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc5 // VPERMQ YMM8, YMM13, 0x00 + BYTE $0x00 + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xc0 // VPUNPCKHQDQ YMM8, YMM10, YMM8 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0x55 + BYTE $0x55 + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM12, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x20 + BYTE $0x20 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 6 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc3 // VPUNPCKLQDQ YMM8, YMM10, YMM11 + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xcc // VPUNPCKLQDQ YMM9, YMM10, YMM12 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x21 + BYTE $0x21 + + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc4 // VPUNPCKLQDQ YMM8, YMM13, YMM12 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x0c + BYTE $0x0c + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xca // VPUNPCKHQDQ YMM9, YMM12, YMM10 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 + BYTE $0x30 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc3 // VPERMQ YMM8, YMM11, 0x0c + BYTE $0x0c + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xca // VPUNPCKHQDQ YMM9, YMM13, YMM10 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x60 + BYTE $0x60 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x30 + BYTE $0x30 + + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xc3 // VPUNPCKHQDQ YMM8, YMM13, YMM11 + BYTE $0xc4; BYTE $0x43; BYTE 
$0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0x55 + BYTE $0x55 + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM13, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 + BYTE $0x30 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 7 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc2 // VPERMQ YMM8, YMM10, 0x55 + BYTE $0x55 + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM13, YMM8 + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xcb // VPUNPCKLQDQ YMM9, YMM13, YMM11 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x60 + BYTE $0x60 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x30 + BYTE $0x30 + + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xc5 // VPUNPCKHQDQ YMM8, YMM11, YMM13 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x0c + BYTE $0x0c + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0xaa + BYTE $0xaa + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xc9 // VPUNPCKHQDQ YMM9, YMM13, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x20 + BYTE $0x20 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc3 // VPUNPCKLQDQ YMM8, YMM10, YMM11 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x0c + BYTE $0x0c + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0x01 + BYTE $0x01 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 + BYTE $0x20 + + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xc2 // 
VPUNPCKHQDQ YMM8, YMM11, YMM10 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0xff + BYTE $0xff + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM10, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x31 + BYTE $0x31 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 8 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xc3 // VPUNPCKHQDQ YMM8, YMM13, YMM11 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x0c + BYTE $0x0c + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xca // VPERMQ YMM9, YMM10, 0xff + BYTE $0xff + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xc9 // VPUNPCKLQDQ YMM9, YMM13, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 + BYTE $0x20 + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc5 // VPERMQ YMM8, YMM13, 0xaa + BYTE $0xaa + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc0 // VPUNPCKHQDQ YMM8, YMM12, YMM8 + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ YMM9, YMM10, YMM12 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x21 + BYTE $0x21 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xc5 // VPUNPCKHQDQ YMM8, YMM11, YMM13 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x0c + BYTE $0x0c + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6c; BYTE $0xca // VPUNPCKLQDQ YMM9, YMM12, YMM10 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x0c + BYTE $0x0c + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 + BYTE $0x20 + + BYTE 
$0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc3 // VPUNPCKLQDQ YMM8, YMM10, YMM11 + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xcc // VPUNPCKLQDQ YMM9, YMM11, YMM12 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 + BYTE $0x30 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 9 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ YMM8, YMM11, YMM13 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xca // VPERMQ YMM9, YMM10, 0x00 + BYTE $0x00 + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc9 // VPUNPCKHQDQ YMM9, YMM12, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x31 + BYTE $0x31 + + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xc4 // VPUNPCKHQDQ YMM8, YMM13, YMM12 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x60 + BYTE $0x60 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0x00 + BYTE $0x00 + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xc9 // VPUNPCKHQDQ YMM9, YMM10, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x31 + BYTE $0x31 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcc // VPERMQ YMM9, YMM12, 0xaa + BYTE $0xaa + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xc9 // VPUNPCKHQDQ YMM9, YMM10, YMM9 + BYTE $0xc4; BYTE $0xc3; BYTE $0x15; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM13, YMM9, 0x20 + BYTE $0x20 + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc3 // VPERMQ YMM8, YMM11, 0xff + BYTE $0xff + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc0 // VPUNPCKLQDQ YMM8, YMM10, YMM8 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE 
$0xcb // VPERMQ YMM9, YMM11, 0x04 + BYTE $0x04 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x21 + BYTE $0x21 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 10 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc4 // VPERMQ YMM8, YMM12, 0x20 + BYTE $0x20 + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xca // VPUNPCKHQDQ YMM9, YMM11, YMM10 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x60 + BYTE $0x60 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x31 + BYTE $0x31 + + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc3 // VPUNPCKLQDQ YMM8, YMM10, YMM11 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x60 + BYTE $0x60 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcb // VPERMQ YMM9, YMM11, 0x60 + BYTE $0x60 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x31 + BYTE $0x31 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6d; BYTE $0xc4 // VPUNPCKHQDQ YMM8, YMM13, YMM12 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x60 + BYTE $0x60 + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6d; BYTE $0xcd // VPUNPCKHQDQ YMM9, YMM10, YMM13 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x60 + BYTE $0x60 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x31 + BYTE $0x31 + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc5 // VPERMQ YMM8, YMM13, 0xaa + BYTE $0xaa + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xc0 // VPUNPCKHQDQ YMM8, YMM12, YMM8 + BYTE $0xc4; BYTE $0x41; BYTE $0x15; BYTE $0x6c; BYTE $0xca // VPUNPCKLQDQ YMM9, 
YMM13, YMM10 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x21 + BYTE $0x21 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 1 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0xc1; BYTE $0x2d; BYTE $0x6c; BYTE $0xe3 // VPUNPCKLQDQ YMM4, YMM10, YMM11 /* m[0], m[4], m[2], m[6] */ + BYTE $0xc4; BYTE $0xc1; BYTE $0x2d; BYTE $0x6d; BYTE $0xeb // VPUNPCKHQDQ YMM5, YMM10, YMM11 /* m[1], m[5], m[3], m[7] */ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xe4 // VPERMQ YMM4, YMM4, 0xd8 /* 0x1101 1000 = 0xd8 */ + BYTE $0xd8 + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xed // VPERMQ YMM5, YMM5, 0xd8 /* 0x1101 1000 = 0xd8 */ + BYTE $0xd8 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0xc1; BYTE $0x1d; BYTE $0x6c; BYTE $0xe5 // VPUNPCKLQDQ YMM4, YMM12, YMM13 /* m[8], m[12], m[10], m[14] */ + BYTE $0xc4; BYTE $0xc1; BYTE $0x1d; BYTE $0x6d; BYTE $0xed // VPUNPCKHQDQ YMM5, YMM12, YMM13 /* m[9], m[13], m[11], m[15] */ + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xe4 // VPERMQ YMM4, YMM4, 0xd8 /* 0x1101 1000 = 0xd8 */ + BYTE $0xd8 + BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xed // VPERMQ YMM5, YMM5, 0xd8 /* 0x1101 1000 = 0xd8 */ + BYTE $0xd8 + + G1 + G2 + + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 2 + /////////////////////////////////////////////////////////////////////////// + + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ YMM8, YMM11, YMM13 /* m[4], ____, ____, m[14] */ + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x03 /* m[14], m[4], ____, ____ */ /* xxxx 0011 = 0x03 */ + BYTE $0x03 + BYTE $0xc4; BYTE $0x41; BYTE $0x1d; BYTE $0x6d; BYTE $0xcd // VPUNPCKHQDQ YMM9, YMM12, YMM13 /* m[9], m[13], ____, ____ */ + BYTE $0xc4; 
BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 /* m[9], m[13], ____, ____ */ /* 0010 0000 = 0x20 */ + BYTE $0x20 + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc4 // VPERMQ YMM8, YMM12, 0x02 /* m[10], m[8], ____, ____ */ /* xxxx 0010 = 0x02 */ + BYTE $0x02 + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xcd // VPERMQ YMM9, YMM13, 0x30 /* ____, ____, m[15], ____ */ /* xx11 xxxx = 0x30 */ + BYTE $0x30 + BYTE $0xc4; BYTE $0x41; BYTE $0x35; BYTE $0x6c; BYTE $0xcb // VPUNPCKLQDQ YMM9, YMM9, YMM11 /* ____, ____, m[15], m[6] */ + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 /* m[9], m[13], m[15], m[6] */ /* 0011 0000 = 0x30 */ + BYTE $0x30 + + G1 + G2 + + DIAGONALIZE + + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc2 // VPERMQ YMM8, YMM10, 0x01 /* m[1], m[0], ____, ____ */ /* xxxx 0001 = 0x01 */ + BYTE $0x01 + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xcc // VPUNPCKHQDQ YMM9, YMM11, YMM12 /* m[5], ____, ____, m[11] */ + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc9 // VPERMQ YMM9, YMM9, 0x03 /* m[11], m[5], ____, ____ */ /* xxxx 0011 = 0x03 */ + BYTE $0x03 + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe1 // VPERM2I128 YMM4, YMM8, YMM9, 0x20 /* m[1], m[0], m[11], m[5] */ /* 0010 0000 = 0x20 */ + BYTE $0x20 + + BYTE $0xc4; BYTE $0x41; BYTE $0x2d; BYTE $0x6c; BYTE $0xc5 // VPUNPCKLQDQ YMM8, YMM10, YMM13 /* ___, m[12], m[2], ____ */ + BYTE $0xc4; BYTE $0x43; BYTE $0xfd; BYTE $0x00; BYTE $0xc0 // VPERMQ YMM8, YMM8, 0x09 /* m[12], m[2], ____, ____ */ /* xxxx 1001 = 0x09 */ + BYTE $0x09 + BYTE $0xc4; BYTE $0x41; BYTE $0x25; BYTE $0x6d; BYTE $0xca // VPUNPCKHQDQ YMM9, YMM11, YMM10 /* ____, ____, m[7], m[3] */ + BYTE $0xc4; BYTE $0xc3; BYTE $0x3d; BYTE $0x46; BYTE $0xe9 // VPERM2I128 YMM5, YMM8, YMM9, 0x30 /* m[9], m[13], m[15], m[6] */ /* 0011 0000 = 0x30 */ + BYTE $0x30 + + G1 + G2 + + UNDIAGONALIZE + + // Reload digest 
(most current value store in &out) + MOVQ out+144(FP), SI // SI: &in + BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x26 // VMOVDQU YMM12, [rsi] + BYTE $0xc5; BYTE $0x7e; BYTE $0x6f; BYTE $0x6e; BYTE $0x20 // VMOVDQU YMM13, 32[rsi] + + BYTE $0xc5; BYTE $0xfd; BYTE $0xef; BYTE $0xc2 // VPXOR YMM0,YMM0,YMM2 /* X0 = X0 ^ X4, X1 = X1 ^ X5 */ + BYTE $0xc4; BYTE $0xc1; BYTE $0x7d; BYTE $0xef; BYTE $0xc4 // VPXOR YMM0,YMM0,YMM12 /* X0 = X0 ^ X12, X1 = X1 ^ X13 */ + BYTE $0xc5; BYTE $0xf5; BYTE $0xef; BYTE $0xcb // VPXOR YMM1,YMM1,YMM3 /* X2 = X2 ^ X6, X3 = X3 ^ X7 */ + BYTE $0xc4; BYTE $0xc1; BYTE $0x75; BYTE $0xef; BYTE $0xcd // VPXOR YMM1,YMM1,YMM13 /* X2 = X2 ^ X14, X3 = X3 ^ X15 */ + + // Store digest into &out + MOVQ out+144(FP), SI // SI: &out + BYTE $0xc5; BYTE $0xfe; BYTE $0x7f; BYTE $0x06 // VMOVDQU [rsi], YMM0 + BYTE $0xc5; BYTE $0xfe; BYTE $0x7f; BYTE $0x4e; BYTE $0x20 // VMOVDQU 32[rsi], YMM1 + + // Increment message pointer and check if there's more to do + ADDQ $128, DX // message += 128 + SUBQ $1, R8 + JNZ loop + +complete: + BYTE $0xc5; BYTE $0xf8; BYTE $0x77 // VZEROUPPER /* Prevent further context switches */ + RET + diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go b/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go new file mode 100644 index 000000000..cfa12c04f --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.go @@ -0,0 +1,41 @@ +//+build !noasm +//+build !appengine + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package blake2b + +//go:noescape +func blockAVXLoop(p []uint8, in, iv, t, f, shffle, out []uint64) + +func compressAVX(d *digest, p []uint8) { + var ( + in [8]uint64 + out [8]uint64 + shffle [2]uint64 + ) + + // vector for PSHUFB instruction + shffle[0] = 0x0201000706050403 + shffle[1] = 0x0a09080f0e0d0c0b + + in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7] = d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] + + blockAVXLoop(p, in[:], iv[:], d.t[:], d.f[:], shffle[:], out[:]) + + d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7] +} diff --git a/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s b/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s new file mode 100644 index 000000000..f68e17392 --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/compressAvx_amd64.s @@ -0,0 +1,682 @@ +//+build !noasm !appengine + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// +// Based on SSE implementation from https://github.com/BLAKE2/BLAKE2/blob/master/sse/blake2b.c +// +// Use github.com/fwessels/asm2plan9s on this file to assemble instructions to their Plan9 equivalent +// +// Assembly code below essentially follows the ROUND macro (see blake2b-round.h) which is defined as: +// #define ROUND(r) \ +// LOAD_MSG_ ##r ##_1(b0, b1); \ +// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// LOAD_MSG_ ##r ##_2(b0, b1); \ +// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ +// LOAD_MSG_ ##r ##_3(b0, b1); \ +// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// LOAD_MSG_ ##r ##_4(b0, b1); \ +// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); +// +// as well as the go equivalent in https://github.com/dchest/blake2b/blob/master/block.go +// +// As in the macro, G1/G2 in the 1st and 2nd half are identical (so literal copy of assembly) +// +// Rounds are also the same, except for the loading of the message (and rounds 1 & 11 and +// rounds 2 & 12 are identical) +// + +#define G1 \ + \ // G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); + LONG $0xd479c1c4; BYTE $0xc0 \ // VPADDQ XMM0,XMM0,XMM8 /* v0 += m[0], v1 += m[2] */ + LONG $0xd471c1c4; BYTE $0xc9 \ // VPADDQ XMM1,XMM1,XMM9 /* v2 += m[4], v3 += m[6] */ + LONG $0xc2d4f9c5 \ // VPADDQ XMM0,XMM0,XMM2 /* v0 += v4, v1 += v5 */ + LONG $0xcbd4f1c5 \ // VPADDQ XMM1,XMM1,XMM3 /* v2 += v6, v3 += v7 */ + LONG $0xf0efc9c5 \ // VPXOR XMM6,XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ + LONG $0xf9efc1c5 \ // VPXOR XMM7,XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ + LONG $0xf670f9c5; BYTE $0xb1 \ // VPSHUFD XMM6,XMM6,0xb1 /* v12 = v12<<(64-32) | v12>>32, v13 = v13<<(64-32) | v13>>32 */ + LONG $0xff70f9c5; BYTE $0xb1 \ // VPSHUFD XMM7,XMM7,0xb1 /* v14 = v14<<(64-32) | v14>>32, v15 = v15<<(64-32) | v15>>32 */ + LONG 
$0xe6d4d9c5 \ // VPADDQ XMM4,XMM4,XMM6 /* v8 += v12, v9 += v13 */ + LONG $0xefd4d1c5 \ // VPADDQ XMM5,XMM5,XMM7 /* v10 += v14, v11 += v15 */ + LONG $0xd4efe9c5 \ // VPXOR XMM2,XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ + LONG $0xddefe1c5 \ // VPXOR XMM3,XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ + LONG $0x0069c2c4; BYTE $0xd4 \ // VPSHUFB XMM2,XMM2,XMM12 /* v4 = v4<<(64-24) | v4>>24, v5 = v5<<(64-24) | v5>>24 */ + LONG $0x0061c2c4; BYTE $0xdc // VPSHUFB XMM3,XMM3,XMM12 /* v6 = v6<<(64-24) | v6>>24, v7 = v7<<(64-24) | v7>>24 */ + +#define G2 \ + \ // G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); + LONG $0xd479c1c4; BYTE $0xc2 \ // VPADDQ XMM0,XMM0,XMM10 /* v0 += m[1], v1 += m[3] */ + LONG $0xd471c1c4; BYTE $0xcb \ // VPADDQ XMM1,XMM1,XMM11 /* v2 += m[5], v3 += m[7] */ + LONG $0xc2d4f9c5 \ // VPADDQ XMM0,XMM0,XMM2 /* v0 += v4, v1 += v5 */ + LONG $0xcbd4f1c5 \ // VPADDQ XMM1,XMM1,XMM3 /* v2 += v6, v3 += v7 */ + LONG $0xf0efc9c5 \ // VPXOR XMM6,XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ + LONG $0xf9efc1c5 \ // VPXOR XMM7,XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ + LONG $0xf670fbc5; BYTE $0x39 \ // VPSHUFLW XMM6,XMM6,0x39 /* combined with next ... */ + LONG $0xf670fac5; BYTE $0x39 \ // VPSHUFHW XMM6,XMM6,0x39 /* v12 = v12<<(64-16) | v12>>16, v13 = v13<<(64-16) | v13>>16 */ + LONG $0xff70fbc5; BYTE $0x39 \ // VPSHUFLW XMM7,XMM7,0x39 /* combined with next ... 
*/ + LONG $0xff70fac5; BYTE $0x39 \ // VPSHUFHW XMM7,XMM7,0x39 /* v14 = v14<<(64-16) | v14>>16, v15 = v15<<(64-16) | v15>>16 */ + LONG $0xe6d4d9c5 \ // VPADDQ XMM4,XMM4,XMM6 /* v8 += v12, v9 += v13 */ + LONG $0xefd4d1c5 \ // VPADDQ XMM5,XMM5,XMM7 /* v10 += v14, v11 += v15 */ + LONG $0xd4efe9c5 \ // VPXOR XMM2,XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ + LONG $0xddefe1c5 \ // VPXOR XMM3,XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ + LONG $0xfad469c5 \ // VPADDQ XMM15,XMM2,XMM2 /* temp reg = reg*2 */ + LONG $0xd273e9c5; BYTE $0x3f \ // VPSRLQ XMM2,XMM2,0x3f /* reg = reg>>63 */ + LONG $0xef69c1c4; BYTE $0xd7 \ // VPXOR XMM2,XMM2,XMM15 /* ORed together: v4 = v4<<(64-63) | v4>>63, v5 = v5<<(64-63) | v5>>63 */ + LONG $0xfbd461c5 \ // VPADDQ XMM15,XMM3,XMM3 /* temp reg = reg*2 */ + LONG $0xd373e1c5; BYTE $0x3f \ // VPSRLQ XMM3,XMM3,0x3f /* reg = reg>>63 */ + LONG $0xef61c1c4; BYTE $0xdf // VPXOR XMM3,XMM3,XMM15 /* ORed together: v6 = v6<<(64-63) | v6>>63, v7 = v7<<(64-63) | v7>>63 */ + +#define DIAGONALIZE \ + \ // DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); + MOVOU X6, X13 \ /* t0 = row4l;\ */ + MOVOU X2, X14 \ /* t1 = row2l;\ */ + MOVOU X4, X6 \ /* row4l = row3l;\ */ + MOVOU X5, X4 \ /* row3l = row3h;\ */ + MOVOU X6, X5 \ /* row3h = row4l;\ */ + LONG $0x6c1141c4; BYTE $0xfd \ // VPUNPCKLQDQ XMM15, XMM13, XMM13 /* _mm_unpacklo_epi64(t0, t0) */ + LONG $0x6d41c1c4; BYTE $0xf7 \ // VPUNPCKHQDQ XMM6, XMM7, XMM15 /* row4l = _mm_unpackhi_epi64(row4h, ); \ */ + LONG $0xff6c41c5 \ // VPUNPCKLQDQ XMM15, XMM7, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ + LONG $0x6d11c1c4; BYTE $0xff \ // VPUNPCKHQDQ XMM7, XMM13, XMM15 /* row4h = _mm_unpackhi_epi64(t0, ); \ */ + LONG $0xfb6c61c5 \ // VPUNPCKLQDQ XMM15, XMM3, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ + LONG $0x6d69c1c4; BYTE $0xd7 \ // VPUNPCKHQDQ XMM2, XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2l, ); \ */ + LONG $0x6c0941c4; BYTE $0xfe \ // VPUNPCKLQDQ XMM15, XMM14, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ + LONG $0x6d61c1c4; 
BYTE $0xdf // VPUNPCKHQDQ XMM3, XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(row2h, ) */ + +#define UNDIAGONALIZE \ + \ // UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); + MOVOU X4, X13 \ /* t0 = row3l;\ */ + MOVOU X5, X4 \ /* row3l = row3h;\ */ + MOVOU X13, X5 \ /* row3h = t0;\ */ + MOVOU X2, X13 \ /* t0 = row2l;\ */ + MOVOU X6, X14 \ /* t1 = row4l;\ */ + LONG $0xfa6c69c5 \ // VPUNPCKLQDQ XMM15, XMM2, XMM2 /* _mm_unpacklo_epi64(row2l, row2l) */ + LONG $0x6d61c1c4; BYTE $0xd7 \ // VPUNPCKHQDQ XMM2, XMM3, XMM15 /* row2l = _mm_unpackhi_epi64(row2h, ); \ */ + LONG $0xfb6c61c5 \ // VPUNPCKLQDQ XMM15, XMM3, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ + LONG $0x6d11c1c4; BYTE $0xdf \ // VPUNPCKHQDQ XMM3, XMM13, XMM15 /* row2h = _mm_unpackhi_epi64(t0, ); \ */ + LONG $0xff6c41c5 \ // VPUNPCKLQDQ XMM15, XMM7, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ + LONG $0x6d49c1c4; BYTE $0xf7 \ // VPUNPCKHQDQ XMM6, XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4l, ); \ */ + LONG $0x6c0941c4; BYTE $0xfe \ // VPUNPCKLQDQ XMM15, XMM14, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ + LONG $0x6d41c1c4; BYTE $0xff // VPUNPCKHQDQ XMM7, XMM7, XMM15 /* row4h = _mm_unpackhi_epi64(row4h, ) */ + +#define LOAD_SHUFFLE \ + \ // Load shuffle value + MOVQ shffle+120(FP), SI \ // SI: &shuffle + MOVOU 0(SI), X12 // X12 = 03040506 07000102 0b0c0d0e 0f08090a + +// func blockAVXLoop(p []uint8, in, iv, t, f, shffle, out []uint64) +TEXT ·blockAVXLoop(SB), 7, $0 + // REGISTER USE + // R8: loop counter + // DX: message pointer + // SI: temp pointer for loading + // X0 - X7: v0 - v15 + // X8 - X11: m[0] - m[7] + // X12: shuffle value + // X13 - X15: temp registers + + // Load digest + MOVQ in+24(FP), SI // SI: &in + MOVOU 0(SI), X0 // X0 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ + MOVOU 16(SI), X1 // X1 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ + MOVOU 32(SI), X2 // X2 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ + MOVOU 48(SI), X3 // X3 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ + + // 
Already store digest into &out (so we can reload it later generically) + MOVQ out+144(FP), SI // SI: &out + MOVOU X0, 0(SI) // out[0]+out[1] = X0 + MOVOU X1, 16(SI) // out[2]+out[3] = X1 + MOVOU X2, 32(SI) // out[4]+out[5] = X2 + MOVOU X3, 48(SI) // out[6]+out[7] = X3 + + // Initialize message pointer and loop counter + MOVQ message+0(FP), DX // DX: &p (message) + MOVQ message_len+8(FP), R8 // R8: len(message) + SHRQ $7, R8 // len(message) / 128 + CMPQ R8, $0 + JEQ complete + +loop: + // Increment counter + MOVQ t+72(FP), SI // SI: &t + MOVQ 0(SI), R9 + ADDQ $128, R9 // /* d.t[0] += BlockSize */ + MOVQ R9, 0(SI) + CMPQ R9, $128 // /* if d.t[0] < BlockSize { */ + JGE noincr + MOVQ 8(SI), R9 + ADDQ $1, R9 // /* d.t[1]++ */ + MOVQ R9, 8(SI) +noincr: // /* } */ + + // Load initialization vector + MOVQ iv+48(FP), SI // SI: &iv + MOVOU 0(SI), X4 // X4 = iv[0]+iv[1] /* row3l = LOAD( &blake2b_IV[0] ); */ + MOVOU 16(SI), X5 // X5 = iv[2]+iv[3] /* row3h = LOAD( &blake2b_IV[2] ); */ + MOVOU 32(SI), X6 // X6 = iv[4]+iv[5] /* LOAD( &blake2b_IV[4] ) */ + MOVOU 48(SI), X7 // X7 = iv[6]+iv[7] /* LOAD( &blake2b_IV[6] ) */ + MOVQ t+72(FP), SI // SI: &t + MOVOU 0(SI), X8 // X8 = t[0]+t[1] /* LOAD( &S->t[0] ) */ + PXOR X8, X6 // X6 = X6 ^ X8 /* row4l = _mm_xor_si128( , ); */ + MOVQ t+96(FP), SI // SI: &f + MOVOU 0(SI), X8 // X8 = f[0]+f[1] /* LOAD( &S->f[0] ) */ + PXOR X8, X7 // X7 = X7 ^ X8 /* row4h = _mm_xor_si128( , ); */ + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+m[1] + MOVOU 16(DX), X13 // X13 = m[2]+m[3] + MOVOU 32(DX), X14 // X14 = m[4]+m[5] + MOVOU 48(DX), X15 // X15 = m[6]+m[7] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[2] */ + LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[4], m[6] */ + LONG 
$0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[1], m[3] */ + LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[5], m[7] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[8],m[10] */ + LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[12],m[14] */ + LONG $0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[9],m[11] */ + LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[13],m[15] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 2 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 112(DX), X12 // X12 = m[14]+m[15] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[14], m[4] */ + LONG $0x6d0941c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM14, XMM15 /* m[9], m[13] */ + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 48(DX), X15 // X15 = m[6]+ m[7] + LONG $0x6c1141c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[10], m[8] */ + LONG $0x0f0143c4; WORD $0x08dc // VPALIGNR XMM11, XMM15, XMM12, 0x8 /* m[15], m[6] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + LONG $0x0f1943c4; WORD $0x08c4 // VPALIGNR XMM8, XMM12, XMM12, 0x8 /* m[1], m[0] */ + LONG $0x6d0941c4; BYTE $0xcd // VPUNPCKHQDQ XMM9, XMM14, 
XMM13 /* m[11], m[5] */ + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + LONG $0x6c0941c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[2] */ + LONG $0x6d1141c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM13, XMM12 /* m[7], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 3 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 32(DX), X12 // X12 = m[4]+ m[5] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x0f0943c4; WORD $0x08c5 // VPALIGNR XMM8, XMM14, XMM13, 0x8 /* m[11], m[12] */ + LONG $0x6d1941c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM12, XMM15 /* m[5], m[15] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 64(DX), X15 // X15 = m[8]+ m[9] + LONG $0x6c0141c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM15, XMM12 /* m[8], m[0] */ + LONG $0x6d0941c4; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[13] */ + LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[2], ___ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG $0x6d1941c4; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM12, XMM12 /* ___, m[3] */ + LONG $0x6c0141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[10], ___ */ + LONG $0x6d1141c4; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM13, XMM14 /* m[7], m[9] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X14 // X14 = m[4]+ m[5] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6c0141c4; BYTE $0xd5 // VPUNPCKLQDQ XMM10, XMM15, XMM13 /* 
m[14], m[6] */ + LONG $0x0f0943c4; WORD $0x08dc // VPALIGNR XMM11, XMM14, XMM12, 0x8 /* m[1], m[4] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 4 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + LONG $0x6d1141c4; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM13, XMM12 /* m[7], m[3] */ + LONG $0x6d0141c4; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM15, XMM14 /* m[13], m[11] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 112(DX), X14 // X14 = m[14]+m[15] + LONG $0x6d1141c4; BYTE $0xd4 // VPUNPCKHQDQ XMM10, XMM13, XMM12 /* m[9], m[1] */ + LONG $0x6c0141c4; BYTE $0xde // VPUNPCKLQDQ XMM11, XMM15, XMM14 /* m[12], m[14] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d1141c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM13, XMM13 /* ___, m[5] */ + LONG $0x6c1941c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM12, XMM8 /* m[2], ____ */ + LONG $0x6d0141c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM15, XMM15 /* ___, m[15] */ + LONG $0x6c1141c4; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM13, XMM9 /* m[4], ____ */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X15 // X15 = m[8]+ m[9] + LONG $0x6c1141c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[6], m[10] */ + LONG $0x6c1941c4; BYTE $0xdf // VPUNPCKLQDQ XMM11, XMM12, XMM15 /* m[0], m[8] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + 
/////////////////////////////////////////////////////////////////////////// + // R O U N D 5 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG $0x6d0941c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM14, XMM13 /* m[9], m[5] */ + LONG $0x6c1941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM12, XMM15 /* m[2], m[10] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0941c4; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM14, XMM14 /* ___, m[7] */ + LONG $0x6c1941c4; BYTE $0xd2 // VPUNPCKLQDQ XMM10, XMM12, XMM10 /* m[0], ____ */ + LONG $0x6d0141c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM15, XMM15 /* ___, m[15] */ + LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[4], ____ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0941c4; BYTE $0xc6 // VPUNPCKHQDQ XMM8, XMM14, XMM14 /* ___, m[11] */ + LONG $0x6c0141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[14], ____ */ + LONG $0x6d1941c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM12, XMM12 /* ___, m[3] */ + LONG $0x6c1141c4; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM13, XMM9 /* m[6], ____ */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + LONG $0x0f0943c4; WORD $0x08d4 // VPALIGNR XMM10, XMM14, XMM12, 0x8 /* m[1], m[12] */ + LONG $0x6d0941c4; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[13] */ + LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[8], ____ */ + + LOAD_SHUFFLE 
+ G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 6 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 64(DX), X15 // X15 = m[8]+ m[9] + LONG $0x6c1141c4; BYTE $0xc6 // VPUNPCKLQDQ XMM8, XMM13, XMM14 /* m[2], m[6] */ + LONG $0x6c1941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM12, XMM15 /* m[0], m[8] */ + MOVOU 80(DX), X12 // X12 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + LONG $0x6c0941c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[10] */ + LONG $0x6d1941c4; BYTE $0xdd // VPUNPCKHQDQ XMM11, XMM12, XMM13 /* m[11], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0941c4; BYTE $0xc6 // VPUNPCKHQDQ XMM8, XMM14, XMM14 /* ___, m[7] */ + LONG $0x6c1141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM13, XMM8 /* m[4], ____ */ + LONG $0x6d0141c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM15, XMM12 /* m[15], m[1] */ + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + LONG $0x6d0941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM14, XMM13 /* m[13], m[5] */ + LONG $0x6d1941c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM12, XMM12 /* ___, m[9] */ + LONG $0x6c0141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM15, XMM11 /* m[14], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 7 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ 
m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d1941c4; BYTE $0xc4 // VPUNPCKHQDQ XMM8, XMM12, XMM12 /* ___, m[1] */ + LONG $0x6c0941c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM14, XMM8 /* m[12], ____ */ + LONG $0x6c0141c4; BYTE $0xcd // VPUNPCKLQDQ XMM9, XMM15, XMM13 /* m[14], m[4] */ + MOVOU 80(DX), X12 // X12 = m[10]+m[11] + LONG $0x6d1141c4; BYTE $0xd7 // VPUNPCKHQDQ XMM10, XMM13, XMM15 /* m[5], m[15] */ + LONG $0x0f1943c4; WORD $0x08de // VPALIGNR XMM11, XMM12, XMM14, 0x8 /* m[13], m[10] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[6] */ + LONG $0x0f0943c4; WORD $0x08ce // VPALIGNR XMM9, XMM14, XMM14, 0x8 /* m[9], m[8] */ + MOVOU 16(DX), X14 // X14 = m[2]+ m[3] + LONG $0x6d1141c4; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM13, XMM14 /* m[7], m[3] */ + LONG $0x6d0141c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM15, XMM15 /* ___, m[11] */ + LONG $0x6c0941c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM14, XMM11 /* m[2], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 8 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0941c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM14, XMM13 /* m[13], m[7] */ + LONG $0x6d1941c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM12, XMM12 /* ___, m[3] */ + LONG $0x6c0941c4; BYTE $0xc9 // VPUNPCKLQDQ XMM9, XMM14, XMM9 /* m[12], ____ 
*/ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + LONG $0x0f0143c4; WORD $0x08d6 // VPALIGNR XMM10, XMM15, XMM14, 0x8 /* m[11], m[14] */ + LONG $0x6d1941c4; BYTE $0xdd // VPUNPCKHQDQ XMM11, XMM12, XMM13 /* m[1], m[9] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d1141c4; BYTE $0xc7 // VPUNPCKHQDQ XMM8, XMM13, XMM15 /* m[5], m[15] */ + LONG $0x6c0941c4; BYTE $0xcc // VPUNPCKLQDQ XMM9, XMM14, XMM12 /* m[8], m[2] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG $0x6c1941c4; BYTE $0xd5 // VPUNPCKLQDQ XMM10, XMM12, XMM13 /* m[0], m[4] */ + LONG $0x6c0941c4; BYTE $0xdf // VPUNPCKLQDQ XMM11, XMM14, XMM15 /* m[6], m[10] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 9 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6c1141c4; BYTE $0xc7 // VPUNPCKLQDQ XMM8, XMM13, XMM15 /* m[6], m[14] */ + LONG $0x0f1943c4; WORD $0x08ce // VPALIGNR XMM9, XMM12, XMM14, 0x8 /* m[11], m[0] */ + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + LONG $0x6d0141c4; BYTE $0xd6 // VPUNPCKHQDQ XMM10, XMM15, XMM14 /* m[15], m[9] */ + LONG $0x0f0943c4; WORD $0x08dd // VPALIGNR XMM11, XMM14, XMM13, 0x8 /* m[3], m[8] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 
0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + LONG $0x6d0141c4; BYTE $0xc7 // VPUNPCKHQDQ XMM8, XMM15, XMM15 /* ___, m[13] */ + LONG $0x6c0141c4; BYTE $0xc0 // VPUNPCKLQDQ XMM8, XMM15, XMM8 /* m[12], ____ */ + LONG $0x0f0943c4; WORD $0x08cc // VPALIGNR XMM9, XMM14, XMM12, 0x8 /* m[1], m[10] */ + MOVOU 32(DX), X12 // X12 = m[4]+ m[5] + MOVOU 48(DX), X15 // X15 = m[6]+ m[7] + LONG $0x6d0141c4; BYTE $0xd7 // VPUNPCKHQDQ XMM10, XMM15, XMM15 /* ___, m[7] */ + LONG $0x6c1141c4; BYTE $0xd2 // VPUNPCKLQDQ XMM10, XMM13, XMM10 /* m[2], ____ */ + LONG $0x6d1941c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM12, XMM12 /* ___, m[5] */ + LONG $0x6c1941c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM12, XMM11 /* m[4], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 0 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG $0x6c0141c4; BYTE $0xc6 // VPUNPCKLQDQ XMM8, XMM15, XMM14 /* m[10], m[8] */ + LONG $0x6d1141c4; BYTE $0xcc // VPUNPCKHQDQ XMM9, XMM13, XMM12 /* m[7], m[1] */ + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X14 // X14 = m[4]+ m[5] + LONG $0x6c1941c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM12, XMM14 /* m[2], m[4] */ + LONG $0x6d0941c4; BYTE $0xde // VPUNPCKHQDQ XMM11, XMM14, XMM14 /* ___, m[5] */ + LONG $0x6c1141c4; BYTE $0xdb // VPUNPCKLQDQ XMM11, XMM13, XMM11 /* m[6], ____ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 
// X15 = m[14]+m[15] + LONG $0x6d0141c4; BYTE $0xc5 // VPUNPCKHQDQ XMM8, XMM15, XMM13 /* m[15], m[9] */ + LONG $0x6d1941c4; BYTE $0xce // VPUNPCKHQDQ XMM9, XMM12, XMM14 /* m[3], m[13] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + LONG $0x0f0143c4; WORD $0x08d5 // VPALIGNR XMM10, XMM15, XMM13, 0x8 /* m[11], m[14] */ + LONG $0x6c0941c4; BYTE $0xdc // VPUNPCKLQDQ XMM11, XMM14, XMM12 /* m[12], m[0] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 1 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+m[1] + MOVOU 16(DX), X13 // X13 = m[2]+m[3] + MOVOU 32(DX), X14 // X14 = m[4]+m[5] + MOVOU 48(DX), X15 // X15 = m[6]+m[7] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[0], m[2] */ + LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[4], m[6] */ + LONG $0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[1], m[3] */ + LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[5], m[7] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[8],m[10] */ + LONG $0x6c0941c4; BYTE $0xcf // VPUNPCKLQDQ XMM9, XMM14, XMM15 /* m[12],m[14] */ + LONG $0x6d1941c4; BYTE $0xd5 // VPUNPCKHQDQ XMM10, XMM12, XMM13 /* m[9],m[11] */ + LONG $0x6d0941c4; BYTE $0xdf // VPUNPCKHQDQ XMM11, XMM14, XMM15 /* m[13],m[15] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 2 + 
/////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 112(DX), X12 // X12 = m[14]+m[15] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + LONG $0x6c1941c4; BYTE $0xc5 // VPUNPCKLQDQ XMM8, XMM12, XMM13 /* m[14], m[4] */ + LONG $0x6d0941c4; BYTE $0xcf // VPUNPCKHQDQ XMM9, XMM14, XMM15 /* m[9], m[13] */ + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 48(DX), X15 // X15 = m[6]+ m[7] + LONG $0x6c1141c4; BYTE $0xd6 // VPUNPCKLQDQ XMM10, XMM13, XMM14 /* m[10], m[8] */ + LONG $0x0f0143c4; WORD $0x08dc // VPALIGNR XMM11, XMM15, XMM12, 0x8 /* m[15], m[6] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + LONG $0x0f1943c4; WORD $0x08c4 // VPALIGNR XMM8, XMM12, XMM12, 0x8 /* m[1], m[0] */ + LONG $0x6d0941c4; BYTE $0xcd // VPUNPCKHQDQ XMM9, XMM14, XMM13 /* m[11], m[5] */ + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + LONG $0x6c0941c4; BYTE $0xd4 // VPUNPCKLQDQ XMM10, XMM14, XMM12 /* m[12], m[2] */ + LONG $0x6d1141c4; BYTE $0xdc // VPUNPCKHQDQ XMM11, XMM13, XMM12 /* m[7], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + // Reload digest (most current value store in &out) + MOVQ out+144(FP), SI // SI: &in + MOVOU 0(SI), X12 // X12 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ + MOVOU 16(SI), X13 // X13 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ + MOVOU 32(SI), X14 // X14 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ + MOVOU 48(SI), X15 // X15 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ + + // Final computations and prepare for storing + PXOR X4, X0 // X0 = X0 ^ X4 /* row1l = _mm_xor_si128( row3l, row1l ); */ + PXOR X5, X1 // X1 = X1 ^ X5 /* row1h = 
_mm_xor_si128( row3h, row1h ); */ + PXOR X12, X0 // X0 = X0 ^ X12 /* STORE( &S->h[0], _mm_xor_si128( LOAD( &S->h[0] ), row1l ) ); */ + PXOR X13, X1 // X1 = X1 ^ X13 /* STORE( &S->h[2], _mm_xor_si128( LOAD( &S->h[2] ), row1h ) ); */ + PXOR X6, X2 // X2 = X2 ^ X6 /* row2l = _mm_xor_si128( row4l, row2l ); */ + PXOR X7, X3 // X3 = X3 ^ X7 /* row2h = _mm_xor_si128( row4h, row2h ); */ + PXOR X14, X2 // X2 = X2 ^ X14 /* STORE( &S->h[4], _mm_xor_si128( LOAD( &S->h[4] ), row2l ) ); */ + PXOR X15, X3 // X3 = X3 ^ X15 /* STORE( &S->h[6], _mm_xor_si128( LOAD( &S->h[6] ), row2h ) ); */ + + // Store digest into &out + MOVQ out+144(FP), SI // SI: &out + MOVOU X0, 0(SI) // out[0]+out[1] = X0 + MOVOU X1, 16(SI) // out[2]+out[3] = X1 + MOVOU X2, 32(SI) // out[4]+out[5] = X2 + MOVOU X3, 48(SI) // out[6]+out[7] = X3 + + // Increment message pointer and check if there's more to do + ADDQ $128, DX // message += 128 + SUBQ $1, R8 + JNZ loop + +complete: + RET diff --git a/vendor/github.com/minio/blake2b-simd/compressSse_amd64.go b/vendor/github.com/minio/blake2b-simd/compressSse_amd64.go new file mode 100644 index 000000000..d539a7ade --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/compressSse_amd64.go @@ -0,0 +1,41 @@ +//+build !noasm +//+build !appengine + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package blake2b + +//go:noescape +func blockSSELoop(p []uint8, in, iv, t, f, shffle, out []uint64) + +func compressSSE(d *digest, p []uint8) { + var ( + in [8]uint64 + out [8]uint64 + shffle [2]uint64 + ) + + // vector for PSHUFB instruction + shffle[0] = 0x0201000706050403 + shffle[1] = 0x0a09080f0e0d0c0b + + in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7] = d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] + + blockSSELoop(p, in[:], iv[:], d.t[:], d.f[:], shffle[:], out[:]) + + d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7] +} diff --git a/vendor/github.com/minio/blake2b-simd/compressSse_amd64.s b/vendor/github.com/minio/blake2b-simd/compressSse_amd64.s new file mode 100644 index 000000000..6f31c949e --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/compressSse_amd64.s @@ -0,0 +1,770 @@ +//+build !noasm !appengine + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// +// Based on SSE implementation from https://github.com/BLAKE2/BLAKE2/blob/master/sse/blake2b.c +// +// Use github.com/fwessels/asm2plan9s on this file to assemble instructions to their Plan9 equivalent +// +// Assembly code below essentially follows the ROUND macro (see blake2b-round.h) which is defined as: +// #define ROUND(r) \ +// LOAD_MSG_ ##r ##_1(b0, b1); \ +// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// LOAD_MSG_ ##r ##_2(b0, b1); \ +// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ +// LOAD_MSG_ ##r ##_3(b0, b1); \ +// G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// LOAD_MSG_ ##r ##_4(b0, b1); \ +// G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ +// UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); +// +// as well as the go equivalent in https://github.com/dchest/blake2b/blob/master/block.go +// +// As in the macro, G1/G2 in the 1st and 2nd half are identical (so literal copy of assembly) +// +// Rounds are also the same, except for the loading of the message (and rounds 1 & 11 and +// rounds 2 & 12 are identical) +// + +#define G1 \ + \ // G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); + LONG $0xd40f4166; BYTE $0xc0 \ // PADDQ XMM0,XMM8 /* v0 += m[0], v1 += m[2] */ + LONG $0xd40f4166; BYTE $0xc9 \ // PADDQ XMM1,XMM9 /* v2 += m[4], v3 += m[6] */ + LONG $0xc2d40f66 \ // PADDQ XMM0,XMM2 /* v0 += v4, v1 += v5 */ + LONG $0xcbd40f66 \ // PADDQ XMM1,XMM3 /* v2 += v6, v3 += v7 */ + LONG $0xf0ef0f66 \ // PXOR XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ + LONG $0xf9ef0f66 \ // PXOR XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ + LONG $0xf6700f66; BYTE $0xb1 \ // PSHUFD XMM6,XMM6,0xb1 /* v12 = v12<<(64-32) | v12>>32, v13 = v13<<(64-32) | v13>>32 */ + LONG $0xff700f66; BYTE $0xb1 \ // PSHUFD XMM7,XMM7,0xb1 /* v14 = v14<<(64-32) | v14>>32, v15 = v15<<(64-32) | v15>>32 */ + LONG $0xe6d40f66 \ // PADDQ XMM4,XMM6 /* v8 
+= v12, v9 += v13 */ + LONG $0xefd40f66 \ // PADDQ XMM5,XMM7 /* v10 += v14, v11 += v15 */ + LONG $0xd4ef0f66 \ // PXOR XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ + LONG $0xddef0f66 \ // PXOR XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ + LONG $0x380f4166; WORD $0xd400 \ // PSHUFB XMM2,XMM12 /* v4 = v4<<(64-24) | v4>>24, v5 = v5<<(64-24) | v5>>24 */ + LONG $0x380f4166; WORD $0xdc00 // PSHUFB XMM3,XMM12 /* v6 = v6<<(64-24) | v6>>24, v7 = v7<<(64-24) | v7>>24 */ + +#define G2 \ + \ // G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); + LONG $0xd40f4166; BYTE $0xc2 \ // PADDQ XMM0,XMM10 /* v0 += m[1], v1 += m[3] */ + LONG $0xd40f4166; BYTE $0xcb \ // PADDQ XMM1,XMM11 /* v2 += m[5], v3 += m[7] */ + LONG $0xc2d40f66 \ // PADDQ XMM0,XMM2 /* v0 += v4, v1 += v5 */ + LONG $0xcbd40f66 \ // PADDQ XMM1,XMM3 /* v2 += v6, v3 += v7 */ + LONG $0xf0ef0f66 \ // PXOR XMM6,XMM0 /* v12 ^= v0, v13 ^= v1 */ + LONG $0xf9ef0f66 \ // PXOR XMM7,XMM1 /* v14 ^= v2, v15 ^= v3 */ + LONG $0xf6700ff2; BYTE $0x39 \ // PSHUFLW XMM6,XMM6,0x39 /* combined with next ... */ + LONG $0xf6700ff3; BYTE $0x39 \ // PSHUFHW XMM6,XMM6,0x39 /* v12 = v12<<(64-16) | v12>>16, v13 = v13<<(64-16) | v13>>16 */ + LONG $0xff700ff2; BYTE $0x39 \ // PSHUFLW XMM7,XMM7,0x39 /* combined with next ... 
*/ + LONG $0xff700ff3; BYTE $0x39 \ // PSHUFHW XMM7,XMM7,0x39 /* v14 = v14<<(64-16) | v14>>16, v15 = v15<<(64-16) | v15>>16 */ + LONG $0xe6d40f66 \ // PADDQ XMM4,XMM6 /* v8 += v12, v9 += v13 */ + LONG $0xefd40f66 \ // PADDQ XMM5,XMM7 /* v10 += v14, v11 += v15 */ + LONG $0xd4ef0f66 \ // PXOR XMM2,XMM4 /* v4 ^= v8, v5 ^= v9 */ + LONG $0xddef0f66 \ // PXOR XMM3,XMM5 /* v6 ^= v10, v7 ^= v11 */ + MOVOU X2, X15 \ + LONG $0xd40f4466; BYTE $0xfa \ // PADDQ XMM15,XMM2 /* temp reg = reg*2 */ + LONG $0xd2730f66; BYTE $0x3f \ // PSRLQ XMM2,0x3f /* reg = reg>>63 */ + LONG $0xef0f4166; BYTE $0xd7 \ // PXOR XMM2,XMM15 /* ORed together: v4 = v4<<(64-63) | v4>>63, v5 = v5<<(64-63) | v5>>63 */ + MOVOU X3, X15 \ + LONG $0xd40f4466; BYTE $0xfb \ // PADDQ XMM15,XMM3 /* temp reg = reg*2 */ + LONG $0xd3730f66; BYTE $0x3f \ // PSRLQ XMM3,0x3f /* reg = reg>>63 */ + LONG $0xef0f4166; BYTE $0xdf // PXOR XMM3,XMM15 /* ORed together: v6 = v6<<(64-63) | v6>>63, v7 = v7<<(64-63) | v7>>63 */ + +#define DIAGONALIZE \ + \ // DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); + MOVOU X6, X13 \ /* t0 = row4l;\ */ + MOVOU X2, X14 \ /* t1 = row2l;\ */ + MOVOU X4, X6 \ /* row4l = row3l;\ */ + MOVOU X5, X4 \ /* row3l = row3h;\ */ + MOVOU X6, X5 \ /* row3h = row4l;\ */ + LONG $0x6c0f4566; BYTE $0xfd \ // PUNPCKLQDQ XMM15, XMM13 /* _mm_unpacklo_epi64(t0, t0) */ + MOVOU X7, X6 \ + LONG $0x6d0f4166; BYTE $0xf7 \ // PUNPCKHQDQ XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4h, ); \ */ + LONG $0x6c0f4466; BYTE $0xff \ // PUNPCKLQDQ XMM15, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ + MOVOU X13, X7 \ + LONG $0x6d0f4166; BYTE $0xff \ // PUNPCKHQDQ XMM7, XMM15 /* row4h = _mm_unpackhi_epi64(t0, ); \ */ + LONG $0x6c0f4466; BYTE $0xfb \ // PUNPCKLQDQ XMM15, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ + LONG $0x6d0f4166; BYTE $0xd7 \ // PUNPCKHQDQ XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2l, ); \ */ + LONG $0x6c0f4566; BYTE $0xfe \ // PUNPCKLQDQ XMM15, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ + LONG 
$0x6d0f4166; BYTE $0xdf // PUNPCKHQDQ XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(row2h, ) */ + +#define UNDIAGONALIZE \ + \ // UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); + MOVOU X4, X13 \ /* t0 = row3l;\ */ + MOVOU X5, X4 \ /* row3l = row3h;\ */ + MOVOU X13, X5 \ /* row3h = t0;\ */ + MOVOU X2, X13 \ /* t0 = row2l;\ */ + MOVOU X6, X14 \ /* t1 = row4l;\ */ + LONG $0x6c0f4466; BYTE $0xfa \ // PUNPCKLQDQ XMM15, XMM2 /* _mm_unpacklo_epi64(row2l, row2l) */ + MOVOU X3, X2 \ + LONG $0x6d0f4166; BYTE $0xd7 \ // PUNPCKHQDQ XMM2, XMM15 /* row2l = _mm_unpackhi_epi64(row2h, ); \ */ + LONG $0x6c0f4466; BYTE $0xfb \ // PUNPCKLQDQ XMM15, XMM3 /* _mm_unpacklo_epi64(row2h, row2h) */ + MOVOU X13, X3 \ + LONG $0x6d0f4166; BYTE $0xdf \ // PUNPCKHQDQ XMM3, XMM15 /* row2h = _mm_unpackhi_epi64(t0, ); \ */ + LONG $0x6c0f4466; BYTE $0xff \ // PUNPCKLQDQ XMM15, XMM7 /* _mm_unpacklo_epi64(row4h, row4h) */ + LONG $0x6d0f4166; BYTE $0xf7 \ // PUNPCKHQDQ XMM6, XMM15 /* row4l = _mm_unpackhi_epi64(row4l, ); \ */ + LONG $0x6c0f4566; BYTE $0xfe \ // PUNPCKLQDQ XMM15, XMM14 /* _mm_unpacklo_epi64(t1, t1) */ + LONG $0x6d0f4166; BYTE $0xff // PUNPCKHQDQ XMM7, XMM15 /* row4h = _mm_unpackhi_epi64(row4h, ) */ + +#define LOAD_SHUFFLE \ + \ // Load shuffle value + MOVQ shffle+120(FP), SI \ // SI: &shuffle + MOVOU 0(SI), X12 // X12 = 03040506 07000102 0b0c0d0e 0f08090a + +// func blockSSELoop(p []uint8, in, iv, t, f, shffle, out []uint64) +TEXT ·blockSSELoop(SB), 7, $0 + // REGISTER USE + // R8: loop counter + // DX: message pointer + // SI: temp pointer for loading + // X0 - X7: v0 - v15 + // X8 - X11: m[0] - m[7] + // X12: shuffle value + // X13 - X15: temp registers + + // Load digest + MOVQ in+24(FP), SI // SI: &in + MOVOU 0(SI), X0 // X0 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ + MOVOU 16(SI), X1 // X1 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ + MOVOU 32(SI), X2 // X2 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ + MOVOU 48(SI), X3 // X3 = in[6]+in[7] /* row2h = LOAD( 
&S->h[6] ); */ + + // Already store digest into &out (so we can reload it later generically) + MOVQ out+144(FP), SI // SI: &out + MOVOU X0, 0(SI) // out[0]+out[1] = X0 + MOVOU X1, 16(SI) // out[2]+out[3] = X1 + MOVOU X2, 32(SI) // out[4]+out[5] = X2 + MOVOU X3, 48(SI) // out[6]+out[7] = X3 + + // Initialize message pointer and loop counter + MOVQ message+0(FP), DX // DX: &p (message) + MOVQ message_len+8(FP), R8 // R8: len(message) + SHRQ $7, R8 // len(message) / 128 + CMPQ R8, $0 + JEQ complete + +loop: + // Increment counter + MOVQ t+72(FP), SI // SI: &t + MOVQ 0(SI), R9 + ADDQ $128, R9 // /* d.t[0] += BlockSize */ + MOVQ R9, 0(SI) + CMPQ R9, $128 // /* if d.t[0] < BlockSize { */ + JGE noincr + MOVQ 8(SI), R9 + ADDQ $1, R9 // /* d.t[1]++ */ + MOVQ R9, 8(SI) + +noincr: // /* } */ + + // Load initialization vector + MOVQ iv+48(FP), SI // SI: &iv + MOVOU 0(SI), X4 // X4 = iv[0]+iv[1] /* row3l = LOAD( &blake2b_IV[0] ); */ + MOVOU 16(SI), X5 // X5 = iv[2]+iv[3] /* row3h = LOAD( &blake2b_IV[2] ); */ + MOVOU 32(SI), X6 // X6 = iv[4]+iv[5] /* LOAD( &blake2b_IV[4] ) */ + MOVOU 48(SI), X7 // X7 = iv[6]+iv[7] /* LOAD( &blake2b_IV[6] ) */ + MOVQ t+72(FP), SI // SI: &t + MOVOU 0(SI), X8 // X8 = t[0]+t[1] /* LOAD( &S->t[0] ) */ + PXOR X8, X6 // X6 = X6 ^ X8 /* row4l = _mm_xor_si128( , ); */ + MOVQ t+96(FP), SI // SI: &f + MOVOU 0(SI), X8 // X8 = f[0]+f[1] /* LOAD( &S->f[0] ) */ + PXOR X8, X7 // X7 = X7 ^ X8 /* row4h = _mm_xor_si128( , ); */ + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+m[1] + MOVOU 16(DX), X13 // X13 = m[2]+m[3] + MOVOU 32(DX), X14 // X14 = m[4]+m[5] + MOVOU 48(DX), X15 // X15 = m[6]+m[7] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[2] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ 
XMM9, XMM15 /* m[4], m[6] */ + MOVOU X12, X10 + LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[1], m[3] */ + MOVOU X14, X11 + LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[5], m[7] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[8],m[10] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[12],m[14] */ + MOVOU X12, X10 + LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[9],m[11] */ + MOVOU X14, X11 + LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[13],m[15] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 2 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 112(DX), X12 // X12 = m[14]+m[15] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[14], m[4] */ + MOVOU X14, X9 + LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[9], m[13] */ + MOVOU 80(DX), X10 // X10 = m[10]+m[11] + MOVOU 48(DX), X11 // X11 = m[6]+ m[7] + LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[10], m[8] */ + LONG $0x3a0f4566; WORD $0xdc0f; BYTE $0x08 // PALIGNR XMM11, XMM12, 0x8 /* m[15], m[6] */; ; ; ; ; + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU X12, X8 + LONG $0x3a0f4566; 
WORD $0xc40f; BYTE $0x08 // PALIGNR XMM8, XMM12, 0x8 /* m[1], m[0] */ + MOVOU X14, X9 + LONG $0x6d0f4566; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* m[11], m[5] */ + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X11 // X11 = m[6]+ m[7] + MOVOU 96(DX), X10 // X10 = m[12]+m[13] + LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[2] */ + LONG $0x6d0f4566; BYTE $0xdc // PUNPCKHQDQ XMM11, XMM12 /* m[7], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 3 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 32(DX), X12 // X12 = m[4]+ m[5] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X14, X8 + LONG $0x3a0f4566; WORD $0xc50f; BYTE $0x08 // PALIGNR XMM8, XMM13, 0x8 /* m[11], m[12] */ + MOVOU X12, X9 + LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[5], m[15] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 64(DX), X10 // X10 = m[8]+ m[9] + LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[8], m[0] */ + LONG $0x6d0f4566; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[13] */ + MOVOU X13, X11 + LONG $0x6c0f4566; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[2], ___ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + MOVOU X12, X9 + LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* ___, m[3] */ + MOVOU X15, X8 + LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[10], ___ */ + MOVOU X13, X9 + LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[7], m[9] */ + MOVOU 0(DX), X12 // X12 
= m[0]+ m[1] + MOVOU 32(DX), X11 // X11 = m[4]+ m[5] + MOVOU 112(DX), X10 // X10 = m[14]+m[15] + LONG $0x6c0f4566; BYTE $0xd5 // PUNPCKLQDQ XMM10, XMM13 /* m[14], m[6] */ + LONG $0x3a0f4566; WORD $0xdc0f; BYTE $0x08 // PALIGNR XMM11, XMM12, 0x8 /* m[1], m[4] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 4 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + MOVOU X13, X8 + LONG $0x6d0f4566; BYTE $0xc4 // PUNPCKHQDQ XMM8, XMM12 /* m[7], m[3] */ + MOVOU X15, X9 + LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[13], m[11] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 64(DX), X10 // X10 = m[8]+ m[9] + MOVOU 112(DX), X14 // X14 = m[14]+m[15] + LONG $0x6d0f4566; BYTE $0xd4 // PUNPCKHQDQ XMM10, XMM12 /* m[9], m[1] */ + MOVOU X15, X11 + LONG $0x6c0f4566; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[12], m[14] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X13, X9 + LONG $0x6d0f4566; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* ___, m[5] */ + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[2], ____ */ + MOVOU X15, X10 + LONG $0x6d0f4566; BYTE $0xd7 // PUNPCKHQDQ XMM10, XMM15 /* ___, m[15] */ + MOVOU X13, X9 + LONG $0x6c0f4566; BYTE $0xca // PUNPCKLQDQ XMM9, XMM10 /* m[4], ____ */ + MOVOU 0(DX), X11 // X11 = m[0]+ m[1] + MOVOU 48(DX), X10 // X10 = m[6]+ m[7] + MOVOU 64(DX), X15 // X15 = m[8]+ m[9] + LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[6], m[10] 
*/ + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[0], m[8] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 5 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + MOVOU X14, X8 + LONG $0x6d0f4566; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[9], m[5] */ + MOVOU X12, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[2], m[10] */ + MOVOU 0(DX), X10 // X10 = m[0]+ m[1] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0f4566; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[7] */ + LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[0], ____ */ + LONG $0x6d0f4566; BYTE $0xff // PUNPCKHQDQ XMM15, XMM15 /* ___, m[15] */ + MOVOU X13, X11 + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[4], ____ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + LONG $0x6d0f4566; BYTE $0xf6 // PUNPCKHQDQ XMM14, XMM14 /* ___, m[11] */ + MOVOU X15, X8 + LONG $0x6c0f4566; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[14], ____ */ + LONG $0x6d0f4566; BYTE $0xe4 // PUNPCKHQDQ XMM12, XMM12 /* ___, m[3] */ + MOVOU X13, X9 + LONG $0x6c0f4566; BYTE $0xcc // PUNPCKLQDQ XMM9, XMM12 /* m[6], ____ */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 64(DX), X11 // X11 = m[8]+ m[9] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU X14, X10 + LONG $0x3a0f4566; WORD $0xd40f; BYTE $0x08 // PALIGNR XMM10, XMM12, 0x8 /* m[1], m[12] */ + LONG $0x6d0f4566; BYTE $0xf6 
// PUNPCKHQDQ XMM14, XMM14 /* ___, m[13] */ + LONG $0x6c0f4566; BYTE $0xde // PUNPCKLQDQ XMM11, XMM14 /* m[8], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 6 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 64(DX), X15 // X15 = m[8]+ m[9] + MOVOU X13, X8 + LONG $0x6c0f4566; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[2], m[6] */ + MOVOU X12, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[0], m[8] */ + MOVOU 80(DX), X12 // X12 = m[10]+m[11] + MOVOU 96(DX), X10 // X10 = m[12]+m[13] + LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[10] */ + MOVOU X12, X11 + LONG $0x6d0f4566; BYTE $0xdd // PUNPCKHQDQ XMM11, XMM13 /* m[11], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 48(DX), X14 // X14 = m[6]+ m[7] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X14, X9 + LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* ___, m[7] */ + MOVOU X13, X8 + LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[4], ____ */ + MOVOU X15, X9 + LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* m[15], m[1] */ + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 96(DX), X10 // X10 = m[12]+m[13] + LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[13], m[5] */ + LONG $0x6d0f4566; BYTE $0xe4 // PUNPCKHQDQ XMM12, XMM12 /* ___, m[9] */ + MOVOU X15, X11 + LONG $0x6c0f4566; BYTE $0xdc // PUNPCKLQDQ XMM11, XMM12 /* m[14], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 7 + 
/////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X12, X9 + LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* ___, m[1] */ + MOVOU X14, X8 + LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[12], ____ */ + MOVOU X15, X9 + LONG $0x6c0f4566; BYTE $0xcd // PUNPCKLQDQ XMM9, XMM13 /* m[14], m[4] */ + MOVOU 80(DX), X11 // X11 = m[10]+m[11] + MOVOU X13, X10 + LONG $0x6d0f4566; BYTE $0xd7 // PUNPCKHQDQ XMM10, XMM15 /* m[5], m[15] */ + LONG $0x3a0f4566; WORD $0xde0f; BYTE $0x08 // PALIGNR XMM11, XMM14, 0x8 /* m[13], m[10] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[6] */ + MOVOU X14, X9 + LONG $0x3a0f4566; WORD $0xce0f; BYTE $0x08 // PALIGNR XMM9, XMM14, 0x8 /* m[9], m[8] */ + MOVOU 16(DX), X11 // X14 = m[2]+ m[3] + MOVOU X13, X10 + LONG $0x6d0f4566; BYTE $0xd3 // PUNPCKHQDQ XMM10, XMM11 /* m[7], m[3] */ + LONG $0x6d0f4566; BYTE $0xff // PUNPCKHQDQ XMM15, XMM15 /* ___, m[11] */ + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[2], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 8 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU 
X14, X8 + LONG $0x6d0f4566; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[13], m[7] */ + MOVOU X12, X10 + LONG $0x6d0f4566; BYTE $0xd4 // PUNPCKHQDQ XMM10, XMM12 /* ___, m[3] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xca // PUNPCKLQDQ XMM9, XMM10 /* m[12], ____ */ + MOVOU 0(DX), X11 // X11 = m[0]+ m[1] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU X15, X10 + LONG $0x3a0f4566; WORD $0xd60f; BYTE $0x08 // PALIGNR XMM10, XMM14, 0x8 /* m[11], m[14] */ + LONG $0x6d0f4566; BYTE $0xdd // PUNPCKHQDQ XMM11, XMM13 /* m[1], m[9] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X13, X8 + LONG $0x6d0f4566; BYTE $0xc7 // PUNPCKHQDQ XMM8, XMM15 /* m[5], m[15] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xcc // PUNPCKLQDQ XMM9, XMM12 /* m[8], m[2] */ + MOVOU 0(DX), X10 // X10 = m[0]+ m[1] + MOVOU 48(DX), X11 // X11 = m[6]+ m[7] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + LONG $0x6c0f4566; BYTE $0xd5 // PUNPCKLQDQ XMM10, XMM13 /* m[0], m[4] */ + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[6], m[10] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 9 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X13, X8 + LONG $0x6c0f4566; BYTE $0xc7 // PUNPCKLQDQ XMM8, XMM15 /* m[6], m[14] */ + MOVOU X12, X9 + LONG $0x3a0f4566; WORD $0xce0f; BYTE $0x08 // PALIGNR XMM9, XMM14, 0x8 /* m[11], m[0] */ + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 64(DX), X11 // X11 
= m[8]+ m[9] + MOVOU X15, X10 + LONG $0x6d0f4566; BYTE $0xd3 // PUNPCKHQDQ XMM10, XMM11 /* m[15], m[9] */ + LONG $0x3a0f4566; WORD $0xdd0f; BYTE $0x08 // PALIGNR XMM11, XMM13, 0x8 /* m[3], m[8] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 16(DX), X13 // X13 = m[2]+ m[3] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + MOVOU X15, X9 + LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* ___, m[13] */ + MOVOU X15, X8 + LONG $0x6c0f4566; BYTE $0xc1 // PUNPCKLQDQ XMM8, XMM9 /* m[12], ____ */ + MOVOU X14, X9 + LONG $0x3a0f4566; WORD $0xcc0f; BYTE $0x08 // PALIGNR XMM9, XMM12, 0x8 /* m[1], m[10] */ + MOVOU 32(DX), X12 // X12 = m[4]+ m[5] + MOVOU 48(DX), X15 // X15 = m[6]+ m[7] + MOVOU X15, X11 + LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* ___, m[7] */ + MOVOU X13, X10 + LONG $0x6c0f4566; BYTE $0xd3 // PUNPCKLQDQ XMM10, XMM11 /* m[2], ____ */ + MOVOU X12, X15 + LONG $0x6d0f4566; BYTE $0xfc // PUNPCKHQDQ XMM15, XMM12 /* ___, m[5] */ + MOVOU X12, X11 + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[4], ____ */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 0 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 48(DX), X13 // X13 = m[6]+ m[7] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 80(DX), X15 // X15 = m[10]+m[11] + MOVOU X15, X8 + LONG $0x6c0f4566; BYTE $0xc6 // PUNPCKLQDQ XMM8, XMM14 /* m[10], m[8] */ + MOVOU X13, X9 + LONG $0x6d0f4566; BYTE $0xcc // PUNPCKHQDQ XMM9, XMM12 /* m[7], m[1] */ + MOVOU 16(DX), X10 // X10 = m[2]+ m[3] + MOVOU 32(DX), X14 // X14 = m[4]+ m[5] + LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[2], m[4] */ + MOVOU X14, X15 + 
LONG $0x6d0f4566; BYTE $0xfe // PUNPCKHQDQ XMM15, XMM14 /* ___, m[5] */ + MOVOU X13, X11 + LONG $0x6c0f4566; BYTE $0xdf // PUNPCKLQDQ XMM11, XMM15 /* m[6], ____ */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 64(DX), X13 // X13 = m[8]+ m[9] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X15, X8 + LONG $0x6d0f4566; BYTE $0xc5 // PUNPCKHQDQ XMM8, XMM13 /* m[15], m[9] */ + MOVOU X12, X9 + LONG $0x6d0f4566; BYTE $0xce // PUNPCKHQDQ XMM9, XMM14 /* m[3], m[13] */ + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU X15, X10 + LONG $0x3a0f4566; WORD $0xd50f; BYTE $0x08 // PALIGNR XMM10, XMM13, 0x8 /* m[11], m[14] */ + MOVOU X14, X11 + LONG $0x6c0f4566; BYTE $0xdc // PUNPCKLQDQ XMM11, XMM12 /* m[12], m[0] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 1 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+m[1] + MOVOU 16(DX), X13 // X13 = m[2]+m[3] + MOVOU 32(DX), X14 // X14 = m[4]+m[5] + MOVOU 48(DX), X15 // X15 = m[6]+m[7] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[0], m[2] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[4], m[6] */ + MOVOU X12, X10 + LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[1], m[3] */ + MOVOU X14, X11 + LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[5], m[7] */ + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 64(DX), X12 // X12 = m[8]+ m[9] + MOVOU 80(DX), X13 // X13 = m[10]+m[11] + MOVOU 96(DX), X14 // X14 = m[12]+m[13] + MOVOU 112(DX), X15 // X15 = m[14]+m[15] + MOVOU X12, X8 + LONG 
$0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[8],m[10] */ + MOVOU X14, X9 + LONG $0x6c0f4566; BYTE $0xcf // PUNPCKLQDQ XMM9, XMM15 /* m[12],m[14] */ + MOVOU X12, X10 + LONG $0x6d0f4566; BYTE $0xd5 // PUNPCKHQDQ XMM10, XMM13 /* m[9],m[11] */ + MOVOU X14, X11 + LONG $0x6d0f4566; BYTE $0xdf // PUNPCKHQDQ XMM11, XMM15 /* m[13],m[15] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + /////////////////////////////////////////////////////////////////////////// + // R O U N D 1 2 + /////////////////////////////////////////////////////////////////////////// + + // LOAD_MSG_ ##r ##_1 / ##_2(b0, b1); (X12 is temp register) + MOVOU 112(DX), X12 // X12 = m[14]+m[15] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 64(DX), X14 // X14 = m[8]+ m[9] + MOVOU 96(DX), X15 // X15 = m[12]+m[13] + MOVOU X12, X8 + LONG $0x6c0f4566; BYTE $0xc5 // PUNPCKLQDQ XMM8, XMM13 /* m[14], m[4] */ + MOVOU X14, X9 + LONG $0x6d0f4566; BYTE $0xcf // PUNPCKHQDQ XMM9, XMM15 /* m[9], m[13] */ + MOVOU 80(DX), X10 // X10 = m[10]+m[11] + MOVOU 48(DX), X11 // X11 = m[6]+ m[7] + LONG $0x6c0f4566; BYTE $0xd6 // PUNPCKLQDQ XMM10, XMM14 /* m[10], m[8] */ + LONG $0x3a0f4566; WORD $0xdc0f; BYTE $0x08 // PALIGNR XMM11, XMM12, 0x8 /* m[15], m[6] */; ; ; ; ; + + LOAD_SHUFFLE + G1 + G2 + DIAGONALIZE + + // LOAD_MSG_ ##r ##_3 / ##_4(b0, b1); (X12 is temp register) + MOVOU 0(DX), X12 // X12 = m[0]+ m[1] + MOVOU 32(DX), X13 // X13 = m[4]+ m[5] + MOVOU 80(DX), X14 // X14 = m[10]+m[11] + MOVOU X12, X8 + LONG $0x3a0f4566; WORD $0xc40f; BYTE $0x08 // PALIGNR XMM8, XMM12, 0x8 /* m[1], m[0] */ + MOVOU X14, X9 + LONG $0x6d0f4566; BYTE $0xcd // PUNPCKHQDQ XMM9, XMM13 /* m[11], m[5] */ + MOVOU 16(DX), X12 // X12 = m[2]+ m[3] + MOVOU 48(DX), X11 // X11 = m[6]+ m[7] + MOVOU 96(DX), X10 // X10 = m[12]+m[13] + LONG $0x6c0f4566; BYTE $0xd4 // PUNPCKLQDQ XMM10, XMM12 /* m[12], m[2] */ + LONG $0x6d0f4566; BYTE $0xdc // PUNPCKHQDQ XMM11, XMM12 /* m[7], m[3] */ + + LOAD_SHUFFLE + G1 + G2 + UNDIAGONALIZE + + // Reload digest (most 
current value store in &out) + MOVQ out+144(FP), SI // SI: &in + MOVOU 0(SI), X12 // X12 = in[0]+in[1] /* row1l = LOAD( &S->h[0] ); */ + MOVOU 16(SI), X13 // X13 = in[2]+in[3] /* row1h = LOAD( &S->h[2] ); */ + MOVOU 32(SI), X14 // X14 = in[4]+in[5] /* row2l = LOAD( &S->h[4] ); */ + MOVOU 48(SI), X15 // X15 = in[6]+in[7] /* row2h = LOAD( &S->h[6] ); */ + + // Final computations and prepare for storing + PXOR X4, X0 // X0 = X0 ^ X4 /* row1l = _mm_xor_si128( row3l, row1l ); */ + PXOR X5, X1 // X1 = X1 ^ X5 /* row1h = _mm_xor_si128( row3h, row1h ); */ + PXOR X12, X0 // X0 = X0 ^ X12 /* STORE( &S->h[0], _mm_xor_si128( LOAD( &S->h[0] ), row1l ) ); */ + PXOR X13, X1 // X1 = X1 ^ X13 /* STORE( &S->h[2], _mm_xor_si128( LOAD( &S->h[2] ), row1h ) ); */ + PXOR X6, X2 // X2 = X2 ^ X6 /* row2l = _mm_xor_si128( row4l, row2l ); */ + PXOR X7, X3 // X3 = X3 ^ X7 /* row2h = _mm_xor_si128( row4h, row2h ); */ + PXOR X14, X2 // X2 = X2 ^ X14 /* STORE( &S->h[4], _mm_xor_si128( LOAD( &S->h[4] ), row2l ) ); */ + PXOR X15, X3 // X3 = X3 ^ X15 /* STORE( &S->h[6], _mm_xor_si128( LOAD( &S->h[6] ), row2h ) ); */ + + // Store digest into &out + MOVQ out+144(FP), SI // SI: &out + MOVOU X0, 0(SI) // out[0]+out[1] = X0 + MOVOU X1, 16(SI) // out[2]+out[3] = X1 + MOVOU X2, 32(SI) // out[4]+out[5] = X2 + MOVOU X3, 48(SI) // out[6]+out[7] = X3 + + // Increment message pointer and check if there's more to do + ADDQ $128, DX // message += 128 + SUBQ $1, R8 + JNZ loop + +complete: + RET diff --git a/vendor/github.com/minio/blake2b-simd/compress_amd64.go b/vendor/github.com/minio/blake2b-simd/compress_amd64.go new file mode 100644 index 000000000..4fc5e388c --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/compress_amd64.go @@ -0,0 +1,30 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package blake2b + +func compress(d *digest, p []uint8) { + // Verifies if AVX2 or AVX is available, use optimized code path. + if avx2 { + compressAVX2(d, p) + } else if avx { + compressAVX(d, p) + } else if ssse3 { + compressSSE(d, p) + } else { + compressGeneric(d, p) + } +} diff --git a/vendor/github.com/minio/blake2b-simd/compress_generic.go b/vendor/github.com/minio/blake2b-simd/compress_generic.go new file mode 100644 index 000000000..e9e16e8b9 --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/compress_generic.go @@ -0,0 +1,1419 @@ +// Written in 2012 by Dmitry Chestnykh. +// +// To the extent possible under law, the author have dedicated all copyright +// and related and neighboring rights to this software to the public domain +// worldwide. This software is distributed without any warranty. +// http://creativecommons.org/publicdomain/zero/1.0/ + +package blake2b + +func compressGeneric(d *digest, p []uint8) { + h0, h1, h2, h3, h4, h5, h6, h7 := d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] + + for len(p) >= BlockSize { + // Increment counter. + d.t[0] += BlockSize + if d.t[0] < BlockSize { + d.t[1]++ + } + // Initialize compression function. 
+ v0, v1, v2, v3, v4, v5, v6, v7 := h0, h1, h2, h3, h4, h5, h6, h7 + v8 := iv[0] + v9 := iv[1] + v10 := iv[2] + v11 := iv[3] + v12 := iv[4] ^ d.t[0] + v13 := iv[5] ^ d.t[1] + v14 := iv[6] ^ d.f[0] + v15 := iv[7] ^ d.f[1] + + j := 0 + var m [16]uint64 + for i := range m { + m[i] = uint64(p[j]) | uint64(p[j+1])<<8 | uint64(p[j+2])<<16 | + uint64(p[j+3])<<24 | uint64(p[j+4])<<32 | uint64(p[j+5])<<40 | + uint64(p[j+6])<<48 | uint64(p[j+7])<<56 + j += 8 + } + + // Round 1. + v0 += m[0] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[2] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[4] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[6] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[5] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[7] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[3] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[1] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[8] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[10] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[12] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[14] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[13] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 
= v7<<(64-63) | v7>>63 + v3 += m[15] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[11] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[9] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + // Round 2. + v0 += m[14] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[4] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[9] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[13] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[15] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[6] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[8] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[10] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[1] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[0] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[11] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[5] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[7] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[3] + v3 += v4 + v14 ^= v3 + v14 = 
v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[2] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[12] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + // Round 3. + v0 += m[11] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[12] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[5] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[15] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[2] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[13] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[0] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[8] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[10] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[3] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[7] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[9] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[1] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[4] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 
+ v1 += m[6] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[14] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + // Round 4. + v0 += m[7] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[3] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[13] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[11] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[12] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[14] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[1] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[9] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[2] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[5] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[4] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[15] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[0] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[8] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[10] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 
+= v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[6] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + // Round 5. + v0 += m[9] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[5] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[2] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[10] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[4] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[15] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[7] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[0] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[14] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[11] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[6] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[3] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[8] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[13] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[12] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[1] + v0 += v5 + 
v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + // Round 6. + v0 += m[2] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[6] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[0] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[8] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[11] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[3] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[10] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[12] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[4] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[7] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[15] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[1] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[14] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[9] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[5] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[13] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = 
v5<<(64-63) | v5>>63 + + // Round 7. + v0 += m[12] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[1] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[14] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[4] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[13] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[10] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[15] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[5] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[0] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[6] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[9] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[8] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[2] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[11] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[3] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[7] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + // Round 8. 
+ v0 += m[13] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[7] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[12] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[3] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[1] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[9] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[14] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[11] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[5] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[15] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[8] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[2] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[6] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[10] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[4] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[0] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + // Round 9. 
+ v0 += m[6] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[14] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[11] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[0] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[3] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[8] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[9] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[15] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[12] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[13] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[1] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[10] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[4] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[5] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[7] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[2] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + // Round 10. 
+ v0 += m[10] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[8] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[7] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[1] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[6] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[5] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[4] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[2] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[15] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[9] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[3] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[13] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[12] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[0] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[14] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[11] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + // Round 11. 
+ v0 += m[0] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[2] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[4] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[6] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[5] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[7] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[3] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[1] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[8] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[10] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[12] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[14] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[13] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[15] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[11] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[9] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + // Round 12. 
+ v0 += m[14] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[4] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[9] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[13] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + v2 += m[15] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[6] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + v1 += m[8] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v0 += m[10] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v0 += m[1] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[0] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[11] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[5] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + v2 += m[7] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[3] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + v1 += m[2] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v0 += m[12] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + + h0 ^= v0 ^ v8 + h1 ^= v1 ^ v9 + h2 ^= v2 ^ v10 + h3 ^= v3 ^ v11 + h4 ^= v4 ^ v12 + h5 
^= v5 ^ v13 + h6 ^= v6 ^ v14 + h7 ^= v7 ^ v15 + + p = p[BlockSize:] + } + d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} diff --git a/vendor/github.com/minio/blake2b-simd/compress_noasm.go b/vendor/github.com/minio/blake2b-simd/compress_noasm.go new file mode 100644 index 000000000..d3c675847 --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/compress_noasm.go @@ -0,0 +1,23 @@ +//+build !amd64 noasm appengine + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package blake2b + +func compress(d *digest, p []uint8) { + compressGeneric(d, p) +} diff --git a/vendor/github.com/minio/blake2b-simd/cpuid.go b/vendor/github.com/minio/blake2b-simd/cpuid.go new file mode 100644 index 000000000..a9f95508e --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/cpuid.go @@ -0,0 +1,60 @@ +// +build 386,!gccgo amd64,!gccgo + +// Copyright 2016 Frank Wessels +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package blake2b + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func xgetbv(index uint32) (eax, edx uint32) + +// True when SIMD instructions are available. +var avx2 = haveAVX2() +var avx = haveAVX() +var ssse3 = haveSSSE3() + +// haveAVX returns true when there is AVX support +func haveAVX() bool { + _, _, c, _ := cpuid(1) + + // Check XGETBV, OXSAVE and AVX bits + if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { + // Check for OS support + eax, _ := xgetbv(0) + return (eax & 0x6) == 0x6 + } + return false +} + +// haveAVX2 returns true when there is AVX2 support +func haveAVX2() bool { + mfi, _, _, _ := cpuid(0) + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. + if mfi >= 7 && haveAVX() { + _, ebx, _, _ := cpuidex(7, 0) + return (ebx & 0x00000020) != 0 + } + return false +} + +// haveSSSE3 returns true when there is SSSE3 support +func haveSSSE3() bool { + + _, _, c, _ := cpuid(1) + + return (c & 0x00000200) != 0 +} diff --git a/vendor/github.com/minio/blake2b-simd/cpuid_386.s b/vendor/github.com/minio/blake2b-simd/cpuid_386.s new file mode 100644 index 000000000..fa38814ec --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/cpuid_386.s @@ -0,0 +1,33 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +// +build 386,!gccgo + +// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·xgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET diff --git a/vendor/github.com/minio/blake2b-simd/cpuid_amd64.s b/vendor/github.com/minio/blake2b-simd/cpuid_amd64.s new file mode 100644 index 000000000..fb45a6560 --- /dev/null +++ b/vendor/github.com/minio/blake2b-simd/cpuid_amd64.s @@ -0,0 +1,34 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build amd64,!gccgo + +// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + + +// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·xgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go deleted file mode 100644 index fa9e48e31..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blake2b implements the BLAKE2b hash algorithm as -// defined in RFC 7693. -package blake2b - -import ( - "encoding/binary" - "errors" - "hash" -) - -const ( - // The blocksize of BLAKE2b in bytes. - BlockSize = 128 - // The hash size of BLAKE2b-512 in bytes. - Size = 64 - // The hash size of BLAKE2b-384 in bytes. - Size384 = 48 - // The hash size of BLAKE2b-256 in bytes. - Size256 = 32 -) - -var ( - useAVX2 bool - useAVX bool - useSSE4 bool -) - -var errKeySize = errors.New("blake2b: invalid key size") - -var iv = [8]uint64{ - 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, - 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, -} - -// Sum512 returns the BLAKE2b-512 checksum of the data. -func Sum512(data []byte) [Size]byte { - var sum [Size]byte - checkSum(&sum, Size, data) - return sum -} - -// Sum384 returns the BLAKE2b-384 checksum of the data. -func Sum384(data []byte) [Size384]byte { - var sum [Size]byte - var sum384 [Size384]byte - checkSum(&sum, Size384, data) - copy(sum384[:], sum[:Size384]) - return sum384 -} - -// Sum256 returns the BLAKE2b-256 checksum of the data. -func Sum256(data []byte) [Size256]byte { - var sum [Size]byte - var sum256 [Size256]byte - checkSum(&sum, Size256, data) - copy(sum256[:], sum[:Size256]) - return sum256 -} - -// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil -// key turns the hash into a MAC. The key must between zero and 64 bytes long. -func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } - -// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil -// key turns the hash into a MAC. The key must between zero and 64 bytes long. 
-func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } - -// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil -// key turns the hash into a MAC. The key must between zero and 64 bytes long. -func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } - -func newDigest(hashSize int, key []byte) (*digest, error) { - if len(key) > Size { - return nil, errKeySize - } - d := &digest{ - size: hashSize, - keyLen: len(key), - } - copy(d.key[:], key) - d.Reset() - return d, nil -} - -func checkSum(sum *[Size]byte, hashSize int, data []byte) { - h := iv - h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) - var c [2]uint64 - - if length := len(data); length > BlockSize { - n := length &^ (BlockSize - 1) - if length == n { - n -= BlockSize - } - hashBlocks(&h, &c, 0, data[:n]) - data = data[n:] - } - - var block [BlockSize]byte - offset := copy(block[:], data) - remaining := uint64(BlockSize - offset) - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) - - for i, v := range h[:(hashSize+7)/8] { - binary.LittleEndian.PutUint64(sum[8*i:], v) - } -} - -type digest struct { - h [8]uint64 - c [2]uint64 - size int - block [BlockSize]byte - offset int - - key [BlockSize]byte - keyLen int -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Size() int { return d.size } - -func (d *digest) Reset() { - d.h = iv - d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) - d.offset, d.c[0], d.c[1] = 0, 0, 0 - if d.keyLen > 0 { - d.block = d.key - d.offset = BlockSize - } -} - -func (d *digest) Write(p []byte) (n int, err error) { - n = len(p) - - if d.offset > 0 { - remaining := BlockSize - d.offset - if n <= remaining { - d.offset += copy(d.block[d.offset:], p) - return - } - copy(d.block[d.offset:], p[:remaining]) - hashBlocks(&d.h, &d.c, 0, d.block[:]) - d.offset = 0 - p = p[remaining:] - } - - if length := len(p); 
length > BlockSize { - nn := length &^ (BlockSize - 1) - if length == nn { - nn -= BlockSize - } - hashBlocks(&d.h, &d.c, 0, p[:nn]) - p = p[nn:] - } - - if len(p) > 0 { - d.offset += copy(d.block[:], p) - } - - return -} - -func (d *digest) Sum(b []byte) []byte { - var block [BlockSize]byte - copy(block[:], d.block[:d.offset]) - remaining := uint64(BlockSize - d.offset) - - c := d.c - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - h := d.h - hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) - - var sum [Size]byte - for i, v := range h[:(d.size+7)/8] { - binary.LittleEndian.PutUint64(sum[8*i:], v) - } - - return append(b, sum[:d.size]...) -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go deleted file mode 100644 index 8c41cf6c7..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.7,amd64,!gccgo,!appengine - -package blake2b - -func init() { - useAVX2 = supportsAVX2() - useAVX = supportsAVX() - useSSE4 = supportsSSE4() -} - -//go:noescape -func supportsSSE4() bool - -//go:noescape -func supportsAVX() bool - -//go:noescape -func supportsAVX2() bool - -//go:noescape -func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -//go:noescape -func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - if useAVX2 { - hashBlocksAVX2(h, c, flag, blocks) - } else if useAVX { - hashBlocksAVX(h, c, flag, blocks) - } else if useSSE4 { - hashBlocksSSE4(h, c, flag, blocks) - } else { - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s deleted file mode 100644 index 96a51d524..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ /dev/null @@ -1,502 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.7,amd64,!gccgo,!appengine - -#include "textflag.h" - -DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 - -// unfortunately the BYTE representation of VPERMQ must be used -#define 
ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 \ // VPERMQ 0x39, Y1, Y1 - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e \ // VPERMQ 0x4e, Y2, Y2 - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 \ // VPERMQ 0x93, Y3, Y3 - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 \ // VPERMQ 0x39, Y3, Y3 - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e \ // VPERMQ 0x4e, Y2, Y2 - BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 \ // VPERMQ 0x93, Y1, Y1 - -// load msg into Y12, Y13, Y14, Y15 -#define LOAD_MSG_AVX2(src, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15) \ - MOVQ i0*8(src), X12; \ - PINSRQ $1, i1*8(src), X12; \ - MOVQ i2*8(src), X11; \ - PINSRQ $1, i3*8(src), X11; \ - VINSERTI128 $1, X11, Y12, Y12; \ - MOVQ i4*8(src), X13; \ - PINSRQ $1, i5*8(src), X13; \ - MOVQ i6*8(src), X11; \ - PINSRQ $1, i7*8(src), X11; \ - VINSERTI128 $1, X11, Y13, Y13; \ - MOVQ i8*8(src), X14; \ - PINSRQ $1, i9*8(src), X14; \ - MOVQ i10*8(src), X11; \ - PINSRQ $1, i11*8(src), X11; \ - VINSERTI128 $1, X11, Y14, Y14; \ - MOVQ i12*8(src), X15; \ - PINSRQ $1, 
i13*8(src), X15; \ - MOVQ i14*8(src), X11; \ - PINSRQ $1, i15*8(src), X11; \ - VINSERTI128 $1, X11, Y15, Y15 - -// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, DX - MOVQ SP, R9 - ADDQ $31, R9 - ANDQ $~31, R9 - MOVQ R9, SP - - MOVQ CX, 16(SP) - XORQ CX, CX - MOVQ CX, 24(SP) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 - VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(SP) - -loop: - ADDQ $128, R8 - MOVQ R8, 0(SP) - CMPQ R8, $128 - JGE noinc - INCQ R9 - MOVQ R9, 8(SP) - -noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(SP), Y7, Y3 - - LOAD_MSG_AVX2(SI, 0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15) - VMOVDQA Y12, 32(SP) - VMOVDQA Y13, 64(SP) - VMOVDQA Y14, 96(SP) - VMOVDQA Y15, 128(SP) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2(SI, 14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3) - VMOVDQA Y12, 160(SP) - VMOVDQA Y13, 192(SP) - VMOVDQA Y14, 224(SP) - VMOVDQA Y15, 256(SP) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2(SI, 11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2(SI, 7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2(SI, 9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2(SI, 2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2(SI, 12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2(SI, 13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 
0, 4, 6, 10) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2(SI, 6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2(SI, 10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) - ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) - - MOVQ DX, SP - RET - -// unfortunately the BYTE representation of VPUNPCKLQDQ and VPUNPCKHQDQ must be used -#define VPUNPCKLQDQ_X8_X8_X10 BYTE $0xC4; BYTE $0x41; BYTE $0x39; BYTE $0x6C; BYTE $0xD0 -#define VPUNPCKHQDQ_X7_X10_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF2 -#define VPUNPCKLQDQ_X7_X7_X10 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xD7 -#define VPUNPCKHQDQ_X8_X10_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x39; BYTE $0x6D; BYTE $0xFA -#define VPUNPCKLQDQ_X3_X3_X10 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xD3 -#define VPUNPCKHQDQ_X2_X10_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD2 -#define VPUNPCKLQDQ_X9_X9_X10 BYTE $0xC4; BYTE $0x41; BYTE $0x31; BYTE $0x6C; BYTE $0xD1 -#define VPUNPCKHQDQ_X3_X10_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDA -#define VPUNPCKLQDQ_X2_X2_X10 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xD2 -#define VPUNPCKHQDQ_X3_X10_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD2 -#define VPUNPCKHQDQ_X8_X10_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x39; BYTE $0x6D; BYTE $0xDA -#define VPUNPCKHQDQ_X6_X10_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF2 -#define VPUNPCKHQDQ_X7_X10_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFA - -// shuffle X2 and X6 using the temp registers X8, X9, X10 -#define SHUFFLE_AVX() \ - VMOVDQA X4, X9; \ - VMOVDQA X5, X4; \ - 
VMOVDQA X9, X5; \ - VMOVDQA X6, X8; \ - VPUNPCKLQDQ_X8_X8_X10; \ - VPUNPCKHQDQ_X7_X10_X6; \ - VPUNPCKLQDQ_X7_X7_X10; \ - VPUNPCKHQDQ_X8_X10_X7; \ - VPUNPCKLQDQ_X3_X3_X10; \ - VMOVDQA X2, X9; \ - VPUNPCKHQDQ_X2_X10_X2; \ - VPUNPCKLQDQ_X9_X9_X10; \ - VPUNPCKHQDQ_X3_X10_X3; \ - -// inverse shuffle X2 and X6 using the temp registers X8, X9, X10 -#define SHUFFLE_AVX_INV() \ - VMOVDQA X4, X9; \ - VMOVDQA X5, X4; \ - VMOVDQA X9, X5; \ - VMOVDQA X2, X8; \ - VPUNPCKLQDQ_X2_X2_X10; \ - VPUNPCKHQDQ_X3_X10_X2; \ - VPUNPCKLQDQ_X3_X3_X10; \ - VPUNPCKHQDQ_X8_X10_X3; \ - VPUNPCKLQDQ_X7_X7_X10; \ - VMOVDQA X6, X9; \ - VPUNPCKHQDQ_X6_X10_X6; \ - VPUNPCKLQDQ_X9_X9_X10; \ - VPUNPCKHQDQ_X7_X10_X7; \ - -#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - VPADDQ m0, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m1, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFD $-79, v6, v6; \ - VPSHUFD $-79, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPSHUFB c40, v2, v2; \ - VPSHUFB c40, v3, v3; \ - VPADDQ m2, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m3, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFB c48, v6, v6; \ - VPSHUFB c48, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPADDQ v2, v2, t0; \ - VPSRLQ $63, v2, v2; \ - VPXOR t0, v2, v2; \ - VPADDQ v3, v3, t0; \ - VPSRLQ $63, v3, v3; \ - VPXOR t0, v3, v3 - -// unfortunately the BYTE representation of VPINSRQ must be used -#define VPINSRQ_1_R10_X8_X8 BYTE $0xC4; BYTE $0x43; BYTE $0xB9; BYTE $0x22; BYTE $0xC2; BYTE $0x01 -#define VPINSRQ_1_R11_X9_X9 BYTE $0xC4; BYTE $0x43; BYTE $0xB1; BYTE $0x22; BYTE $0xCB; BYTE $0x01 -#define VPINSRQ_1_R12_X10_X10 BYTE $0xC4; BYTE $0x43; BYTE $0xA9; BYTE $0x22; BYTE $0xD4; BYTE $0x01 -#define VPINSRQ_1_R13_X11_X11 BYTE $0xC4; BYTE $0x43; BYTE $0xA1; BYTE $0x22; BYTE $0xDD; BYTE $0x01 - -#define 
VPINSRQ_1_R9_X8_X8 BYTE $0xC4; BYTE $0x43; BYTE $0xB9; BYTE $0x22; BYTE $0xC1; BYTE $0x01 - -// load src into X8, X9, X10 and X11 using R10, R11, R12 and R13 for temp registers -#define LOAD_MSG_AVX(src, i0, i1, i2, i3, i4, i5, i6, i7) \ - MOVQ i0*8(src), X8; \ - MOVQ i1*8(src), R10; \ - MOVQ i2*8(src), X9; \ - MOVQ i3*8(src), R11; \ - MOVQ i4*8(src), X10; \ - MOVQ i5*8(src), R12; \ - MOVQ i6*8(src), X11; \ - MOVQ i7*8(src), R13; \ - VPINSRQ_1_R10_X8_X8; \ - VPINSRQ_1_R11_X9_X9; \ - VPINSRQ_1_R12_X10_X10; \ - VPINSRQ_1_R13_X11_X11 - -// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, BP - MOVQ SP, R9 - ADDQ $15, R9 - ANDQ $~15, R9 - MOVQ R9, SP - - MOVOU ·AVX_c40<>(SB), X13 - MOVOU ·AVX_c48<>(SB), X14 - - VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(SP) - XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) - - VMOVDQU 0(AX), X12 - VMOVDQU 16(AX), X15 - VMOVDQU 32(AX), X2 - VMOVDQU 48(AX), X3 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - -loop: - ADDQ $128, R8 - CMPQ R8, $128 - JGE noinc - INCQ R9 - -noinc: - MOVQ R8, X8 - VPINSRQ_1_R9_X8_X8 - - VMOVDQA X12, X0 - VMOVDQA X15, X1 - VMOVDQU ·AVX_iv0<>(SB), X4 - VMOVDQU ·AVX_iv1<>(SB), X5 - VMOVDQU ·AVX_iv2<>(SB), X6 - - VPXOR X8, X6, X6 - VMOVDQA 0(SP), X7 - - LOAD_MSG_AVX(SI, 0, 2, 4, 6, 1, 3, 5, 7) - VMOVDQA X8, 16(SP) - VMOVDQA X9, 32(SP) - VMOVDQA X10, 48(SP) - VMOVDQA X11, 64(SP) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX() - LOAD_MSG_AVX(SI, 8, 10, 12, 14, 9, 11, 13, 15) - VMOVDQA X8, 80(SP) - VMOVDQA X9, 96(SP) - VMOVDQA X10, 112(SP) - VMOVDQA X11, 128(SP) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(SI, 14, 4, 9, 13, 10, 8, 15, 6) - VMOVDQA X8, 144(SP) - VMOVDQA X9, 
160(SP) - VMOVDQA X10, 176(SP) - VMOVDQA X11, 192(SP) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX() - LOAD_MSG_AVX(SI, 1, 0, 11, 5, 12, 2, 7, 3) - VMOVDQA X8, 208(SP) - VMOVDQA X9, 224(SP) - VMOVDQA X10, 240(SP) - VMOVDQA X11, 256(SP) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(SI, 11, 12, 5, 15, 8, 0, 2, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX() - LOAD_MSG_AVX(SI, 10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(SI, 7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX() - LOAD_MSG_AVX(SI, 2, 5, 4, 15, 6, 10, 0, 8) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(SI, 9, 5, 2, 10, 0, 7, 4, 15) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX() - LOAD_MSG_AVX(SI, 14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(SI, 2, 6, 0, 8, 12, 10, 11, 3) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX() - LOAD_MSG_AVX(SI, 4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(SI, 12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX() - LOAD_MSG_AVX(SI, 0, 6, 9, 8, 7, 3, 2, 11) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(SI, 13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - 
SHUFFLE_AVX() - LOAD_MSG_AVX(SI, 5, 15, 8, 2, 0, 4, 6, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(SI, 6, 14, 11, 0, 15, 9, 3, 8) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX() - LOAD_MSG_AVX(SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(SI, 10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX() - LOAD_MSG_AVX(SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) - SHUFFLE_AVX_INV() - - VMOVDQU 32(AX), X10 - VMOVDQU 48(AX), X11 - VPXOR X0, X12, X12 - VPXOR X1, X15, X15 - VPXOR X2, X10, X10 - VPXOR X3, X11, X11 - VPXOR X4, X12, X12 - VPXOR X5, X15, X15 - VPXOR X6, X10, X2 - VPXOR X7, X11, X3 - VMOVDQU X2, 32(AX) - VMOVDQU X3, 48(AX) - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - VMOVDQU X12, 0(AX) - VMOVDQU X15, 16(AX) - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - VZEROUPPER - - MOVQ BP, SP - RET - -// func supportsAVX2() bool -TEXT ·supportsAVX2(SB), 4, $0-1 - MOVQ runtime·support_avx2(SB), AX - MOVB AX, ret+0(FP) - RET - -// func supportsAVX() bool -TEXT ·supportsAVX(SB), 4, $0-1 - MOVQ runtime·support_avx(SB), AX - MOVB AX, ret+0(FP) - RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go 
b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go deleted file mode 100644 index 2ab7c30fc..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7,amd64,!gccgo,!appengine - -package blake2b - -func init() { - useSSE4 = supportsSSE4() -} - -//go:noescape -func supportsSSE4() bool - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - if useSSE4 { - hashBlocksSSE4(h, c, flag, blocks) - } else { - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s deleted file mode 100644 index 64530740b..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build amd64,!gccgo,!appengine - -#include "textflag.h" - -DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - PADDQ m0, v0; \ - PADDQ m1, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v6, v6; \ - PSHUFD $0xB1, v7, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - PSHUFB c40, v2; \ - PSHUFB c40, v3; \ - PADDQ m2, v0; \ - PADDQ m3, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - 
PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFB c48, v6; \ - PSHUFB c48, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - MOVOU v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVOU v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ - MOVQ i0*8(src), m0; \ - PINSRQ $1, i1*8(src), m0; \ - MOVQ i2*8(src), m1; \ - PINSRQ $1, i3*8(src), m1; \ - MOVQ i4*8(src), m2; \ - PINSRQ $1, i5*8(src), m2; \ - MOVQ i6*8(src), m3; \ - PINSRQ $1, i7*8(src), m3 - -// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, BP - MOVQ SP, R9 - ADDQ $15, R9 - ANDQ $~15, R9 - MOVQ R9, SP - - MOVOU ·iv3<>(SB), X0 - MOVO X0, 0(SP) - XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) - - MOVOU ·c40<>(SB), X13 - MOVOU ·c48<>(SB), X14 - - MOVOU 0(AX), X12 - MOVOU 16(AX), X15 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - -loop: - ADDQ $128, R8 - CMPQ R8, $128 - JGE noinc - INCQ R9 - -noinc: - MOVQ R8, X8 - PINSRQ $1, R9, X8 - - MOVO X12, X0 - MOVO X15, X1 - MOVOU 32(AX), X2 - MOVOU 48(AX), X3 - MOVOU ·iv0<>(SB), X4 - MOVOU ·iv1<>(SB), X5 - MOVOU ·iv2<>(SB), X6 - - PXOR X8, X6 - MOVO 0(SP), X7 - - LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(SP) - MOVO X9, 32(SP) - MOVO X10, 48(SP) - MOVO X11, 64(SP) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(SP) - MOVO X9, 96(SP) - MOVO X10, 112(SP) - MOVO X11, 128(SP) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - 
MOVO X8, 144(SP) - MOVO X9, 160(SP) - MOVO X10, 176(SP) - MOVO X11, 192(SP) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(SP) - MOVO X9, 224(SP) - MOVO X10, 240(SP) - MOVO X11, 256(SP) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) - 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - MOVOU 32(AX), X10 - MOVOU 48(AX), X11 - 
PXOR X0, X12 - PXOR X1, X15 - PXOR X2, X10 - PXOR X3, X11 - PXOR X4, X12 - PXOR X5, X15 - PXOR X6, X10 - PXOR X7, X11 - MOVOU X10, 32(AX) - MOVOU X11, 48(AX) - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVOU X12, 0(AX) - MOVOU X15, 16(AX) - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - MOVQ BP, SP - RET - -// func supportsSSE4() bool -TEXT ·supportsSSE4(SB), 4, $0-1 - MOVL $1, AX - CPUID - SHRL $19, CX // Bit 19 indicates SSE4 support - ANDL $1, CX // CX != 0 if support SSE4 - MOVB CX, ret+0(FP) - RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go deleted file mode 100644 index 4bd2abc91..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2b - -import "encoding/binary" - -// the precomputed values for BLAKE2b -// there are 12 16-byte arrays - one for each round -// the entries are calculated from the sigma constants. 
-var precomputed = [12][16]byte{ - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, - {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, - {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, - {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, - {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, - {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, - {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, - {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, - {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second -} - -func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - var m [16]uint64 - c0, c1 := c[0], c[1] - - for i := 0; i < len(blocks); { - c0 += BlockSize - if c0 < BlockSize { - c1++ - } - - v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] - v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] - v12 ^= c0 - v13 ^= c1 - v14 ^= flag - - for j := range m { - m[j] = binary.LittleEndian.Uint64(blocks[i:]) - i += 8 - } - - for j := range precomputed { - s := &(precomputed[j]) - - v0 += m[s[0]] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[s[1]] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[s[2]] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[s[3]] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - - v0 += m[s[4]] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v1 += m[s[5]] - v1 += v5 - v13 ^= 
v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v2 += m[s[6]] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[s[7]] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - - v0 += m[s[8]] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[s[9]] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[s[10]] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[s[11]] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - - v0 += m[s[12]] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - v1 += m[s[13]] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v2 += m[s[14]] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[s[15]] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - - } - - h[0] ^= v0 ^ v8 - h[1] ^= v1 ^ v9 - h[2] ^= v2 ^ v10 - h[3] ^= v3 ^ v11 - h[4] ^= v4 ^ v12 - h[5] ^= v5 ^ v13 - h[6] ^= v6 ^ v14 - h[7] ^= v7 ^ v15 - } - c[0], c[1] = c0, c1 -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go deleted file mode 100644 index da156a1ba..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !amd64 appengine gccgo - -package blake2b - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - hashBlocksGeneric(h, c, flag, blocks) -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_test.go b/vendor/golang.org/x/crypto/blake2b/blake2b_test.go deleted file mode 100644 index a38fceb20..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_test.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2b - -import ( - "bytes" - "encoding/hex" - "fmt" - "hash" - "testing" -) - -func fromHex(s string) []byte { - b, err := hex.DecodeString(s) - if err != nil { - panic(err) - } - return b -} - -func TestHashes(t *testing.T) { - defer func(sse4, avx, avx2 bool) { - useSSE4, useAVX, useAVX2 = sse4, useAVX, avx2 - }(useSSE4, useAVX, useAVX2) - - if useAVX2 { - t.Log("AVX2 version") - testHashes(t) - useAVX2 = false - } - if useAVX { - t.Log("AVX version") - testHashes(t) - useAVX = false - } - if useSSE4 { - t.Log("SSE4 version") - testHashes(t) - useSSE4 = false - } - t.Log("generic version") - testHashes(t) -} - -func testHashes(t *testing.T) { - key, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f") - - input := make([]byte, 255) - for i := range input { - input[i] = byte(i) - } - - for i, expectedHex := range hashes { - h, err := New512(key) - if err != nil { - t.Fatalf("#%d: error from New512: %v", i, err) - } - - h.Write(input[:i]) - sum := h.Sum(nil) - - if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { - t.Fatalf("#%d (single write): got %s, wanted %s", i, gotHex, expectedHex) - } - - h.Reset() - for j := 0; j < i; j++ { - h.Write(input[j : j+1]) - } - - sum = h.Sum(sum[:0]) - if gotHex := fmt.Sprintf("%x", sum); gotHex != expectedHex { - 
t.Fatalf("#%d (byte-by-byte): got %s, wanted %s", i, gotHex, expectedHex) - } - } -} - -func generateSequence(out []byte, seed uint32) { - a := 0xDEAD4BAD * seed // prime - b := uint32(1) - - for i := range out { // fill the buf - a, b = b, a+b - out[i] = byte(b >> 24) - } -} - -func computeMAC(msg []byte, hashSize int, key []byte) (sum []byte) { - var h hash.Hash - switch hashSize { - case Size: - h, _ = New512(key) - case Size384: - h, _ = New384(key) - case Size256: - h, _ = New256(key) - case 20: - h, _ = newDigest(20, key) - default: - panic("unexpected hashSize") - } - - h.Write(msg) - return h.Sum(sum) -} - -func computeHash(msg []byte, hashSize int) (sum []byte) { - switch hashSize { - case Size: - hash := Sum512(msg) - return hash[:] - case Size384: - hash := Sum384(msg) - return hash[:] - case Size256: - hash := Sum256(msg) - return hash[:] - case 20: - var hash [64]byte - checkSum(&hash, 20, msg) - return hash[:20] - default: - panic("unexpected hashSize") - } -} - -// Test function from RFC 7693. 
-func TestSelfTest(t *testing.T) { - hashLens := [4]int{20, 32, 48, 64} - msgLens := [6]int{0, 3, 128, 129, 255, 1024} - - msg := make([]byte, 1024) - key := make([]byte, 64) - - h, _ := New256(nil) - for _, hashSize := range hashLens { - for _, msgLength := range msgLens { - generateSequence(msg[:msgLength], uint32(msgLength)) // unkeyed hash - - md := computeHash(msg[:msgLength], hashSize) - h.Write(md) - - generateSequence(key[:], uint32(hashSize)) // keyed hash - md = computeMAC(msg[:msgLength], hashSize, key[:hashSize]) - h.Write(md) - } - } - - sum := h.Sum(nil) - expected := [32]byte{ - 0xc2, 0x3a, 0x78, 0x00, 0xd9, 0x81, 0x23, 0xbd, - 0x10, 0xf5, 0x06, 0xc6, 0x1e, 0x29, 0xda, 0x56, - 0x03, 0xd7, 0x63, 0xb8, 0xbb, 0xad, 0x2e, 0x73, - 0x7f, 0x5e, 0x76, 0x5a, 0x7b, 0xcc, 0xd4, 0x75, - } - if !bytes.Equal(sum, expected[:]) { - t.Fatalf("got %x, wanted %x", sum, expected) - } -} - -// Benchmarks - -func benchmarkSum(b *testing.B, size int) { - data := make([]byte, size) - b.SetBytes(int64(size)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Sum512(data) - } -} - -func benchmarkWrite(b *testing.B, size int) { - data := make([]byte, size) - h, _ := New512(nil) - b.SetBytes(int64(size)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - h.Write(data) - } -} - -func BenchmarkWrite128(b *testing.B) { benchmarkWrite(b, 128) } -func BenchmarkWrite1K(b *testing.B) { benchmarkWrite(b, 1024) } - -func BenchmarkSum128(b *testing.B) { benchmarkSum(b, 128) } -func BenchmarkSum1K(b *testing.B) { benchmarkSum(b, 1024) } - -// These values were taken from https://blake2.net/blake2b-test.txt. 
-var hashes = []string{ - "10ebb67700b1868efb4417987acf4690ae9d972fb7a590c2f02871799aaa4786b5e996e8f0f4eb981fc214b005f42d2ff4233499391653df7aefcbc13fc51568", - "961f6dd1e4dd30f63901690c512e78e4b45e4742ed197c3c5e45c549fd25f2e4187b0bc9fe30492b16b0d0bc4ef9b0f34c7003fac09a5ef1532e69430234cebd", - "da2cfbe2d8409a0f38026113884f84b50156371ae304c4430173d08a99d9fb1b983164a3770706d537f49e0c916d9f32b95cc37a95b99d857436f0232c88a965", - "33d0825dddf7ada99b0e7e307104ad07ca9cfd9692214f1561356315e784f3e5a17e364ae9dbb14cb2036df932b77f4b292761365fb328de7afdc6d8998f5fc1", - "beaa5a3d08f3807143cf621d95cd690514d0b49efff9c91d24b59241ec0eefa5f60196d407048bba8d2146828ebcb0488d8842fd56bb4f6df8e19c4b4daab8ac", - "098084b51fd13deae5f4320de94a688ee07baea2800486689a8636117b46c1f4c1f6af7f74ae7c857600456a58a3af251dc4723a64cc7c0a5ab6d9cac91c20bb", - "6044540d560853eb1c57df0077dd381094781cdb9073e5b1b3d3f6c7829e12066bbaca96d989a690de72ca3133a83652ba284a6d62942b271ffa2620c9e75b1f", - "7a8cfe9b90f75f7ecb3acc053aaed6193112b6f6a4aeeb3f65d3de541942deb9e2228152a3c4bbbe72fc3b12629528cfbb09fe630f0474339f54abf453e2ed52", - "380beaf6ea7cc9365e270ef0e6f3a64fb902acae51dd5512f84259ad2c91f4bc4108db73192a5bbfb0cbcf71e46c3e21aee1c5e860dc96e8eb0b7b8426e6abe9", - "60fe3c4535e1b59d9a61ea8500bfac41a69dffb1ceadd9aca323e9a625b64da5763bad7226da02b9c8c4f1a5de140ac5a6c1124e4f718ce0b28ea47393aa6637", - "4fe181f54ad63a2983feaaf77d1e7235c2beb17fa328b6d9505bda327df19fc37f02c4b6f0368ce23147313a8e5738b5fa2a95b29de1c7f8264eb77b69f585cd", - "f228773ce3f3a42b5f144d63237a72d99693adb8837d0e112a8a0f8ffff2c362857ac49c11ec740d1500749dac9b1f4548108bf3155794dcc9e4082849e2b85b", - "962452a8455cc56c8511317e3b1f3b2c37df75f588e94325fdd77070359cf63a9ae6e930936fdf8e1e08ffca440cfb72c28f06d89a2151d1c46cd5b268ef8563", - "43d44bfa18768c59896bf7ed1765cb2d14af8c260266039099b25a603e4ddc5039d6ef3a91847d1088d401c0c7e847781a8a590d33a3c6cb4df0fab1c2f22355", - 
"dcffa9d58c2a4ca2cdbb0c7aa4c4c1d45165190089f4e983bb1c2cab4aaeff1fa2b5ee516fecd780540240bf37e56c8bcca7fab980e1e61c9400d8a9a5b14ac6", - "6fbf31b45ab0c0b8dad1c0f5f4061379912dde5aa922099a030b725c73346c524291adef89d2f6fd8dfcda6d07dad811a9314536c2915ed45da34947e83de34e", - "a0c65bddde8adef57282b04b11e7bc8aab105b99231b750c021f4a735cb1bcfab87553bba3abb0c3e64a0b6955285185a0bd35fb8cfde557329bebb1f629ee93", - "f99d815550558e81eca2f96718aed10d86f3f1cfb675cce06b0eff02f617c5a42c5aa760270f2679da2677c5aeb94f1142277f21c7f79f3c4f0cce4ed8ee62b1", - "95391da8fc7b917a2044b3d6f5374e1ca072b41454d572c7356c05fd4bc1e0f40b8bb8b4a9f6bce9be2c4623c399b0dca0dab05cb7281b71a21b0ebcd9e55670", - "04b9cd3d20d221c09ac86913d3dc63041989a9a1e694f1e639a3ba7e451840f750c2fc191d56ad61f2e7936bc0ac8e094b60caeed878c18799045402d61ceaf9", - "ec0e0ef707e4ed6c0c66f9e089e4954b058030d2dd86398fe84059631f9ee591d9d77375355149178c0cf8f8e7c49ed2a5e4f95488a2247067c208510fadc44c", - "9a37cce273b79c09913677510eaf7688e89b3314d3532fd2764c39de022a2945b5710d13517af8ddc0316624e73bec1ce67df15228302036f330ab0cb4d218dd", - "4cf9bb8fb3d4de8b38b2f262d3c40f46dfe747e8fc0a414c193d9fcf753106ce47a18f172f12e8a2f1c26726545358e5ee28c9e2213a8787aafbc516d2343152", - "64e0c63af9c808fd893137129867fd91939d53f2af04be4fa268006100069b2d69daa5c5d8ed7fddcb2a70eeecdf2b105dd46a1e3b7311728f639ab489326bc9", - "5e9c93158d659b2def06b0c3c7565045542662d6eee8a96a89b78ade09fe8b3dcc096d4fe48815d88d8f82620156602af541955e1f6ca30dce14e254c326b88f", - "7775dff889458dd11aef417276853e21335eb88e4dec9cfb4e9edb49820088551a2ca60339f12066101169f0dfe84b098fddb148d9da6b3d613df263889ad64b", - "f0d2805afbb91f743951351a6d024f9353a23c7ce1fc2b051b3a8b968c233f46f50f806ecb1568ffaa0b60661e334b21dde04f8fa155ac740eeb42e20b60d764", - "86a2af316e7d7754201b942e275364ac12ea8962ab5bd8d7fb276dc5fbffc8f9a28cae4e4867df6780d9b72524160927c855da5b6078e0b554aa91e31cb9ca1d", - 
"10bdf0caa0802705e706369baf8a3f79d72c0a03a80675a7bbb00be3a45e516424d1ee88efb56f6d5777545ae6e27765c3a8f5e493fc308915638933a1dfee55", - "b01781092b1748459e2e4ec178696627bf4ebafebba774ecf018b79a68aeb84917bf0b84bb79d17b743151144cd66b7b33a4b9e52c76c4e112050ff5385b7f0b", - "c6dbc61dec6eaeac81e3d5f755203c8e220551534a0b2fd105a91889945a638550204f44093dd998c076205dffad703a0e5cd3c7f438a7e634cd59fededb539e", - "eba51acffb4cea31db4b8d87e9bf7dd48fe97b0253ae67aa580f9ac4a9d941f2bea518ee286818cc9f633f2a3b9fb68e594b48cdd6d515bf1d52ba6c85a203a7", - "86221f3ada52037b72224f105d7999231c5e5534d03da9d9c0a12acb68460cd375daf8e24386286f9668f72326dbf99ba094392437d398e95bb8161d717f8991", - "5595e05c13a7ec4dc8f41fb70cb50a71bce17c024ff6de7af618d0cc4e9c32d9570d6d3ea45b86525491030c0d8f2b1836d5778c1ce735c17707df364d054347", - "ce0f4f6aca89590a37fe034dd74dd5fa65eb1cbd0a41508aaddc09351a3cea6d18cb2189c54b700c009f4cbf0521c7ea01be61c5ae09cb54f27bc1b44d658c82", - "7ee80b06a215a3bca970c77cda8761822bc103d44fa4b33f4d07dcb997e36d55298bceae12241b3fa07fa63be5576068da387b8d5859aeab701369848b176d42", - "940a84b6a84d109aab208c024c6ce9647676ba0aaa11f86dbb7018f9fd2220a6d901a9027f9abcf935372727cbf09ebd61a2a2eeb87653e8ecad1bab85dc8327", - "2020b78264a82d9f4151141adba8d44bf20c5ec062eee9b595a11f9e84901bf148f298e0c9f8777dcdbc7cc4670aac356cc2ad8ccb1629f16f6a76bcefbee760", - "d1b897b0e075ba68ab572adf9d9c436663e43eb3d8e62d92fc49c9be214e6f27873fe215a65170e6bea902408a25b49506f47babd07cecf7113ec10c5dd31252", - "b14d0c62abfa469a357177e594c10c194243ed2025ab8aa5ad2fa41ad318e0ff48cd5e60bec07b13634a711d2326e488a985f31e31153399e73088efc86a5c55", - "4169c5cc808d2697dc2a82430dc23e3cd356dc70a94566810502b8d655b39abf9e7f902fe717e0389219859e1945df1af6ada42e4ccda55a197b7100a30c30a1", - "258a4edb113d66c839c8b1c91f15f35ade609f11cd7f8681a4045b9fef7b0b24c82cda06a5f2067b368825e3914e53d6948ede92efd6e8387fa2e537239b5bee", - 
"79d2d8696d30f30fb34657761171a11e6c3f1e64cbe7bebee159cb95bfaf812b4f411e2f26d9c421dc2c284a3342d823ec293849e42d1e46b0a4ac1e3c86abaa", - "8b9436010dc5dee992ae38aea97f2cd63b946d94fedd2ec9671dcde3bd4ce9564d555c66c15bb2b900df72edb6b891ebcadfeff63c9ea4036a998be7973981e7", - "c8f68e696ed28242bf997f5b3b34959508e42d613810f1e2a435c96ed2ff560c7022f361a9234b9837feee90bf47922ee0fd5f8ddf823718d86d1e16c6090071", - "b02d3eee4860d5868b2c39ce39bfe81011290564dd678c85e8783f29302dfc1399ba95b6b53cd9ebbf400cca1db0ab67e19a325f2d115812d25d00978ad1bca4", - "7693ea73af3ac4dad21ca0d8da85b3118a7d1c6024cfaf557699868217bc0c2f44a199bc6c0edd519798ba05bd5b1b4484346a47c2cadf6bf30b785cc88b2baf", - "a0e5c1c0031c02e48b7f09a5e896ee9aef2f17fc9e18e997d7f6cac7ae316422c2b1e77984e5f3a73cb45deed5d3f84600105e6ee38f2d090c7d0442ea34c46d", - "41daa6adcfdb69f1440c37b596440165c15ada596813e2e22f060fcd551f24dee8e04ba6890387886ceec4a7a0d7fc6b44506392ec3822c0d8c1acfc7d5aebe8", - "14d4d40d5984d84c5cf7523b7798b254e275a3a8cc0a1bd06ebc0bee726856acc3cbf516ff667cda2058ad5c3412254460a82c92187041363cc77a4dc215e487", - "d0e7a1e2b9a447fee83e2277e9ff8010c2f375ae12fa7aaa8ca5a6317868a26a367a0b69fbc1cf32a55d34eb370663016f3d2110230eba754028a56f54acf57c", - "e771aa8db5a3e043e8178f39a0857ba04a3f18e4aa05743cf8d222b0b095825350ba422f63382a23d92e4149074e816a36c1cd28284d146267940b31f8818ea2", - "feb4fd6f9e87a56bef398b3284d2bda5b5b0e166583a66b61e538457ff0584872c21a32962b9928ffab58de4af2edd4e15d8b35570523207ff4e2a5aa7754caa", - "462f17bf005fb1c1b9e671779f665209ec2873e3e411f98dabf240a1d5ec3f95ce6796b6fc23fe171903b502023467dec7273ff74879b92967a2a43a5a183d33", - "d3338193b64553dbd38d144bea71c5915bb110e2d88180dbc5db364fd6171df317fc7268831b5aef75e4342b2fad8797ba39eddcef80e6ec08159350b1ad696d", - "e1590d585a3d39f7cb599abd479070966409a6846d4377acf4471d065d5db94129cc9be92573b05ed226be1e9b7cb0cabe87918589f80dadd4ef5ef25a93d28e", - 
"f8f3726ac5a26cc80132493a6fedcb0e60760c09cfc84cad178175986819665e76842d7b9fedf76dddebf5d3f56faaad4477587af21606d396ae570d8e719af2", - "30186055c07949948183c850e9a756cc09937e247d9d928e869e20bafc3cd9721719d34e04a0899b92c736084550186886efba2e790d8be6ebf040b209c439a4", - "f3c4276cb863637712c241c444c5cc1e3554e0fddb174d035819dd83eb700b4ce88df3ab3841ba02085e1a99b4e17310c5341075c0458ba376c95a6818fbb3e2", - "0aa007c4dd9d5832393040a1583c930bca7dc5e77ea53add7e2b3f7c8e231368043520d4a3ef53c969b6bbfd025946f632bd7f765d53c21003b8f983f75e2a6a", - "08e9464720533b23a04ec24f7ae8c103145f765387d738777d3d343477fd1c58db052142cab754ea674378e18766c53542f71970171cc4f81694246b717d7564", - "d37ff7ad297993e7ec21e0f1b4b5ae719cdc83c5db687527f27516cbffa822888a6810ee5c1ca7bfe3321119be1ab7bfa0a502671c8329494df7ad6f522d440f", - "dd9042f6e464dcf86b1262f6accfafbd8cfd902ed3ed89abf78ffa482dbdeeb6969842394c9a1168ae3d481a017842f660002d42447c6b22f7b72f21aae021c9", - "bd965bf31e87d70327536f2a341cebc4768eca275fa05ef98f7f1b71a0351298de006fba73fe6733ed01d75801b4a928e54231b38e38c562b2e33ea1284992fa", - "65676d800617972fbd87e4b9514e1c67402b7a331096d3bfac22f1abb95374abc942f16e9ab0ead33b87c91968a6e509e119ff07787b3ef483e1dcdccf6e3022", - "939fa189699c5d2c81ddd1ffc1fa207c970b6a3685bb29ce1d3e99d42f2f7442da53e95a72907314f4588399a3ff5b0a92beb3f6be2694f9f86ecf2952d5b41c", - "c516541701863f91005f314108ceece3c643e04fc8c42fd2ff556220e616aaa6a48aeb97a84bad74782e8dff96a1a2fa949339d722edcaa32b57067041df88cc", - "987fd6e0d6857c553eaebb3d34970a2c2f6e89a3548f492521722b80a1c21a153892346d2cba6444212d56da9a26e324dccbc0dcde85d4d2ee4399eec5a64e8f", - "ae56deb1c2328d9c4017706bce6e99d41349053ba9d336d677c4c27d9fd50ae6aee17e853154e1f4fe7672346da2eaa31eea53fcf24a22804f11d03da6abfc2b", - "49d6a608c9bde4491870498572ac31aac3fa40938b38a7818f72383eb040ad39532bc06571e13d767e6945ab77c0bdc3b0284253343f9f6c1244ebf2ff0df866", - 
"da582ad8c5370b4469af862aa6467a2293b2b28bd80ae0e91f425ad3d47249fdf98825cc86f14028c3308c9804c78bfeeeee461444ce243687e1a50522456a1d", - "d5266aa3331194aef852eed86d7b5b2633a0af1c735906f2e13279f14931a9fc3b0eac5ce9245273bd1aa92905abe16278ef7efd47694789a7283b77da3c70f8", - "2962734c28252186a9a1111c732ad4de4506d4b4480916303eb7991d659ccda07a9911914bc75c418ab7a4541757ad054796e26797feaf36e9f6ad43f14b35a4", - "e8b79ec5d06e111bdfafd71e9f5760f00ac8ac5d8bf768f9ff6f08b8f026096b1cc3a4c973333019f1e3553e77da3f98cb9f542e0a90e5f8a940cc58e59844b3", - "dfb320c44f9d41d1efdcc015f08dd5539e526e39c87d509ae6812a969e5431bf4fa7d91ffd03b981e0d544cf72d7b1c0374f8801482e6dea2ef903877eba675e", - "d88675118fdb55a5fb365ac2af1d217bf526ce1ee9c94b2f0090b2c58a06ca58187d7fe57c7bed9d26fca067b4110eefcd9a0a345de872abe20de368001b0745", - "b893f2fc41f7b0dd6e2f6aa2e0370c0cff7df09e3acfcc0e920b6e6fad0ef747c40668417d342b80d2351e8c175f20897a062e9765e6c67b539b6ba8b9170545", - "6c67ec5697accd235c59b486d7b70baeedcbd4aa64ebd4eef3c7eac189561a726250aec4d48cadcafbbe2ce3c16ce2d691a8cce06e8879556d4483ed7165c063", - "f1aa2b044f8f0c638a3f362e677b5d891d6fd2ab0765f6ee1e4987de057ead357883d9b405b9d609eea1b869d97fb16d9b51017c553f3b93c0a1e0f1296fedcd", - "cbaa259572d4aebfc1917acddc582b9f8dfaa928a198ca7acd0f2aa76a134a90252e6298a65b08186a350d5b7626699f8cb721a3ea5921b753ae3a2dce24ba3a", - "fa1549c9796cd4d303dcf452c1fbd5744fd9b9b47003d920b92de34839d07ef2a29ded68f6fc9e6c45e071a2e48bd50c5084e96b657dd0404045a1ddefe282ed", - "5cf2ac897ab444dcb5c8d87c495dbdb34e1838b6b629427caa51702ad0f9688525f13bec503a3c3a2c80a65e0b5715e8afab00ffa56ec455a49a1ad30aa24fcd", - "9aaf80207bace17bb7ab145757d5696bde32406ef22b44292ef65d4519c3bb2ad41a59b62cc3e94b6fa96d32a7faadae28af7d35097219aa3fd8cda31e40c275", - "af88b163402c86745cb650c2988fb95211b94b03ef290eed9662034241fd51cf398f8073e369354c43eae1052f9b63b08191caa138aa54fea889cc7024236897", - 
"48fa7d64e1ceee27b9864db5ada4b53d00c9bc7626555813d3cd6730ab3cc06ff342d727905e33171bde6e8476e77fb1720861e94b73a2c538d254746285f430", - "0e6fd97a85e904f87bfe85bbeb34f69e1f18105cf4ed4f87aec36c6e8b5f68bd2a6f3dc8a9ecb2b61db4eedb6b2ea10bf9cb0251fb0f8b344abf7f366b6de5ab", - "06622da5787176287fdc8fed440bad187d830099c94e6d04c8e9c954cda70c8bb9e1fc4a6d0baa831b9b78ef6648681a4867a11da93ee36e5e6a37d87fc63f6f", - "1da6772b58fabf9c61f68d412c82f182c0236d7d575ef0b58dd22458d643cd1dfc93b03871c316d8430d312995d4197f0874c99172ba004a01ee295abac24e46", - "3cd2d9320b7b1d5fb9aab951a76023fa667be14a9124e394513918a3f44096ae4904ba0ffc150b63bc7ab1eeb9a6e257e5c8f000a70394a5afd842715de15f29", - "04cdc14f7434e0b4be70cb41db4c779a88eaef6accebcb41f2d42fffe7f32a8e281b5c103a27021d0d08362250753cdf70292195a53a48728ceb5844c2d98bab", - "9071b7a8a075d0095b8fb3ae5113785735ab98e2b52faf91d5b89e44aac5b5d4ebbf91223b0ff4c71905da55342e64655d6ef8c89a4768c3f93a6dc0366b5bc8", - "ebb30240dd96c7bc8d0abe49aa4edcbb4afdc51ff9aaf720d3f9e7fbb0f9c6d6571350501769fc4ebd0b2141247ff400d4fd4be414edf37757bb90a32ac5c65a", - "8532c58bf3c8015d9d1cbe00eef1f5082f8f3632fbe9f1ed4f9dfb1fa79e8283066d77c44c4af943d76b300364aecbd0648c8a8939bd204123f4b56260422dec", - "fe9846d64f7c7708696f840e2d76cb4408b6595c2f81ec6a28a7f2f20cb88cfe6ac0b9e9b8244f08bd7095c350c1d0842f64fb01bb7f532dfcd47371b0aeeb79", - "28f17ea6fb6c42092dc264257e29746321fb5bdaea9873c2a7fa9d8f53818e899e161bc77dfe8090afd82bf2266c5c1bc930a8d1547624439e662ef695f26f24", - "ec6b7d7f030d4850acae3cb615c21dd25206d63e84d1db8d957370737ba0e98467ea0ce274c66199901eaec18a08525715f53bfdb0aacb613d342ebdceeddc3b", - "b403d3691c03b0d3418df327d5860d34bbfcc4519bfbce36bf33b208385fadb9186bc78a76c489d89fd57e7dc75412d23bcd1dae8470ce9274754bb8585b13c5", - "31fc79738b8772b3f55cd8178813b3b52d0db5a419d30ba9495c4b9da0219fac6df8e7c23a811551a62b827f256ecdb8124ac8a6792ccfecc3b3012722e94463", - 
"bb2039ec287091bcc9642fc90049e73732e02e577e2862b32216ae9bedcd730c4c284ef3968c368b7d37584f97bd4b4dc6ef6127acfe2e6ae2509124e66c8af4", - "f53d68d13f45edfcb9bd415e2831e938350d5380d3432278fc1c0c381fcb7c65c82dafe051d8c8b0d44e0974a0e59ec7bf7ed0459f86e96f329fc79752510fd3", - "8d568c7984f0ecdf7640fbc483b5d8c9f86634f6f43291841b309a350ab9c1137d24066b09da9944bac54d5bb6580d836047aac74ab724b887ebf93d4b32eca9", - "c0b65ce5a96ff774c456cac3b5f2c4cd359b4ff53ef93a3da0778be4900d1e8da1601e769e8f1b02d2a2f8c5b9fa10b44f1c186985468feeb008730283a6657d", - "4900bba6f5fb103ece8ec96ada13a5c3c85488e05551da6b6b33d988e611ec0fe2e3c2aa48ea6ae8986a3a231b223c5d27cec2eadde91ce07981ee652862d1e4", - "c7f5c37c7285f927f76443414d4357ff789647d7a005a5a787e03c346b57f49f21b64fa9cf4b7e45573e23049017567121a9c3d4b2b73ec5e9413577525db45a", - "ec7096330736fdb2d64b5653e7475da746c23a4613a82687a28062d3236364284ac01720ffb406cfe265c0df626a188c9e5963ace5d3d5bb363e32c38c2190a6", - "82e744c75f4649ec52b80771a77d475a3bc091989556960e276a5f9ead92a03f718742cdcfeaee5cb85c44af198adc43a4a428f5f0c2ddb0be36059f06d7df73", - "2834b7a7170f1f5b68559ab78c1050ec21c919740b784a9072f6e5d69f828d70c919c5039fb148e39e2c8a52118378b064ca8d5001cd10a5478387b966715ed6", - "16b4ada883f72f853bb7ef253efcab0c3e2161687ad61543a0d2824f91c1f81347d86be709b16996e17f2dd486927b0288ad38d13063c4a9672c39397d3789b6", - "78d048f3a69d8b54ae0ed63a573ae350d89f7c6cf1f3688930de899afa037697629b314e5cd303aa62feea72a25bf42b304b6c6bcb27fae21c16d925e1fbdac3", - "0f746a48749287ada77a82961f05a4da4abdb7d77b1220f836d09ec814359c0ec0239b8c7b9ff9e02f569d1b301ef67c4612d1de4f730f81c12c40cc063c5caa", - "f0fc859d3bd195fbdc2d591e4cdac15179ec0f1dc821c11df1f0c1d26e6260aaa65b79fafacafd7d3ad61e600f250905f5878c87452897647a35b995bcadc3a3", - "2620f687e8625f6a412460b42e2cef67634208ce10a0cbd4dff7044a41b7880077e9f8dc3b8d1216d3376a21e015b58fb279b521d83f9388c7382c8505590b9b", - 
"227e3aed8d2cb10b918fcb04f9de3e6d0a57e08476d93759cd7b2ed54a1cbf0239c528fb04bbf288253e601d3bc38b21794afef90b17094a182cac557745e75f", - "1a929901b09c25f27d6b35be7b2f1c4745131fdebca7f3e2451926720434e0db6e74fd693ad29b777dc3355c592a361c4873b01133a57c2e3b7075cbdb86f4fc", - "5fd7968bc2fe34f220b5e3dc5af9571742d73b7d60819f2888b629072b96a9d8ab2d91b82d0a9aaba61bbd39958132fcc4257023d1eca591b3054e2dc81c8200", - "dfcce8cf32870cc6a503eadafc87fd6f78918b9b4d0737db6810be996b5497e7e5cc80e312f61e71ff3e9624436073156403f735f56b0b01845c18f6caf772e6", - "02f7ef3a9ce0fff960f67032b296efca3061f4934d690749f2d01c35c81c14f39a67fa350bc8a0359bf1724bffc3bca6d7c7bba4791fd522a3ad353c02ec5aa8", - "64be5c6aba65d594844ae78bb022e5bebe127fd6b6ffa5a13703855ab63b624dcd1a363f99203f632ec386f3ea767fc992e8ed9686586aa27555a8599d5b808f", - "f78585505c4eaa54a8b5be70a61e735e0ff97af944ddb3001e35d86c4e2199d976104b6ae31750a36a726ed285064f5981b503889fef822fcdc2898dddb7889a", - "e4b5566033869572edfd87479a5bb73c80e8759b91232879d96b1dda36c012076ee5a2ed7ae2de63ef8406a06aea82c188031b560beafb583fb3de9e57952a7e", - "e1b3e7ed867f6c9484a2a97f7715f25e25294e992e41f6a7c161ffc2adc6daaeb7113102d5e6090287fe6ad94ce5d6b739c6ca240b05c76fb73f25dd024bf935", - "85fd085fdc12a080983df07bd7012b0d402a0f4043fcb2775adf0bad174f9b08d1676e476985785c0a5dcc41dbff6d95ef4d66a3fbdc4a74b82ba52da0512b74", - "aed8fa764b0fbff821e05233d2f7b0900ec44d826f95e93c343c1bc3ba5a24374b1d616e7e7aba453a0ada5e4fab5382409e0d42ce9c2bc7fb39a99c340c20f0", - "7ba3b2e297233522eeb343bd3ebcfd835a04007735e87f0ca300cbee6d416565162171581e4020ff4cf176450f1291ea2285cb9ebffe4c56660627685145051c", - "de748bcf89ec88084721e16b85f30adb1a6134d664b5843569babc5bbd1a15ca9b61803c901a4fef32965a1749c9f3a4e243e173939dc5a8dc495c671ab52145", - "aaf4d2bdf200a919706d9842dce16c98140d34bc433df320aba9bd429e549aa7a3397652a4d768277786cf993cde2338673ed2e6b66c961fefb82cd20c93338f", - 
"c408218968b788bf864f0997e6bc4c3dba68b276e2125a4843296052ff93bf5767b8cdce7131f0876430c1165fec6c4f47adaa4fd8bcfacef463b5d3d0fa61a0", - "76d2d819c92bce55fa8e092ab1bf9b9eab237a25267986cacf2b8ee14d214d730dc9a5aa2d7b596e86a1fd8fa0804c77402d2fcd45083688b218b1cdfa0dcbcb", - "72065ee4dd91c2d8509fa1fc28a37c7fc9fa7d5b3f8ad3d0d7a25626b57b1b44788d4caf806290425f9890a3a2a35a905ab4b37acfd0da6e4517b2525c9651e4", - "64475dfe7600d7171bea0b394e27c9b00d8e74dd1e416a79473682ad3dfdbb706631558055cfc8a40e07bd015a4540dcdea15883cbbf31412df1de1cd4152b91", - "12cd1674a4488a5d7c2b3160d2e2c4b58371bedad793418d6f19c6ee385d70b3e06739369d4df910edb0b0a54cbff43d54544cd37ab3a06cfa0a3ddac8b66c89", - "60756966479dedc6dd4bcff8ea7d1d4ce4d4af2e7b097e32e3763518441147cc12b3c0ee6d2ecabf1198cec92e86a3616fba4f4e872f5825330adbb4c1dee444", - "a7803bcb71bc1d0f4383dde1e0612e04f872b715ad30815c2249cf34abb8b024915cb2fc9f4e7cc4c8cfd45be2d5a91eab0941c7d270e2da4ca4a9f7ac68663a", - "b84ef6a7229a34a750d9a98ee2529871816b87fbe3bc45b45fa5ae82d5141540211165c3c5d7a7476ba5a4aa06d66476f0d9dc49a3f1ee72c3acabd498967414", - "fae4b6d8efc3f8c8e64d001dabec3a21f544e82714745251b2b4b393f2f43e0da3d403c64db95a2cb6e23ebb7b9e94cdd5ddac54f07c4a61bd3cb10aa6f93b49", - "34f7286605a122369540141ded79b8957255da2d4155abbf5a8dbb89c8eb7ede8eeef1daa46dc29d751d045dc3b1d658bb64b80ff8589eddb3824b13da235a6b", - "3b3b48434be27b9eababba43bf6b35f14b30f6a88dc2e750c358470d6b3aa3c18e47db4017fa55106d8252f016371a00f5f8b070b74ba5f23cffc5511c9f09f0", - "ba289ebd6562c48c3e10a8ad6ce02e73433d1e93d7c9279d4d60a7e879ee11f441a000f48ed9f7c4ed87a45136d7dccdca482109c78a51062b3ba4044ada2469", - "022939e2386c5a37049856c850a2bb10a13dfea4212b4c732a8840a9ffa5faf54875c5448816b2785a007da8a8d2bc7d71a54e4e6571f10b600cbdb25d13ede3", - "e6fec19d89ce8717b1a087024670fe026f6c7cbda11caef959bb2d351bf856f8055d1c0ebdaaa9d1b17886fc2c562b5e99642fc064710c0d3488a02b5ed7f6fd", - 
"94c96f02a8f576aca32ba61c2b206f907285d9299b83ac175c209a8d43d53bfe683dd1d83e7549cb906c28f59ab7c46f8751366a28c39dd5fe2693c9019666c8", - "31a0cd215ebd2cb61de5b9edc91e6195e31c59a5648d5c9f737e125b2605708f2e325ab3381c8dce1a3e958886f1ecdc60318f882cfe20a24191352e617b0f21", - "91ab504a522dce78779f4c6c6ba2e6b6db5565c76d3e7e7c920caf7f757ef9db7c8fcf10e57f03379ea9bf75eb59895d96e149800b6aae01db778bb90afbc989", - "d85cabc6bd5b1a01a5afd8c6734740da9fd1c1acc6db29bfc8a2e5b668b028b6b3154bfb8703fa3180251d589ad38040ceb707c4bad1b5343cb426b61eaa49c1", - "d62efbec2ca9c1f8bd66ce8b3f6a898cb3f7566ba6568c618ad1feb2b65b76c3ce1dd20f7395372faf28427f61c9278049cf0140df434f5633048c86b81e0399", - "7c8fdc6175439e2c3db15bafa7fb06143a6a23bc90f449e79deef73c3d492a671715c193b6fea9f036050b946069856b897e08c00768f5ee5ddcf70b7cd6d0e0", - "58602ee7468e6bc9df21bd51b23c005f72d6cb013f0a1b48cbec5eca299299f97f09f54a9a01483eaeb315a6478bad37ba47ca1347c7c8fc9e6695592c91d723", - "27f5b79ed256b050993d793496edf4807c1d85a7b0a67c9c4fa99860750b0ae66989670a8ffd7856d7ce411599e58c4d77b232a62bef64d15275be46a68235ff", - "3957a976b9f1887bf004a8dca942c92d2b37ea52600f25e0c9bc5707d0279c00c6e85a839b0d2d8eb59c51d94788ebe62474a791cadf52cccf20f5070b6573fc", - "eaa2376d55380bf772ecca9cb0aa4668c95c707162fa86d518c8ce0ca9bf7362b9f2a0adc3ff59922df921b94567e81e452f6c1a07fc817cebe99604b3505d38", - "c1e2c78b6b2734e2480ec550434cb5d613111adcc21d475545c3b1b7e6ff12444476e5c055132e2229dc0f807044bb919b1a5662dd38a9ee65e243a3911aed1a", - "8ab48713389dd0fcf9f965d3ce66b1e559a1f8c58741d67683cd971354f452e62d0207a65e436c5d5d8f8ee71c6abfe50e669004c302b31a7ea8311d4a916051", - "24ce0addaa4c65038bd1b1c0f1452a0b128777aabc94a29df2fd6c7e2f85f8ab9ac7eff516b0e0a825c84a24cfe492eaad0a6308e46dd42fe8333ab971bb30ca", - "5154f929ee03045b6b0c0004fa778edee1d139893267cc84825ad7b36c63de32798e4a166d24686561354f63b00709a1364b3c241de3febf0754045897467cd4", - 
"e74e907920fd87bd5ad636dd11085e50ee70459c443e1ce5809af2bc2eba39f9e6d7128e0e3712c316da06f4705d78a4838e28121d4344a2c79c5e0db307a677", - "bf91a22334bac20f3fd80663b3cd06c4e8802f30e6b59f90d3035cc9798a217ed5a31abbda7fa6842827bdf2a7a1c21f6fcfccbb54c6c52926f32da816269be1", - "d9d5c74be5121b0bd742f26bffb8c89f89171f3f934913492b0903c271bbe2b3395ef259669bef43b57f7fcc3027db01823f6baee66e4f9fead4d6726c741fce", - "50c8b8cf34cd879f80e2faab3230b0c0e1cc3e9dcadeb1b9d97ab923415dd9a1fe38addd5c11756c67990b256e95ad6d8f9fedce10bf1c90679cde0ecf1be347", - "0a386e7cd5dd9b77a035e09fe6fee2c8ce61b5383c87ea43205059c5e4cd4f4408319bb0a82360f6a58e6c9ce3f487c446063bf813bc6ba535e17fc1826cfc91", - "1f1459cb6b61cbac5f0efe8fc487538f42548987fcd56221cfa7beb22504769e792c45adfb1d6b3d60d7b749c8a75b0bdf14e8ea721b95dca538ca6e25711209", - "e58b3836b7d8fedbb50ca5725c6571e74c0785e97821dab8b6298c10e4c079d4a6cdf22f0fedb55032925c16748115f01a105e77e00cee3d07924dc0d8f90659", - "b929cc6505f020158672deda56d0db081a2ee34c00c1100029bdf8ea98034fa4bf3e8655ec697fe36f40553c5bb46801644a627d3342f4fc92b61f03290fb381", - "72d353994b49d3e03153929a1e4d4f188ee58ab9e72ee8e512f29bc773913819ce057ddd7002c0433ee0a16114e3d156dd2c4a7e80ee53378b8670f23e33ef56", - "c70ef9bfd775d408176737a0736d68517ce1aaad7e81a93c8c1ed967ea214f56c8a377b1763e676615b60f3988241eae6eab9685a5124929d28188f29eab06f7", - "c230f0802679cb33822ef8b3b21bf7a9a28942092901d7dac3760300831026cf354c9232df3e084d9903130c601f63c1f4a4a4b8106e468cd443bbe5a734f45f", - "6f43094cafb5ebf1f7a4937ec50f56a4c9da303cbb55ac1f27f1f1976cd96beda9464f0e7b9c54620b8a9fba983164b8be3578425a024f5fe199c36356b88972", - "3745273f4c38225db2337381871a0c6aafd3af9b018c88aa02025850a5dc3a42a1a3e03e56cbf1b0876d63a441f1d2856a39b8801eb5af325201c415d65e97fe", - "c50c44cca3ec3edaae779a7e179450ebdda2f97067c690aa6c5a4ac7c30139bb27c0df4db3220e63cb110d64f37ffe078db72653e2daacf93ae3f0a2d1a7eb2e", - 
"8aef263e385cbc61e19b28914243262af5afe8726af3ce39a79c27028cf3ecd3f8d2dfd9cfc9ad91b58f6f20778fd5f02894a3d91c7d57d1e4b866a7f364b6be", - "28696141de6e2d9bcb3235578a66166c1448d3e905a1b482d423be4bc5369bc8c74dae0acc9cc123e1d8ddce9f97917e8c019c552da32d39d2219b9abf0fa8c8", - "2fb9eb2085830181903a9dafe3db428ee15be7662224efd643371fb25646aee716e531eca69b2bdc8233f1a8081fa43da1500302975a77f42fa592136710e9dc", - "66f9a7143f7a3314a669bf2e24bbb35014261d639f495b6c9c1f104fe8e320aca60d4550d69d52edbd5a3cdeb4014ae65b1d87aa770b69ae5c15f4330b0b0ad8", - "f4c4dd1d594c3565e3e25ca43dad82f62abea4835ed4cd811bcd975e46279828d44d4c62c3679f1b7f7b9dd4571d7b49557347b8c5460cbdc1bef690fb2a08c0", - "8f1dc9649c3a84551f8f6e91cac68242a43b1f8f328ee92280257387fa7559aa6db12e4aeadc2d26099178749c6864b357f3f83b2fb3efa8d2a8db056bed6bcc", - "3139c1a7f97afd1675d460ebbc07f2728aa150df849624511ee04b743ba0a833092f18c12dc91b4dd243f333402f59fe28abdbbbae301e7b659c7a26d5c0f979", - "06f94a2996158a819fe34c40de3cf0379fd9fb85b3e363ba3926a0e7d960e3f4c2e0c70c7ce0ccb2a64fc29869f6e7ab12bd4d3f14fce943279027e785fb5c29", - "c29c399ef3eee8961e87565c1ce263925fc3d0ce267d13e48dd9e732ee67b0f69fad56401b0f10fcaac119201046cca28c5b14abdea3212ae65562f7f138db3d", - "4cec4c9df52eef05c3f6faaa9791bc7445937183224ecc37a1e58d0132d35617531d7e795f52af7b1eb9d147de1292d345fe341823f8e6bc1e5badca5c656108", - "898bfbae93b3e18d00697eab7d9704fa36ec339d076131cefdf30edbe8d9cc81c3a80b129659b163a323bab9793d4feed92d54dae966c77529764a09be88db45", - "ee9bd0469d3aaf4f14035be48a2c3b84d9b4b1fff1d945e1f1c1d38980a951be197b25fe22c731f20aeacc930ba9c4a1f4762227617ad350fdabb4e80273a0f4", - "3d4d3113300581cd96acbf091c3d0f3c310138cd6979e6026cde623e2dd1b24d4a8638bed1073344783ad0649cc6305ccec04beb49f31c633088a99b65130267", - "95c0591ad91f921ac7be6d9ce37e0663ed8011c1cfd6d0162a5572e94368bac02024485e6a39854aa46fe38e97d6c6b1947cd272d86b06bb5b2f78b9b68d559d", - 
"227b79ded368153bf46c0a3ca978bfdbef31f3024a5665842468490b0ff748ae04e7832ed4c9f49de9b1706709d623e5c8c15e3caecae8d5e433430ff72f20eb", - "5d34f3952f0105eef88ae8b64c6ce95ebfade0e02c69b08762a8712d2e4911ad3f941fc4034dc9b2e479fdbcd279b902faf5d838bb2e0c6495d372b5b7029813", - "7f939bf8353abce49e77f14f3750af20b7b03902e1a1e7fb6aaf76d0259cd401a83190f15640e74f3e6c5a90e839c7821f6474757f75c7bf9002084ddc7a62dc", - "062b61a2f9a33a71d7d0a06119644c70b0716a504de7e5e1be49bd7b86e7ed6817714f9f0fc313d06129597e9a2235ec8521de36f7290a90ccfc1ffa6d0aee29", - "f29e01eeae64311eb7f1c6422f946bf7bea36379523e7b2bbaba7d1d34a22d5ea5f1c5a09d5ce1fe682cced9a4798d1a05b46cd72dff5c1b355440b2a2d476bc", - "ec38cd3bbab3ef35d7cb6d5c914298351d8a9dc97fcee051a8a02f58e3ed6184d0b7810a5615411ab1b95209c3c810114fdeb22452084e77f3f847c6dbaafe16", - "c2aef5e0ca43e82641565b8cb943aa8ba53550caef793b6532fafad94b816082f0113a3ea2f63608ab40437ecc0f0229cb8fa224dcf1c478a67d9b64162b92d1", - "15f534efff7105cd1c254d074e27d5898b89313b7d366dc2d7d87113fa7d53aae13f6dba487ad8103d5e854c91fdb6e1e74b2ef6d1431769c30767dde067a35c", - "89acbca0b169897a0a2714c2df8c95b5b79cb69390142b7d6018bb3e3076b099b79a964152a9d912b1b86412b7e372e9cecad7f25d4cbab8a317be36492a67d7", - "e3c0739190ed849c9c962fd9dbb55e207e624fcac1eb417691515499eea8d8267b7e8f1287a63633af5011fde8c4ddf55bfdf722edf88831414f2cfaed59cb9a", - "8d6cf87c08380d2d1506eee46fd4222d21d8c04e585fbfd08269c98f702833a156326a0724656400ee09351d57b440175e2a5de93cc5f80db6daf83576cf75fa", - "da24bede383666d563eeed37f6319baf20d5c75d1635a6ba5ef4cfa1ac95487e96f8c08af600aab87c986ebad49fc70a58b4890b9c876e091016daf49e1d322e", - "f9d1d1b1e87ea7ae753a029750cc1cf3d0157d41805e245c5617bb934e732f0ae3180b78e05bfe76c7c3051e3e3ac78b9b50c05142657e1e03215d6ec7bfd0fc", - "11b7bc1668032048aa43343de476395e814bbbc223678db951a1b03a021efac948cfbe215f97fe9a72a2f6bc039e3956bfa417c1a9f10d6d7ba5d3d32ff323e5", - 
"b8d9000e4fc2b066edb91afee8e7eb0f24e3a201db8b6793c0608581e628ed0bcc4e5aa6787992a4bcc44e288093e63ee83abd0bc3ec6d0934a674a4da13838a", - "ce325e294f9b6719d6b61278276ae06a2564c03bb0b783fafe785bdf89c7d5acd83e78756d301b445699024eaeb77b54d477336ec2a4f332f2b3f88765ddb0c3", - "29acc30e9603ae2fccf90bf97e6cc463ebe28c1b2f9b4b765e70537c25c702a29dcbfbf14c99c54345ba2b51f17b77b5f15db92bbad8fa95c471f5d070a137cc", - "3379cbaae562a87b4c0425550ffdd6bfe1203f0d666cc7ea095be407a5dfe61ee91441cd5154b3e53b4f5fb31ad4c7a9ad5c7af4ae679aa51a54003a54ca6b2d", - "3095a349d245708c7cf550118703d7302c27b60af5d4e67fc978f8a4e60953c7a04f92fcf41aee64321ccb707a895851552b1e37b00bc5e6b72fa5bcef9e3fff", - "07262d738b09321f4dbccec4bb26f48cb0f0ed246ce0b31b9a6e7bc683049f1f3e5545f28ce932dd985c5ab0f43bd6de0770560af329065ed2e49d34624c2cbb", - "b6405eca8ee3316c87061cc6ec18dba53e6c250c63ba1f3bae9e55dd3498036af08cd272aa24d713c6020d77ab2f3919af1a32f307420618ab97e73953994fb4", - "7ee682f63148ee45f6e5315da81e5c6e557c2c34641fc509c7a5701088c38a74756168e2cd8d351e88fd1a451f360a01f5b2580f9b5a2e8cfc138f3dd59a3ffc", - "1d263c179d6b268f6fa016f3a4f29e943891125ed8593c81256059f5a7b44af2dcb2030d175c00e62ecaf7ee96682aa07ab20a611024a28532b1c25b86657902", - "106d132cbdb4cd2597812846e2bc1bf732fec5f0a5f65dbb39ec4e6dc64ab2ce6d24630d0f15a805c3540025d84afa98e36703c3dbee713e72dde8465bc1be7e", - "0e79968226650667a8d862ea8da4891af56a4e3a8b6d1750e394f0dea76d640d85077bcec2cc86886e506751b4f6a5838f7f0b5fef765d9dc90dcdcbaf079f08", - "521156a82ab0c4e566e5844d5e31ad9aaf144bbd5a464fdca34dbd5717e8ff711d3ffebbfa085d67fe996a34f6d3e4e60b1396bf4b1610c263bdbb834d560816", - "1aba88befc55bc25efbce02db8b9933e46f57661baeabeb21cc2574d2a518a3cba5dc5a38e49713440b25f9c744e75f6b85c9d8f4681f676160f6105357b8406", - "5a9949fcb2c473cda968ac1b5d08566dc2d816d960f57e63b898fa701cf8ebd3f59b124d95bfbbedc5f1cf0e17d5eaed0c02c50b69d8a402cabcca4433b51fd4", - 
"b0cead09807c672af2eb2b0f06dde46cf5370e15a4096b1a7d7cbb36ec31c205fbefca00b7a4162fa89fb4fb3eb78d79770c23f44e7206664ce3cd931c291e5d", - "bb6664931ec97044e45b2ae420ae1c551a8874bc937d08e969399c3964ebdba8346cdd5d09caafe4c28ba7ec788191ceca65ddd6f95f18583e040d0f30d0364d", - "65bc770a5faa3792369803683e844b0be7ee96f29f6d6a35568006bd5590f9a4ef639b7a8061c7b0424b66b60ac34af3119905f33a9d8c3ae18382ca9b689900", - "ea9b4dca333336aaf839a45c6eaa48b8cb4c7ddabffea4f643d6357ea6628a480a5b45f2b052c1b07d1fedca918b6f1139d80f74c24510dcbaa4be70eacc1b06", - "e6342fb4a780ad975d0e24bce149989b91d360557e87994f6b457b895575cc02d0c15bad3ce7577f4c63927ff13f3e381ff7e72bdbe745324844a9d27e3f1c01", - "3e209c9b33e8e461178ab46b1c64b49a07fb745f1c8bc95fbfb94c6b87c69516651b264ef980937fad41238b91ddc011a5dd777c7efd4494b4b6ecd3a9c22ac0", - "fd6a3d5b1875d80486d6e69694a56dbb04a99a4d051f15db2689776ba1c4882e6d462a603b7015dc9f4b7450f05394303b8652cfb404a266962c41bae6e18a94", - "951e27517e6bad9e4195fc8671dee3e7e9be69cee1422cb9fecfce0dba875f7b310b93ee3a3d558f941f635f668ff832d2c1d033c5e2f0997e4c66f147344e02", - "8eba2f874f1ae84041903c7c4253c82292530fc8509550bfdc34c95c7e2889d5650b0ad8cb988e5c4894cb87fbfbb19612ea93ccc4c5cad17158b9763464b492", - "16f712eaa1b7c6354719a8e7dbdfaf55e4063a4d277d947550019b38dfb564830911057d50506136e2394c3b28945cc964967d54e3000c2181626cfb9b73efd2", - "c39639e7d5c7fb8cdd0fd3e6a52096039437122f21c78f1679cea9d78a734c56ecbeb28654b4f18e342c331f6f7229ec4b4bc281b2d80a6eb50043f31796c88c", - "72d081af99f8a173dcc9a0ac4eb3557405639a29084b54a40172912a2f8a395129d5536f0918e902f9e8fa6000995f4168ddc5f893011be6a0dbc9b8a1a3f5bb", - "c11aa81e5efd24d5fc27ee586cfd8847fbb0e27601ccece5ecca0198e3c7765393bb74457c7e7a27eb9170350e1fb53857177506be3e762cc0f14d8c3afe9077", - "c28f2150b452e6c0c424bcde6f8d72007f9310fed7f2f87de0dbb64f4479d6c1441ba66f44b2accee61609177ed340128b407ecec7c64bbe50d63d22d8627727", - 
"f63d88122877ec30b8c8b00d22e89000a966426112bd44166e2f525b769ccbe9b286d437a0129130dde1a86c43e04bedb594e671d98283afe64ce331de9828fd", - "348b0532880b88a6614a8d7408c3f913357fbb60e995c60205be9139e74998aede7f4581e42f6b52698f7fa1219708c14498067fd1e09502de83a77dd281150c", - "5133dc8bef725359dff59792d85eaf75b7e1dcd1978b01c35b1b85fcebc63388ad99a17b6346a217dc1a9622ebd122ecf6913c4d31a6b52a695b86af00d741a0", - "2753c4c0e98ecad806e88780ec27fccd0f5c1ab547f9e4bf1659d192c23aa2cc971b58b6802580baef8adc3b776ef7086b2545c2987f348ee3719cdef258c403", - "b1663573ce4b9d8caefc865012f3e39714b9898a5da6ce17c25a6a47931a9ddb9bbe98adaa553beed436e89578455416c2a52a525cf2862b8d1d49a2531b7391", - "64f58bd6bfc856f5e873b2a2956ea0eda0d6db0da39c8c7fc67c9f9feefcff3072cdf9e6ea37f69a44f0c61aa0da3693c2db5b54960c0281a088151db42b11e8", - "0764c7be28125d9065c4b98a69d60aede703547c66a12e17e1c618994132f5ef82482c1e3fe3146cc65376cc109f0138ed9a80e49f1f3c7d610d2f2432f20605", - "f748784398a2ff03ebeb07e155e66116a839741a336e32da71ec696001f0ad1b25cd48c69cfca7265eca1dd71904a0ce748ac4124f3571076dfa7116a9cf00e9", - "3f0dbc0186bceb6b785ba78d2a2a013c910be157bdaffae81bb6663b1a73722f7f1228795f3ecada87cf6ef0078474af73f31eca0cc200ed975b6893f761cb6d", - "d4762cd4599876ca75b2b8fe249944dbd27ace741fdab93616cbc6e425460feb51d4e7adcc38180e7fc47c89024a7f56191adb878dfde4ead62223f5a2610efe", - "cd36b3d5b4c91b90fcbba79513cfee1907d8645a162afd0cd4cf4192d4a5f4c892183a8eacdb2b6b6a9d9aa8c11ac1b261b380dbee24ca468f1bfd043c58eefe", - "98593452281661a53c48a9d8cd790826c1a1ce567738053d0bee4a91a3d5bd92eefdbabebe3204f2031ca5f781bda99ef5d8ae56e5b04a9e1ecd21b0eb05d3e1", - "771f57dd2775ccdab55921d3e8e30ccf484d61fe1c1b9c2ae819d0fb2a12fab9be70c4a7a138da84e8280435daade5bbe66af0836a154f817fb17f3397e725a3", - "c60897c6f828e21f16fbb5f15b323f87b6c8955eabf1d38061f707f608abdd993fac3070633e286cf8339ce295dd352df4b4b40b2f29da1dd50b3a05d079e6bb", - 
"8210cd2c2d3b135c2cf07fa0d1433cd771f325d075c6469d9c7f1ba0943cd4ab09808cabf4acb9ce5bb88b498929b4b847f681ad2c490d042db2aec94214b06b", - "1d4edfffd8fd80f7e4107840fa3aa31e32598491e4af7013c197a65b7f36dd3ac4b478456111cd4309d9243510782fa31b7c4c95fa951520d020eb7e5c36e4ef", - "af8e6e91fab46ce4873e1a50a8ef448cc29121f7f74deef34a71ef89cc00d9274bc6c2454bbb3230d8b2ec94c62b1dec85f3593bfa30ea6f7a44d7c09465a253", - "29fd384ed4906f2d13aa9fe7af905990938bed807f1832454a372ab412eea1f5625a1fcc9ac8343b7c67c5aba6e0b1cc4644654913692c6b39eb9187ceacd3ec", - "a268c7885d9874a51c44dffed8ea53e94f78456e0b2ed99ff5a3924760813826d960a15edbedbb5de5226ba4b074e71b05c55b9756bb79e55c02754c2c7b6c8a", - "0cf8545488d56a86817cd7ecb10f7116b7ea530a45b6ea497b6c72c997e09e3d0da8698f46bb006fc977c2cd3d1177463ac9057fdd1662c85d0c126443c10473", - "b39614268fdd8781515e2cfebf89b4d5402bab10c226e6344e6b9ae000fb0d6c79cb2f3ec80e80eaeb1980d2f8698916bd2e9f747236655116649cd3ca23a837", - "74bef092fc6f1e5dba3663a3fb003b2a5ba257496536d99f62b9d73f8f9eb3ce9ff3eec709eb883655ec9eb896b9128f2afc89cf7d1ab58a72f4a3bf034d2b4a", - "3a988d38d75611f3ef38b8774980b33e573b6c57bee0469ba5eed9b44f29945e7347967fba2c162e1c3be7f310f2f75ee2381e7bfd6b3f0baea8d95dfb1dafb1", - "58aedfce6f67ddc85a28c992f1c0bd0969f041e66f1ee88020a125cbfcfebcd61709c9c4eba192c15e69f020d462486019fa8dea0cd7a42921a19d2fe546d43d", - "9347bd291473e6b4e368437b8e561e065f649a6d8ada479ad09b1999a8f26b91cf6120fd3bfe014e83f23acfa4c0ad7b3712b2c3c0733270663112ccd9285cd9", - "b32163e7c5dbb5f51fdc11d2eac875efbbcb7e7699090a7e7ff8a8d50795af5d74d9ff98543ef8cdf89ac13d0485278756e0ef00c817745661e1d59fe38e7537", - "1085d78307b1c4b008c57a2e7e5b234658a0a82e4ff1e4aaac72b312fda0fe27d233bc5b10e9cc17fdc7697b540c7d95eb215a19a1a0e20e1abfa126efd568c7", - "4e5c734c7dde011d83eac2b7347b373594f92d7091b9ca34cb9c6f39bdf5a8d2f134379e16d822f6522170ccf2ddd55c84b9e6c64fc927ac4cf8dfb2a17701f2", - 
"695d83bd990a1117b3d0ce06cc888027d12a054c2677fd82f0d4fbfc93575523e7991a5e35a3752e9b70ce62992e268a877744cdd435f5f130869c9a2074b338", - "a6213743568e3b3158b9184301f3690847554c68457cb40fc9a4b8cfd8d4a118c301a07737aeda0f929c68913c5f51c80394f53bff1c3e83b2e40ca97eba9e15", - "d444bfa2362a96df213d070e33fa841f51334e4e76866b8139e8af3bb3398be2dfaddcbc56b9146de9f68118dc5829e74b0c28d7711907b121f9161cb92b69a9", - "142709d62e28fcccd0af97fad0f8465b971e82201dc51070faa0372aa43e92484be1c1e73ba10906d5d1853db6a4106e0a7bf9800d373d6dee2d46d62ef2a461", -} diff --git a/vendor/vendor.json b/vendor/vendor.json index 8b55ad8eb..87783717c 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -154,6 +154,12 @@ "revision": "56b76bdf51f7708750eac80fa38b952bb9f32639", "revisionTime": "2015-12-11T09:06:21+09:00" }, + { + "checksumSHA1": "4gphCNVXIjp5ytz3+S3SD3Dp948=", + "path": "github.com/minio/blake2b-simd", + "revision": "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4", + "revisionTime": "2016-07-23T06:10:19Z" + }, { "path": "github.com/minio/cli", "revision": "c4a07c7b68db77ccd119183fb1d01dd5972434ab",