diff --git a/Dockerfile.release b/Dockerfile.release
index 10272aa63..762e14ffa 100644
--- a/Dockerfile.release
+++ b/Dockerfile.release
@@ -1,23 +1,26 @@
 FROM alpine:3.5
 MAINTAINER Minio Inc
+
+COPY buildscripts/docker-entrypoint.sh buildscripts/healthcheck.sh /usr/bin/
 
 RUN \
     apk add --no-cache ca-certificates && \
     apk add --no-cache --virtual .build-deps curl && \
     echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
     curl https://dl.minio.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
-    chmod +x /usr/bin/minio && apk del .build-deps
+    chmod +x /usr/bin/minio && \
+    chmod +x /usr/bin/docker-entrypoint.sh && \
+    chmod +x /usr/bin/healthcheck.sh
 
 EXPOSE 9000
 
-COPY buildscripts/docker-entrypoint.sh /usr/bin/
-
-RUN chmod +x /usr/bin/docker-entrypoint.sh
-
 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
 
 VOLUME ["/export"]
 
+HEALTHCHECK --interval=30s --timeout=5s \
+    CMD /usr/bin/healthcheck.sh
+
 CMD ["minio"]
diff --git a/Dockerfile.release.aarch64 b/Dockerfile.release.aarch64
index 072b4f858..64bb9b246 100644
--- a/Dockerfile.release.aarch64
+++ b/Dockerfile.release.aarch64
@@ -2,21 +2,24 @@ FROM resin/aarch64-alpine:3.5
 MAINTAINER Minio Inc
 
+COPY buildscripts/docker-entrypoint.sh buildscripts/healthcheck.sh /usr/bin/
+
 RUN \
     apk add --no-cache ca-certificates && \
     apk add --no-cache --virtual .build-deps curl && \
     echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
     curl https://dl.minio.io/server/minio/release/linux-arm64/minio > /usr/bin/minio && \
-    chmod +x /usr/bin/minio && apk del .build-deps
+    chmod +x /usr/bin/minio && \
+    chmod +x /usr/bin/docker-entrypoint.sh && \
+    chmod +x /usr/bin/healthcheck.sh
 
 EXPOSE 9000
 
-COPY buildscripts/docker-entrypoint.sh /usr/bin/
-
-RUN chmod +x /usr/bin/docker-entrypoint.sh
-
 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
 
 VOLUME ["/export"]
 
+HEALTHCHECK --interval=30s --timeout=5s \
+    CMD /usr/bin/healthcheck.sh
+
 CMD ["minio"]
diff --git a/Dockerfile.release.armhf b/Dockerfile.release.armhf
index b21ef6b58..6e8649735 100644
--- a/Dockerfile.release.armhf
+++ b/Dockerfile.release.armhf
@@ -2,21 +2,24 @@ FROM resin/armhf-alpine:3.5
 MAINTAINER Minio Inc
 
+COPY buildscripts/docker-entrypoint.sh buildscripts/healthcheck.sh /usr/bin/
+
 RUN \
     apk add --no-cache ca-certificates && \
     apk add --no-cache --virtual .build-deps curl && \
     echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
     curl https://dl.minio.io/server/minio/release/linux-arm/minio > /usr/bin/minio && \
-    chmod +x /usr/bin/minio && apk del .build-deps
+    chmod +x /usr/bin/minio && \
+    chmod +x /usr/bin/docker-entrypoint.sh && \
+    chmod +x /usr/bin/healthcheck.sh
 
 EXPOSE 9000
 
-COPY buildscripts/docker-entrypoint.sh /usr/bin/
-
-RUN chmod +x /usr/bin/docker-entrypoint.sh
-
 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
 
 VOLUME ["/export"]
 
+HEALTHCHECK --interval=30s --timeout=5s \
+    CMD /usr/bin/healthcheck.sh
+
 CMD ["minio"]
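All three release Dockerfiles above now copy buildscripts/healthcheck.sh into the image and register it via HEALTHCHECK, but the script itself is not part of this diff. Below is a minimal sketch of what such a script could look like, assuming the container should be reported healthy once the server answers HTTP on the exposed port 9000; the /minio/ endpoint, the 3-second timeout, and the use of busybox wget are illustrative assumptions, not taken from the repository.

```
#!/bin/sh
# Hypothetical sketch of buildscripts/healthcheck.sh -- the real script is not
# shown in this diff. It probes the server on the port exposed above and exits
# non-zero so that HEALTHCHECK marks the container as unhealthy on failure.

# busybox wget: -q quiet, -T timeout in seconds, -O /dev/null discards the body
if wget -q -T 3 -O /dev/null "http://127.0.0.1:9000/minio/"; then
    exit 0
fi

exit 1
```

Docker only looks at the probe's exit code: 0 means healthy, anything else counts as a failed check, and the container is flagged unhealthy after the default number of consecutive failures.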
diff --git a/Makefile b/Makefile
index 5fda58a42..0044112bb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,59 +1,10 @@
-LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)
 PWD := $(shell pwd)
 GOPATH := $(shell go env GOPATH)
+LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)
+
 BUILD_LDFLAGS := '$(LDFLAGS)'
-TAG := latest
-HOST ?= $(shell uname)
-CPU ?= $(shell uname -m)
-
-# if no host is identifed (no uname tool)
-# we assume a Linux-64bit build
-ifeq ($(HOST),)
- HOST = Linux
-endif
-
-# identify CPU
-ifeq ($(CPU), x86_64)
- HOST := $(HOST)64
-else
-ifeq ($(CPU), amd64)
- HOST := $(HOST)64
-else
-ifeq ($(CPU), i686)
- HOST := $(HOST)32
-endif
-endif
-endif
-
-
-#############################################
-# now we find out the target OS for
-# which we are going to compile in case
-# the caller didn't yet define OS himself
-ifndef (OS)
- ifeq ($(HOST), Linux64)
- arch = gcc
- else
- ifeq ($(HOST), Linux32)
- arch = 32
- else
- ifeq ($(HOST), Darwin64)
- arch = clang
- else
- ifeq ($(HOST), Darwin32)
- arch = clang
- else
- ifeq ($(HOST), FreeBSD64)
- arch = gcc
- endif
- endif
- endif
- endif
- endif
-endif
-
-all: install
+all: build
 
 checks:
 	@echo "Check deps"
@@ -68,7 +19,7 @@ getdeps: checks
 	@echo "Installing misspell" && go get -u github.com/client9/misspell/cmd/misspell
 	@echo "Installing ineffassign" && go get -u github.com/gordonklaus/ineffassign
 
-verifiers: vet fmt lint cyclo spelling
+verifiers: getdeps vet fmt lint cyclo spelling
 
 vet:
 	@echo "Running $@"
@@ -94,8 +45,6 @@ cyclo:
 	@${GOPATH}/bin/gocyclo -over 100 cmd
 	@${GOPATH}/bin/gocyclo -over 100 pkg
 
-build: getdeps verifiers $(UI_ASSETS)
-
 deadcode:
 	@${GOPATH}/bin/deadcode
 
@@ -104,7 +53,9 @@ spelling:
 	@${GOPATH}/bin/misspell -error `find pkg/`
 	@${GOPATH}/bin/misspell -error `find docs/`
 
-test: build
+# Builds minio, runs the verifiers then runs the tests.
+check: test
+test: verifiers build
 	@echo "Running all minio testing"
 	@go test $(GOFLAGS) .
 	@go test $(GOFLAGS) github.com/minio/minio/cmd...
@@ -114,9 +65,10 @@ coverage: build
 	@echo "Running all coverage for minio"
 	@./buildscripts/go-coverage.sh
 
-gomake-all: build
-	@echo "Installing minio at $(GOPATH)/bin/minio"
-	@go build --ldflags $(BUILD_LDFLAGS) -o $(GOPATH)/bin/minio
+# Builds minio locally.
+build:
+	@echo "Building minio to $(PWD)/minio ..."
+	@CGO_ENABLED=0 go build --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
 
 pkg-add:
 	@echo "Adding new package $(PKG)"
@@ -133,7 +85,11 @@ pkg-remove:
 pkg-list:
 	@$(GOPATH)/bin/govendor list
 
-install: gomake-all
+# Builds minio and installs it to $GOPATH/bin.
+install: build
+	@echo "Installing minio at $(GOPATH)/bin/minio ..."
+	@cp $(PWD)/minio $(GOPATH)/bin/minio
+	@echo "Check 'minio -h' for help."
 
 release: verifiers
 	@MINIO_RELEASE=RELEASE ./buildscripts/build.sh
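With the Makefile rewritten as above, the intended local workflow is roughly the following usage sketch; the target names come from this diff, while the surrounding commands are only illustrative.

```
# Build a static binary into the working tree (the build target sets CGO_ENABLED=0)
make build
./minio -h

# Run the verifiers and the full test suite (test now depends on verifiers and build)
make test

# Copy the freshly built binary into $GOPATH/bin
make install
```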
diff --git a/README.md b/README.md
index 6c5c09006..5129399ba 100644
--- a/README.md
+++ b/README.md
@@ -27,20 +27,13 @@
 brew install minio/stable/minio
 minio server ~/Photos
 ```
 
 #### Note
-If you previously installed minio using `brew install minio` then uninstall minio as shown below
+If you previously installed minio using `brew install minio`, reinstall it from the official `minio/stable/minio` repo as shown below. Homebrew builds are unstable due to golang 1.8 bugs.
 
 ```
 brew uninstall minio
-```
-
-Then re-install the latest minio using:
-
-```
 brew install minio/stable/minio
 ```
 
->`brew install minio` and `brew upgrade minio` will no longer install/upgrade the latest minio binaries on macOS. Upstream bugs in golang 1.8 broke Minio brew installer. Use the updated `minio/stable/minio` in your brew paths.
-
 ### Binary Download
 
 | Platform| Architecture | URL|
 | ----------| -------- | ------|
diff --git a/appveyor.yml b/appveyor.yml
index 06a1149b2..1e3f3b8e8 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -24,6 +24,8 @@ install:
 # To run your custom scripts instead of automatic MSBuild
 build_script:
   # Compile
+  # We need to disable the firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
+  - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
   - appveyor AddCompilationMessage "Starting Compile"
   - cd c:\gopath\src\github.com\minio\minio
   - go run buildscripts/gen-ldflags.go > temp.txt
diff --git a/browser/app/js/components/Browse.js b/browser/app/js/components/Browse.js
index df54cf34c..15d1d508c 100644
--- a/browser/app/js/components/Browse.js
+++ b/browser/app/js/components/Browse.js
@@ -370,8 +370,24 @@ export default class Browse extends React.Component {
   }
 
   handleExpireValue(targetInput, inc, object) {
-    inc === -1 ? this.refs[targetInput].stepDown(1) : this.refs[targetInput].stepUp(1)
+    let value = this.refs[targetInput].value
+    let maxValue = (targetInput == 'expireHours') ? 23 : (targetInput == 'expireMins') ? 59 : (targetInput == 'expireDays') ? 7 : 0
+    value = isNaN(value) ? 0 : value
+    // Use a custom step count to support the Edge browser
+    if((inc === -1)) {
+      if(value != 0) {
+        value--
+      }
+    }
+    else {
+      if(value != maxValue) {
+        value++
+      }
+    }
+    this.refs[targetInput].value = value
+
+    // Reset hours and mins when days reaches its max value
     if (this.refs.expireDays.value == 7) {
       this.refs.expireHours.value = 0
       this.refs.expireMins.value = 0
@@ -379,6 +395,7 @@ export default class Browse extends React.Component {
     if (this.refs.expireDays.value + this.refs.expireHours.value + this.refs.expireMins.value == 0) {
       this.refs.expireDays.value = 7
     }
+
     const {dispatch} = this.props
     dispatch(actions.shareObject(object, this.refs.expireDays.value, this.refs.expireHours.value, this.refs.expireMins.value))
   }
diff --git a/browser/ui-assets.go b/browser/ui-assets.go
index 30e69f27c..93225ff79 100644
--- a/browser/ui-assets.go
+++ b/browser/ui-assets.go
@@ -4,7 +4,7 @@
 // production/favicon.ico
 // production/firefox.png
 // production/index.html
-// production/index_bundle-2017-06-02T21-36-18Z.js
+// production/index_bundle-2017-06-24T08-35-58Z.js
 // production/loader.css
 // production/logo.svg
 // production/safari.png
@@ -65,7 +65,7 @@ func productionChromePng() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "production/chrome.png", size: 3726, mode: os.FileMode(436), modTime: time.Unix(1496439395, 0)}
+	info := bindataFileInfo{name: "production/chrome.png", size: 3726, mode: os.FileMode(436), modTime: time.Unix(1498293380, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -82,7 +82,7 @@ func productionFaviconIco() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "production/favicon.ico", size: 1340, mode: os.FileMode(436), modTime: time.Unix(1496439395, 0)}
+	info := bindataFileInfo{name: "production/favicon.ico", size: 1340, mode: os.FileMode(436), modTime: time.Unix(1498293380, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -99,7 +99,7 @@ func productionFirefoxPng() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "production/firefox.png", size: 4795, mode: os.FileMode(436), modTime: time.Unix(1496439395, 0)}
+	info := bindataFileInfo{name: "production/firefox.png", size: 4795, mode: os.FileMode(436), modTime: time.Unix(1498293380, 0)}
 	a := &asset{bytes: bytes, info: 
info} return a, nil } @@ -156,8 +156,8 @@ var _productionIndexHTML = []byte(` - - + + `) @@ -172,22 +172,22 @@ func productionIndexHTML() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "production/index.html", size: 1996, mode: os.FileMode(436), modTime: time.Unix(1496439395, 0)} + info := bindataFileInfo{name: "production/index.html", size: 1996, mode: os.FileMode(436), modTime: time.Unix(1498293380, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _productionIndex_bundle20170602t213618zJs = []byte(`!function(A){function M(I){if(t[I])return t[I].exports;var g=t[I]={exports:{},id:I,loaded:!1};return A[I].call(g.exports,g,g.exports,M),g.loaded=!0,g.exports}var t={};return M.m=A,M.c=t,M.p="",M(0)}([function(A,M,t){A.exports=t(349)},function(A,M,t){var I=t(5),g=t(48),e=t(25),i=t(26),T=t(49),E="prototype",N=function(A,M,t){var n,o,c,C,a=A&N.F,D=A&N.G,r=A&N.S,B=A&N.P,Q=A&N.B,s=D?I:r?I[M]||(I[M]={}):(I[M]||{})[E],u=D?g:g[M]||(g[M]={}),x=u[E]||(u[E]={});D&&(t=M);for(n in t)o=!a&&s&&void 0!==s[n],c=(o?s:t)[n],C=Q&&o?T(c,I):B&&"function"==typeof c?T(Function.call,c):c,s&&i(s,n,c,A&N.U),u[n]!=c&&e(u,n,C),B&&x[n]!=c&&(x[n]=c)};I.core=g,N.F=1,N.G=2,N.S=4,N.P=8,N.B=16,N.W=32,N.U=64,N.R=128,A.exports=N},function(A,M,t){"use strict";A.exports=t(732)},function(A,M,t){"use strict";function I(A,M,t,I,g,e,i,T){if(!A){var E;if(void 0===M)E=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var N=[t,I,g,e,i,T],n=0;E=new Error(M.replace(/%s/g,function(){return N[n++]})),E.name="Invariant Violation"}throw E.framesToPop=1,E}}A.exports=I},function(A,M,t){var I=t(9);A.exports=function(A){if(!I(A))throw TypeError(A+" is not an object!");return A}},function(A,M){var t=A.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=t)},function(A,M){A.exports=function(A){try{return!!A()}catch(A){return!0}}},function(A,M,t){"use strict";var I=t(44),g=I;A.exports=g},function(A,M){"use strict";function t(A,M){if(null==A)throw new TypeError("Object.assign target cannot be null or undefined");for(var t=Object(A),I=Object.prototype.hasOwnProperty,g=1;g0?g(I(A),9007199254740991):0}},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){if(M.indexOf("deprecated")!==-1){if(E[M])return;E[M]=!0}M="[react-router] "+M;for(var t=arguments.length,I=Array(t>2?t-2:0),g=2;g"+g+""};A.exports=function(A,M){var t={};t[A]=M(T),I(I.P+I.F*g(function(){var M=""[A]('"');return M!==M.toLowerCase()||M.split('"').length>3}),"String",t)}},function(A,M,t){var I=t(95),g=t(36);A.exports=function(A){return I(g(A))}},function(A,M,t){"use strict";var I=t(53),g=t(8),e=(t(133),"function"==typeof Symbol&&Symbol.for&&Symbol.for("react.element")||60103),i={key:!0,ref:!0,__self:!0,__source:!0},T=function(A,M,t,I,g,i,T){var E={$$typeof:e,type:A,key:M,ref:t,props:T,_owner:i};return E};T.createElement=function(A,M,t){var g,e={},E=null,N=null,n=null,o=null;if(null!=M){N=void 0===M.ref?null:M.ref,E=void 0===M.key?null:""+M.key,n=void 0===M.__self?null:M.__self,o=void 0===M.__source?null:M.__source;for(g in M)M.hasOwnProperty(g)&&!i.hasOwnProperty(g)&&(e[g]=M[g])}var c=arguments.length-2;if(1===c)e.children=t;else if(c>1){for(var C=Array(c),a=0;a1){for(var 
D=Array(a),r=0;r1?"-":"";g.href=e,g.download=M.bucketName+i+M.prefix.slice(0,-1)+".zip",g.click(),window.URL.revokeObjectURL(e),g.remove()}},t.send(JSON.stringify(M))}},M.uploadFile=function(A,M){return function(t,I){var g=I(),i=g.currentBucket,T=g.currentPath,E=""+T+A.name,n=window.location.origin+"/minio/upload/"+i+"/"+E,o=i+"-"+T+"-"+A.name;M.open("PUT",n,!0),M.withCredentials=!1;var c=N.default.getItem("token");c&&M.setRequestHeader("Authorization","Bearer "+N.default.getItem("token")),M.setRequestHeader("x-amz-date",(0,e.default)().utc().format("YYYYMMDDTHHmmss")+"Z"),t(eA({slug:o,xhr:M,size:A.size,name:A.name})),M.onload=function(I){401!=M.status&&403!=M.status&&500!=M.status||(EA(!1),t(iA({slug:o})),t(_({type:"danger",message:"Unauthorized request."}))),200==M.status&&(EA(!1),t(iA({slug:o})),t(_({type:"success",message:"File '"+A.name+"' uploaded successfully."})),t(gA(T)))},M.upload.addEventListener("error",function(M){t(_({type:"danger",message:"Error occurred uploading '"+A.name+"'."})),t(iA({slug:o}))}),M.upload.addEventListener("progress",function(A){if(A.lengthComputable){var M=A.loaded;A.total;t(TA({slug:o,loaded:M}))}}),M.send(A)}},M.showAbout=function(){return{type:h,showAbout:!0}},M.hideAbout=function(){return{type:h,showAbout:!1}},M.setSortNameOrder=function(A){return{type:S,sortNameOrder:A}}),nA=(M.setSortSizeOrder=function(A){return{type:z,sortSizeOrder:A}},M.setSortDateOrder=function(A){return{type:p,sortDateOrder:A}},M.setLatestUIVersion=function(A){return{type:U,latestUiVersion:A}},M.showSettings=function(){return{type:k,showSettings:!0}},M.hideSettings=function(){return{type:k,showSettings:!1}},M.setSettings=function(A){return{type:R,settings:A}},M.showBucketPolicy=function(){return{type:J,showBucketPolicy:!0}},M.hideBucketPolicy=function(){return{type:J,showBucketPolicy:!1}},M.setPolicies=function(A){return{type:G,policies:A}},M.checkedObjectsAdd=function(A){return{type:W,objectName:A}},M.checkedObjectsRemove=function(A){return{type:V,objectName:A}},M.checkedObjectsReset=function(A){return{type:P,objectName:A}})},function(A,M,t){var I=t(49),g=t(95),e=t(19),i=t(17),T=t(373);A.exports=function(A,M){var t=1==A,E=2==A,N=3==A,n=4==A,o=6==A,c=5==A||o,C=M||T;return function(M,T,a){for(var D,r,B=e(M),Q=g(B),s=I(T,a,3),u=i(Q.length),x=0,y=t?C(M,u):E?C(M,0):void 0;u>x;x++)if((c||x in Q)&&(D=Q[x],r=s(D,x,B),A))if(t)y[x]=r;else if(r)switch(A){case 3:return!0;case 5:return D;case 6:return x;case 2:y.push(D)}else if(n)return!1;return o?-1:N||n?n:y}}},function(A,M,t){var I=t(1),g=t(48),e=t(6);A.exports=function(A,M){var t=(g.Object||{})[A]||Object[A],i={};i[A]=M(t),I(I.S+I.F*e(function(){t(1)}),"Object",i)}},function(A,M,t){var I=t(9);A.exports=function(A,M){if(!I(A))return A;var t,g;if(M&&"function"==typeof(t=A.toString)&&!I(g=t.call(A)))return g;if("function"==typeof(t=A.valueOf)&&!I(g=t.call(A)))return g;if(!M&&"function"==typeof(t=A.toString)&&!I(g=t.call(A)))return g;throw TypeError("Can't convert object to primitive value")}},function(A,M){"use strict";function t(A){return function(){return A}}function I(){}I.thatReturns=t,I.thatReturnsFalse=t(!1),I.thatReturnsTrue=t(!0),I.thatReturnsNull=t(null),I.thatReturnsThis=function(){return this},I.thatReturnsArgument=function(A){return A},A.exports=I},function(A,M,t){function I(A){if(i.unindexedChars&&e(A)){for(var M=-1,t=A.length,I=Object(A);++M3&&void 0!==arguments[3]?arguments[3]:{},N=Boolean(A),c=A||l,a=void 0;a="function"==typeof M?M:M?(0,B.default)(M):w;var r=t||L,Q=I.pure,s=void 0===Q||Q,u=I.withRef,y=void 
0!==u&&u,h=s&&r!==L,S=d++;return function(A){function M(A,M,t){var I=r(A,M,t);return I}var t="Connect("+T(A)+")",I=function(I){function T(A,M){g(this,T);var i=e(this,I.call(this,A,M));i.version=S,i.store=A.store||M.store,(0,j.default)(i.store,'Could not find "store" in either the context or '+('props of "'+t+'". ')+"Either wrap the root component in a , "+('or explicitly pass "store" as a prop to "'+t+'".'));var E=i.store.getState();return i.state={storeState:E},i.clearCache(),i}return i(T,I),T.prototype.shouldComponentUpdate=function(){return!s||this.haveOwnPropsChanged||this.hasStoreStateChanged},T.prototype.computeStateProps=function(A,M){if(!this.finalMapStateToProps)return this.configureFinalMapState(A,M);var t=A.getState(),I=this.doStatePropsDependOnOwnProps?this.finalMapStateToProps(t,M):this.finalMapStateToProps(t);return I},T.prototype.configureFinalMapState=function(A,M){var t=c(A.getState(),M),I="function"==typeof t;return this.finalMapStateToProps=I?t:c,this.doStatePropsDependOnOwnProps=1!==this.finalMapStateToProps.length,I?this.computeStateProps(A,M):t},T.prototype.computeDispatchProps=function(A,M){if(!this.finalMapDispatchToProps)return this.configureFinalMapDispatch(A,M);var t=A.dispatch,I=this.doDispatchPropsDependOnOwnProps?this.finalMapDispatchToProps(t,M):this.finalMapDispatchToProps(t);return I},T.prototype.configureFinalMapDispatch=function(A,M){var t=a(A.dispatch,M),I="function"==typeof t;return this.finalMapDispatchToProps=I?t:a,this.doDispatchPropsDependOnOwnProps=1!==this.finalMapDispatchToProps.length,I?this.computeDispatchProps(A,M):t},T.prototype.updateStatePropsIfNeeded=function(){var A=this.computeStateProps(this.store,this.props);return(!this.stateProps||!(0,D.default)(A,this.stateProps))&&(this.stateProps=A,!0)},T.prototype.updateDispatchPropsIfNeeded=function(){var A=this.computeDispatchProps(this.store,this.props);return(!this.dispatchProps||!(0,D.default)(A,this.dispatchProps))&&(this.dispatchProps=A,!0)},T.prototype.updateMergedPropsIfNeeded=function(){var A=M(this.stateProps,this.dispatchProps,this.props);return!(this.mergedProps&&h&&(0,D.default)(A,this.mergedProps))&&(this.mergedProps=A,!0)},T.prototype.isSubscribed=function(){return"function"==typeof this.unsubscribe},T.prototype.trySubscribe=function(){N&&!this.unsubscribe&&(this.unsubscribe=this.store.subscribe(this.handleChange.bind(this)),this.handleChange())},T.prototype.tryUnsubscribe=function(){this.unsubscribe&&(this.unsubscribe(),this.unsubscribe=null)},T.prototype.componentDidMount=function(){this.trySubscribe()},T.prototype.componentWillReceiveProps=function(A){s&&(0,D.default)(A,this.props)||(this.haveOwnPropsChanged=!0)},T.prototype.componentWillUnmount=function(){this.tryUnsubscribe(),this.clearCache()},T.prototype.clearCache=function(){this.dispatchProps=null,this.stateProps=null,this.mergedProps=null,this.haveOwnPropsChanged=!0,this.hasStoreStateChanged=!0,this.haveStatePropsBeenPrecalculated=!1,this.statePropsPrecalculationError=null,this.renderedElement=null,this.finalMapDispatchToProps=null,this.finalMapStateToProps=null},T.prototype.handleChange=function(){if(this.unsubscribe){var A=this.store.getState(),M=this.state.storeState;if(!s||M!==A){if(s&&!this.doStatePropsDependOnOwnProps){var t=E(this.updateStatePropsIfNeeded,this);if(!t)return;t===Y&&(this.statePropsPrecalculationError=Y.value),this.haveStatePropsBeenPrecalculated=!0}this.hasStoreStateChanged=!0,this.setState({storeState:A})}}},T.prototype.getWrappedInstance=function(){return(0,j.default)(y,"To access the wrapped 
instance, you need to specify { withRef: true } as the fourth argument of the connect() call."),this.refs.wrappedInstance},T.prototype.render=function(){var M=this.haveOwnPropsChanged,t=this.hasStoreStateChanged,I=this.haveStatePropsBeenPrecalculated,g=this.statePropsPrecalculationError,e=this.renderedElement;if(this.haveOwnPropsChanged=!1,this.hasStoreStateChanged=!1,this.haveStatePropsBeenPrecalculated=!1,this.statePropsPrecalculationError=null,g)throw g;var i=!0,T=!0;s&&e&&(i=t||M&&this.doStatePropsDependOnOwnProps,T=M&&this.doDispatchPropsDependOnOwnProps);var E=!1,N=!1;I?E=!0:i&&(E=this.updateStatePropsIfNeeded()),T&&(N=this.updateDispatchPropsIfNeeded());var c=!0;return c=!!(E||N||M)&&this.updateMergedPropsIfNeeded(),!c&&e?e:(y?this.renderedElement=(0,o.createElement)(A,n({},this.mergedProps,{ref:"wrappedInstance"})):this.renderedElement=(0,o.createElement)(A,this.mergedProps),this.renderedElement)},T}(o.Component); -return I.displayName=t,I.WrappedComponent=A,I.contextTypes={store:C.default},I.propTypes={store:C.default},(0,x.default)(I,A)}}M.__esModule=!0;var n=Object.assign||function(A){for(var M=1;Mt;)g[t]=M[t++];return g},fA=function(A,M,t){G(A,M,{get:function(){return this._d[t]}})},mA=function(A){var M,t,I,g,e,i,T=y(A),E=arguments.length,n=E>1?arguments[1]:void 0,o=void 0!==n,c=Y(T);if(void 0!=c&&!j(c)){for(i=c.call(T),I=[],M=0;!(e=i.next()).done;M++)I.push(e.value);T=I}for(o&&E>2&&(n=N(n,arguments[2],2)),M=0,t=D(T.length),g=pA(this,t);t>M;M++)g[M]=o?n(T[M],M):T[M];return g},FA=function(){for(var A=0,M=arguments.length,t=pA(this,M);M>A;)t[A]=arguments[A++];return t},kA=!!X&&e(function(){BA.call(new X(1))}),RA=function(){return BA.apply(kA?DA.call(zA(this)):zA(this),arguments)},JA={copyWithin:function(A,M){return k.call(zA(this),A,M,arguments.length>2?arguments[2]:void 0)},every:function(A){return tA(zA(this),A,arguments.length>1?arguments[1]:void 0)},fill:function(A){return F.apply(zA(this),arguments)},filter:function(A){return UA(this,AA(zA(this),A,arguments.length>1?arguments[1]:void 0))},find:function(A){return IA(zA(this),A,arguments.length>1?arguments[1]:void 0)},findIndex:function(A){return gA(zA(this),A,arguments.length>1?arguments[1]:void 0)},forEach:function(A){$(zA(this),A,arguments.length>1?arguments[1]:void 0)},indexOf:function(A){return iA(zA(this),A,arguments.length>1?arguments[1]:void 0)},includes:function(A){return eA(zA(this),A,arguments.length>1?arguments[1]:void 0)},join:function(A){return CA.apply(zA(this),arguments)},lastIndexOf:function(A){return nA.apply(zA(this),arguments)},map:function(A){return LA(zA(this),A,arguments.length>1?arguments[1]:void 0)},reduce:function(A){return oA.apply(zA(this),arguments)},reduceRight:function(A){return cA.apply(zA(this),arguments)},reverse:function(){for(var A,M=this,t=zA(M).length,I=Math.floor(t/2),g=0;g1?arguments[1]:void 0)},sort:function(A){return aA.call(zA(this),A)},subarray:function(A,M){var t=zA(this),I=t.length,g=r(A,I);return new(p(t,t[xA]))(t.buffer,t.byteOffset+g*t.BYTES_PER_ELEMENT,D((void 0===M?I:r(M,I))-g))}},GA=function(A,M){return UA(this,DA.call(zA(this),A,M))},HA=function(A){zA(this);var M=SA(arguments[1],1),t=this.length,I=y(A),g=D(I.length),e=0;if(g+M>t)throw v(wA);for(;e255?255:255&I),g.v[a](t*M+g.o,I,YA)},h=function(A,M){G(A,M,{get:function(){return Y(this,M)},set:function(A){return d(this,M,A)},enumerable:!0})};s?(r=t(function(A,t,I,g){n(A,r,N,"_d");var e,i,T,E,o=0,C=0;if(x(t)){if(!(t instanceof q||(E=u(t))==W||E==V))return jA in t?OA(r,t):mA.call(r,t);e=t,C=SA(I,M);var a=t.byteLength;if(void 
0===g){if(a%M)throw v(wA);if(i=a-C,i<0)throw v(wA)}else if(i=D(g)*M,i+C>a)throw v(wA);T=i/M}else T=hA(t,!0),i=T*M,e=new q(i);for(c(A,"_d",{b:e,o:C,l:i,e:T,v:new _(e)});o0?I:t)(A)}},function(A,M){"use strict";var t=function(A){var M;for(M in A)if(A.hasOwnProperty(M))return M;return null};A.exports=t},function(A,M,t){var I=t(120),g=t(85),e=t(73),i="[object Array]",T=Object.prototype,E=T.toString,N=I(Array,"isArray"),n=N||function(A){return e(A)&&g(A.length)&&E.call(A)==i};A.exports=n},function(A,M){function t(A){var M=typeof A;return!!A&&("object"==M||"function"==M)}A.exports=t},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){return null==A||c.default.isValidElement(A)}function e(A){return g(A)||Array.isArray(A)&&A.every(g)}function i(A,M){return n({},A,M)}function T(A){var M=A.type,t=i(M.defaultProps,A.props);if(t.children){var I=E(t.children,t);I.length&&(t.childRoutes=I),delete t.children}return t}function E(A,M){var t=[];return c.default.Children.forEach(A,function(A){if(c.default.isValidElement(A))if(A.type.createRouteFromReactElement){var I=A.type.createRouteFromReactElement(A,M);I&&t.push(I)}else t.push(T(A))}),t}function N(A){return e(A)?A=E(A):A&&!Array.isArray(A)&&(A=[A]),A}M.__esModule=!0;var n=Object.assign||function(A){for(var M=1;M";for(M.style.display="none",t(142).appendChild(M),M.src="javascript:",A=M.contentWindow.document,A.open(),A.write(g+"script"+i+"document.F=Object"+g+"/script"+i),A.close(),N=A.F;I--;)delete N[E][e[I]];return N()};A.exports=Object.create||function(A,M){var t;return null!==A?(T[E]=I(A),t=new T,T[E]=null,t[i]=A):t=N(),void 0===M?t:g(t,M)}},function(A,M,t){var I=t(233),g=t(140).concat("length","prototype");M.f=Object.getOwnPropertyNames||function(A){return I(A,g)}},function(A,M,t){var I=t(233),g=t(140);A.exports=Object.keys||function(A){return I(A,g)}},function(A,M,t){var I=t(26);A.exports=function(A,M,t){for(var g in M)I(A,g,M[g],t);return A}},function(A,M,t){"use strict";var I=t(5),g=t(14),e=t(13),i=t(11)("species");A.exports=function(A){var M=I[A];e&&M&&!M[i]&&g.f(M,i,{configurable:!0,get:function(){return this}})}},function(A,M,t){var I=t(57),g=Math.max,e=Math.min;A.exports=function(A,M){return A=I(A),A<0?g(A+M,0):e(A,M)}},function(A,M){var t=0,I=Math.random();A.exports=function(A){return"Symbol(".concat(void 0===A?"":A,")_",(++t+I).toString(36))}},function(A,M){"use strict";A.exports=!("undefined"==typeof window||!window.document||!window.document.createElement)},function(A,M){function t(A){return!!A&&"object"==typeof A}A.exports=t},function(A,M,t){"use strict";function I(A,M,t){if(A[M])return new Error("<"+t+'> should not have a "'+M+'" prop')}M.__esModule=!0,M.routes=M.route=M.components=M.component=M.history=void 0,M.falsy=I;var g=t(2),e=g.PropTypes.func,i=g.PropTypes.object,T=g.PropTypes.arrayOf,E=g.PropTypes.oneOfType,N=g.PropTypes.element,n=g.PropTypes.shape,o=g.PropTypes.string,c=(M.history=n({listen:e.isRequired,push:e.isRequired,replace:e.isRequired,go:e.isRequired,goBack:e.isRequired,goForward:e.isRequired}),M.component=E([e,o])),C=(M.components=E([c,i]),M.route=E([i,N]));M.routes=E([C,T(C)])},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){var M=A.match(/^https?:\/\/[^\/]*/);return null==M?A:A.substring(M[0].length)}function e(A){var M=g(A),t="",I="",e=M.indexOf("#");e!==-1&&(I=M.substring(e),M=M.substring(0,e));var i=M.indexOf("?");return 
i!==-1&&(t=M.substring(i),M=M.substring(0,i)),""===M&&(M="/"),{pathname:M,search:t,hash:I}}M.__esModule=!0,M.extractPath=g,M.parsePath=e;var i=t(47);I(i)},function(A,M,t){"use strict";function I(){g.attachRefs(this,this._currentElement)}var g=t(751),e={mountComponent:function(A,M,t,g){var e=A.mountComponent(M,t,g);return A._currentElement&&null!=A._currentElement.ref&&t.getReactMountReady().enqueue(I,A),e},unmountComponent:function(A){g.detachRefs(A,A._currentElement),A.unmountComponent()},receiveComponent:function(A,M,t,e){var i=A._currentElement;if(M!==i||e!==A._context){var T=g.shouldUpdateRefs(i,M);T&&g.detachRefs(A,i),A.receiveComponent(M,t,e),T&&A._currentElement&&null!=A._currentElement.ref&&t.getReactMountReady().enqueue(I,A)}},performUpdateIfNecessary:function(A,M){A.performUpdateIfNecessary(M)}};A.exports=e},function(A,M,t){"use strict";function I(A,M,t,I){this.dispatchConfig=A,this.dispatchMarker=M,this.nativeEvent=t;var g=this.constructor.Interface;for(var e in g)if(g.hasOwnProperty(e)){var T=g[e];T?this[e]=T(t):"target"===e?this.target=I:this[e]=t[e]}var E=null!=t.defaultPrevented?t.defaultPrevented:t.returnValue===!1;E?this.isDefaultPrevented=i.thatReturnsTrue:this.isDefaultPrevented=i.thatReturnsFalse,this.isPropagationStopped=i.thatReturnsFalse}var g=t(62),e=t(8),i=t(44),T=(t(7),{type:null,target:null,currentTarget:i.thatReturnsNull,eventPhase:null,bubbles:null,cancelable:null,timeStamp:function(A){return A.timeStamp||Date.now()},defaultPrevented:null,isTrusted:null});e(I.prototype,{preventDefault:function(){this.defaultPrevented=!0;var A=this.nativeEvent;A&&(A.preventDefault?A.preventDefault():A.returnValue=!1,this.isDefaultPrevented=i.thatReturnsTrue)},stopPropagation:function(){var A=this.nativeEvent;A&&(A.stopPropagation?A.stopPropagation():A.cancelBubble=!0,this.isPropagationStopped=i.thatReturnsTrue)},persist:function(){this.isPersistent=i.thatReturnsTrue},isPersistent:i.thatReturnsFalse,destructor:function(){var A=this.constructor.Interface;for(var M in A)this[M]=null;this.dispatchConfig=null,this.dispatchMarker=null,this.nativeEvent=null}}),I.Interface=T,I.augmentClass=function(A,M){var t=this,I=Object.create(t.prototype);e(I,A.prototype),A.prototype=I,A.prototype.constructor=A,A.Interface=e({},t.Interface,M),A.augmentClass=t.augmentClass,g.addPoolingTo(A,g.fourArgumentPooler)},g.addPoolingTo(I,g.fourArgumentPooler),A.exports=I},function(A,M,t){var I=t(11)("unscopables"),g=Array.prototype;void 0==g[I]&&t(25)(g,I,{}),A.exports=function(A){g[I][A]=!0}},function(A,M,t){var I=t(49),g=t(227),e=t(144),i=t(4),T=t(17),E=t(161),N={},n={},M=A.exports=function(A,M,t,o,c){var C,a,D,r,B=c?function(){return A}:E(A),Q=I(t,o,M?2:1),s=0;if("function"!=typeof B)throw TypeError(A+" is not iterable!");if(e(B)){for(C=T(A.length);C>s;s++)if(r=M?Q(i(a=A[s])[0],a[1]):Q(A[s]),r===N||r===n)return r}else for(D=B.call(A);!(a=D.next()).done;)if(r=g(D,Q,a.value,M),r===N||r===n)return r};M.BREAK=N,M.RETURN=n},function(A,M){A.exports={}},function(A,M,t){var I=t(14).f,g=t(21),e=t(11)("toStringTag");A.exports=function(A,M,t){A&&!g(A=t?A:A.prototype,e)&&I(A,e,{configurable:!0,value:M})}},function(A,M,t){var I=t(1),g=t(36),e=t(6),i=t(157),T="["+i+"]",E="​…",N=RegExp("^"+T+T+"*"),n=RegExp(T+T+"*$"),o=function(A,M,t){var g={},T=e(function(){return!!i[A]()||E[A]()!=E}),N=g[A]=T?M(c):i[A];t&&(g[t]=N),I(I.P+I.F*T,"String",g)},c=o.trim=function(A,M){return A=String(g(A)),1&M&&(A=A.replace(N,"")),2&M&&(A=A.replace(n,"")),A};A.exports=o},function(A,M){"use strict";function t(A){return 
A&&A.ownerDocument||document}M.__esModule=!0,M.default=t,A.exports=M.default},function(A,M,t){"use strict";var I=t(72),g=function(){var A=I&&document.documentElement;return A&&A.contains?function(A,M){return A.contains(M)}:A&&A.compareDocumentPosition?function(A,M){return A===M||!!(16&A.compareDocumentPosition(M))}:function(A,M){if(M)do if(M===A)return!0;while(M=M.parentNode);return!1}}();A.exports=g},function(A,M){function t(A){return"number"==typeof A&&A>-1&&A%1==0&&A<=I}var I=9007199254740991;A.exports=t},function(A,M,t){(function(A){!function(M,t){A.exports=t()}(this,function(){"use strict";function M(){return DI.apply(null,arguments)}function t(A){DI=A}function I(A){return A instanceof Array||"[object Array]"===Object.prototype.toString.call(A)}function g(A){return null!=A&&"[object Object]"===Object.prototype.toString.call(A)}function e(A){var M;for(M in A)return!1;return!0}function i(A){return"number"==typeof A||"[object Number]"===Object.prototype.toString.call(A)}function T(A){return A instanceof Date||"[object Date]"===Object.prototype.toString.call(A)}function E(A,M){var t,I=[];for(t=0;t0)for(t in QI)I=QI[t],g=M[I],r(g)||(A[I]=g);return A}function Q(A){B(this,A),this._d=new Date(null!=A._d?A._d.getTime():NaN),this.isValid()||(this._d=new Date(NaN)),sI===!1&&(sI=!0,M.updateOffset(this),sI=!1)}function s(A){return A instanceof Q||null!=A&&null!=A._isAMomentObject}function u(A){return A<0?Math.ceil(A)||0:Math.floor(A)}function x(A){var M=+A,t=0;return 0!==M&&isFinite(M)&&(t=u(M)),t}function y(A,M,t){var I,g=Math.min(A.length,M.length),e=Math.abs(A.length-M.length),i=0;for(I=0;I0?"future":"past"];return L(t)?t(M):t.replace(/%s/i,M)}function m(A,M){var t=A.toLowerCase();SI[t]=SI[t+"s"]=SI[M]=A}function F(A){return"string"==typeof A?SI[A]||SI[A.toLowerCase()]:void 0}function k(A){var M,t,I={};for(t in A)N(A,t)&&(M=F(t),M&&(I[M]=A[t]));return I}function R(A,M){zI[A]=M}function J(A){var M=[];for(var t in A)M.push({unit:t,priority:zI[t]});return M.sort(function(A,M){return A.priority-M.priority}),M}function G(A,t){return function(I){return null!=I?(v(this,A,I),M.updateOffset(this,t),this):H(this,A)}}function H(A,M){return A.isValid()?A._d["get"+(A._isUTC?"UTC":"")+M]():NaN}function v(A,M,t){A.isValid()&&A._d["set"+(A._isUTC?"UTC":"")+M](t)}function b(A){return A=F(A),L(this[A])?this[A]():this}function X(A,M){if("object"==typeof A){A=k(A);for(var t=J(A),I=0;I=0;return(e?t?"+":"":"-")+Math.pow(10,Math.max(0,g)).toString().substr(1)+I}function V(A,M,t,I){var g=I;"string"==typeof I&&(g=function(){return this[I]()}),A&&(fI[A]=g),M&&(fI[M[0]]=function(){return W(g.apply(this,arguments),M[1],M[2])}),t&&(fI[t]=function(){return this.localeData().ordinal(g.apply(this,arguments),A)})}function P(A){return A.match(/\[[\s\S]/)?A.replace(/^\[|\]$/g,""):A.replace(/\\/g,"")}function Z(A){var M,t,I=A.match(pI);for(M=0,t=I.length;M=0&&UI.test(A);)A=A.replace(UI,t),UI.lastIndex=0,I-=1;return A}function _(A,M,t){$I[A]=L(M)?M:function(A,I){return A&&t?t:M}}function $(A,M){return N($I,A)?$I[A](M._strict,M._locale):new RegExp(AA(A))}function AA(A){return MA(A.replace("\\","").replace(/\\(\[)|\\(\])|\[([^\]\[]*)\]|\\(.)/g,function(A,M,t,I,g){return M||t||I||g}))}function MA(A){return A.replace(/[-\/\\^$*+?.()|[\]{}]/g,"\\$&")}function tA(A,M){var t,I=M;for("string"==typeof A&&(A=[A]),i(M)&&(I=function(A,t){t[M]=x(A)}),t=0;t=0&&isFinite(T.getFullYear())&&T.setFullYear(A),T}function uA(A){var M=new Date(Date.UTC.apply(null,arguments));return 
A<100&&A>=0&&isFinite(M.getUTCFullYear())&&M.setUTCFullYear(A),M}function xA(A,M,t){var I=7+M-t,g=(7+uA(A,0,I).getUTCDay()-M)%7;return-g+I-1}function yA(A,M,t,I,g){var e,i,T=(7+t-I)%7,E=xA(A,I,g),N=1+7*(M-1)+T+E;return N<=0?(e=A-1,i=rA(e)+N):N>rA(A)?(e=A+1,i=N-rA(A)):(e=A,i=N),{year:e,dayOfYear:i}}function jA(A,M,t){var I,g,e=xA(A.year(),M,t),i=Math.floor((A.dayOfYear()-e-1)/7)+1;return i<1?(g=A.year()-1,I=i+lA(g,M,t)):i>lA(A.year(),M,t)?(I=i-lA(A.year(),M,t),g=A.year()+1):(g=A.year(),I=i),{week:I,year:g}}function lA(A,M,t){var I=xA(A,M,t),g=xA(A+1,M,t);return(rA(A)-I+g)/7}function wA(A){return jA(A,this._week.dow,this._week.doy).week}function LA(){return this._week.dow}function YA(){return this._week.doy}function dA(A){var M=this.localeData().week(this);return null==A?M:this.add(7*(A-M),"d")}function hA(A){var M=jA(this,1,4).week;return null==A?M:this.add(7*(A-M),"d")}function SA(A,M){return"string"!=typeof A?A:isNaN(A)?(A=M.weekdaysParse(A),"number"==typeof A?A:null):parseInt(A,10)}function zA(A,M){return"string"==typeof A?M.weekdaysParse(A)%7||7:isNaN(A)?null:A}function pA(A,M){return A?I(this._weekdays)?this._weekdays[A.day()]:this._weekdays[this._weekdays.isFormat.test(M)?"format":"standalone"][A.day()]:this._weekdays}function UA(A){return A?this._weekdaysShort[A.day()]:this._weekdaysShort}function OA(A){return A?this._weekdaysMin[A.day()]:this._weekdaysMin}function fA(A,M,t){var I,g,e,i=A.toLocaleLowerCase();if(!this._weekdaysParse)for(this._weekdaysParse=[],this._shortWeekdaysParse=[],this._minWeekdaysParse=[],I=0;I<7;++I)e=o([2e3,1]).day(I),this._minWeekdaysParse[I]=this.weekdaysMin(e,"").toLocaleLowerCase(),this._shortWeekdaysParse[I]=this.weekdaysShort(e,"").toLocaleLowerCase(),this._weekdaysParse[I]=this.weekdays(e,"").toLocaleLowerCase();return t?"dddd"===M?(g=ng.call(this._weekdaysParse,i),g!==-1?g:null):"ddd"===M?(g=ng.call(this._shortWeekdaysParse,i),g!==-1?g:null):(g=ng.call(this._minWeekdaysParse,i),g!==-1?g:null):"dddd"===M?(g=ng.call(this._weekdaysParse,i),g!==-1?g:(g=ng.call(this._shortWeekdaysParse,i),g!==-1?g:(g=ng.call(this._minWeekdaysParse,i),g!==-1?g:null))):"ddd"===M?(g=ng.call(this._shortWeekdaysParse,i),g!==-1?g:(g=ng.call(this._weekdaysParse,i),g!==-1?g:(g=ng.call(this._minWeekdaysParse,i),g!==-1?g:null))):(g=ng.call(this._minWeekdaysParse,i),g!==-1?g:(g=ng.call(this._weekdaysParse,i),g!==-1?g:(g=ng.call(this._shortWeekdaysParse,i),g!==-1?g:null)))}function mA(A,M,t){var I,g,e;if(this._weekdaysParseExact)return fA.call(this,A,M,t);for(this._weekdaysParse||(this._weekdaysParse=[],this._minWeekdaysParse=[],this._shortWeekdaysParse=[],this._fullWeekdaysParse=[]), -I=0;I<7;I++){if(g=o([2e3,1]).day(I),t&&!this._fullWeekdaysParse[I]&&(this._fullWeekdaysParse[I]=new RegExp("^"+this.weekdays(g,"").replace(".",".?")+"$","i"),this._shortWeekdaysParse[I]=new RegExp("^"+this.weekdaysShort(g,"").replace(".",".?")+"$","i"),this._minWeekdaysParse[I]=new RegExp("^"+this.weekdaysMin(g,"").replace(".",".?")+"$","i")),this._weekdaysParse[I]||(e="^"+this.weekdays(g,"")+"|^"+this.weekdaysShort(g,"")+"|^"+this.weekdaysMin(g,""),this._weekdaysParse[I]=new RegExp(e.replace(".",""),"i")),t&&"dddd"===M&&this._fullWeekdaysParse[I].test(A))return I;if(t&&"ddd"===M&&this._shortWeekdaysParse[I].test(A))return I;if(t&&"dd"===M&&this._minWeekdaysParse[I].test(A))return I;if(!t&&this._weekdaysParse[I].test(A))return I}}function FA(A){if(!this.isValid())return null!=A?this:NaN;var M=this._isUTC?this._d.getUTCDay():this._d.getDay();return 
null!=A?(A=SA(A,this.localeData()),this.add(A-M,"d")):M}function kA(A){if(!this.isValid())return null!=A?this:NaN;var M=(this.day()+7-this.localeData()._week.dow)%7;return null==A?M:this.add(A-M,"d")}function RA(A){if(!this.isValid())return null!=A?this:NaN;if(null!=A){var M=zA(A,this.localeData());return this.day(this.day()%7?M:M-7)}return this.day()||7}function JA(A){return this._weekdaysParseExact?(N(this,"_weekdaysRegex")||vA.call(this),A?this._weekdaysStrictRegex:this._weekdaysRegex):(N(this,"_weekdaysRegex")||(this._weekdaysRegex=xg),this._weekdaysStrictRegex&&A?this._weekdaysStrictRegex:this._weekdaysRegex)}function GA(A){return this._weekdaysParseExact?(N(this,"_weekdaysRegex")||vA.call(this),A?this._weekdaysShortStrictRegex:this._weekdaysShortRegex):(N(this,"_weekdaysShortRegex")||(this._weekdaysShortRegex=yg),this._weekdaysShortStrictRegex&&A?this._weekdaysShortStrictRegex:this._weekdaysShortRegex)}function HA(A){return this._weekdaysParseExact?(N(this,"_weekdaysRegex")||vA.call(this),A?this._weekdaysMinStrictRegex:this._weekdaysMinRegex):(N(this,"_weekdaysMinRegex")||(this._weekdaysMinRegex=jg),this._weekdaysMinStrictRegex&&A?this._weekdaysMinStrictRegex:this._weekdaysMinRegex)}function vA(){function A(A,M){return M.length-A.length}var M,t,I,g,e,i=[],T=[],E=[],N=[];for(M=0;M<7;M++)t=o([2e3,1]).day(M),I=this.weekdaysMin(t,""),g=this.weekdaysShort(t,""),e=this.weekdays(t,""),i.push(I),T.push(g),E.push(e),N.push(I),N.push(g),N.push(e);for(i.sort(A),T.sort(A),E.sort(A),N.sort(A),M=0;M<7;M++)T[M]=MA(T[M]),E[M]=MA(E[M]),N[M]=MA(N[M]);this._weekdaysRegex=new RegExp("^("+N.join("|")+")","i"),this._weekdaysShortRegex=this._weekdaysRegex,this._weekdaysMinRegex=this._weekdaysRegex,this._weekdaysStrictRegex=new RegExp("^("+E.join("|")+")","i"),this._weekdaysShortStrictRegex=new RegExp("^("+T.join("|")+")","i"),this._weekdaysMinStrictRegex=new RegExp("^("+i.join("|")+")","i")}function bA(){return this.hours()%12||12}function XA(){return this.hours()||24}function WA(A,M){V(A,0,0,function(){return this.localeData().meridiem(this.hours(),this.minutes(),M)})}function VA(A,M){return M._meridiemParse}function PA(A){return"p"===(A+"").toLowerCase().charAt(0)}function ZA(A,M,t){return A>11?t?"pm":"PM":t?"am":"AM"}function KA(A){return A?A.toLowerCase().replace("_","-"):A}function qA(A){for(var M,t,I,g,e=0;e0;){if(I=_A(g.slice(0,M).join("-")))return I;if(t&&t.length>=M&&y(g,t,!0)>=M-1)break;M--}e++}return null}function _A(M){var t=null;if(!dg[M]&&"undefined"!=typeof A&&A&&A.exports)try{t=lg._abbr,!function(){var A=new Error('Cannot find module "./locale"');throw A.code="MODULE_NOT_FOUND",A}(),$A(t)}catch(A){}return dg[M]}function $A(A,M){var t;return A&&(t=r(M)?tM(A):AM(A,M),t&&(lg=t)),lg._abbr}function AM(A,M){if(null!==M){var t=Yg;if(M.abbr=A,null!=dg[A])w("defineLocaleOverride","use moment.updateLocale(localeName, config) to change an existing locale. 
moment.defineLocale(localeName, config) should only be used for creating a new locale See http://momentjs.com/guides/#/warnings/define-locale/ for more info."),t=dg[A]._config;else if(null!=M.parentLocale){if(null==dg[M.parentLocale])return hg[M.parentLocale]||(hg[M.parentLocale]=[]),hg[M.parentLocale].push({name:A,config:M}),null;t=dg[M.parentLocale]._config}return dg[A]=new h(d(t,M)),hg[A]&&hg[A].forEach(function(A){AM(A.name,A.config)}),$A(A),dg[A]}return delete dg[A],null}function MM(A,M){if(null!=M){var t,I=Yg;null!=dg[A]&&(I=dg[A]._config),M=d(I,M),t=new h(M),t.parentLocale=dg[A],dg[A]=t,$A(A)}else null!=dg[A]&&(null!=dg[A].parentLocale?dg[A]=dg[A].parentLocale:null!=dg[A]&&delete dg[A]);return dg[A]}function tM(A){var M;if(A&&A._locale&&A._locale._abbr&&(A=A._locale._abbr),!A)return lg;if(!I(A)){if(M=_A(A))return M;A=[A]}return qA(A)}function IM(){return jI(dg)}function gM(A){var M,t=A._a;return t&&C(A).overflow===-2&&(M=t[tg]<0||t[tg]>11?tg:t[Ig]<1||t[Ig]>eA(t[Mg],t[tg])?Ig:t[gg]<0||t[gg]>24||24===t[gg]&&(0!==t[eg]||0!==t[ig]||0!==t[Tg])?gg:t[eg]<0||t[eg]>59?eg:t[ig]<0||t[ig]>59?ig:t[Tg]<0||t[Tg]>999?Tg:-1,C(A)._overflowDayOfYear&&(MIg)&&(M=Ig),C(A)._overflowWeeks&&M===-1&&(M=Eg),C(A)._overflowWeekday&&M===-1&&(M=Ng),C(A).overflow=M),A}function eM(A){var M,t,I,g,e,i,T=A._i,E=Sg.exec(T)||zg.exec(T);if(E){for(C(A).iso=!0,M=0,t=Ug.length;MrA(g)&&(C(A)._overflowDayOfYear=!0),t=uA(g,0,A._dayOfYear),A._a[tg]=t.getUTCMonth(),A._a[Ig]=t.getUTCDate()),M=0;M<3&&null==A._a[M];++M)A._a[M]=e[M]=I[M];for(;M<7;M++)A._a[M]=e[M]=null==A._a[M]?2===M?1:0:A._a[M];24===A._a[gg]&&0===A._a[eg]&&0===A._a[ig]&&0===A._a[Tg]&&(A._nextDay=!0,A._a[gg]=0),A._d=(A._useUTC?uA:sA).apply(null,e),null!=A._tzm&&A._d.setUTCMinutes(A._d.getUTCMinutes()-A._tzm),A._nextDay&&(A._a[gg]=24)}}function nM(A){var M,t,I,g,e,i,T,E;if(M=A._w,null!=M.GG||null!=M.W||null!=M.E)e=1,i=4,t=TM(M.GG,A._a[Mg],jA(sM(),1,4).year),I=TM(M.W,1),g=TM(M.E,1),(g<1||g>7)&&(E=!0);else{e=A._locale._week.dow,i=A._locale._week.doy;var N=jA(sM(),e,i);t=TM(M.gg,A._a[Mg],N.year),I=TM(M.w,N.week),null!=M.d?(g=M.d,(g<0||g>6)&&(E=!0)):null!=M.e?(g=M.e+e,(M.e<0||M.e>6)&&(E=!0)):g=e}I<1||I>lA(t,e,i)?C(A)._overflowWeeks=!0:null!=E?C(A)._overflowWeekday=!0:(T=yA(t,I,g,e,i),A._a[Mg]=T.year,A._dayOfYear=T.dayOfYear)}function oM(A){if(A._f===M.ISO_8601)return void eM(A);A._a=[],C(A).empty=!0;var t,I,g,e,i,T=""+A._i,E=T.length,N=0;for(g=q(A._f,A._locale).match(pI)||[],t=0;t0&&C(A).unusedInput.push(i),T=T.slice(T.indexOf(I)+I.length),N+=I.length),fI[e]?(I?C(A).empty=!1:C(A).unusedTokens.push(e),gA(e,I,A)):A._strict&&!I&&C(A).unusedTokens.push(e);C(A).charsLeftOver=E-N,T.length>0&&C(A).unusedInput.push(T),A._a[gg]<=12&&C(A).bigHour===!0&&A._a[gg]>0&&(C(A).bigHour=void 0),C(A).parsedDateParts=A._a.slice(0),C(A).meridiem=A._meridiem,A._a[gg]=cM(A._locale,A._a[gg],A._meridiem),NM(A),gM(A)}function cM(A,M,t){var I;return null==t?M:null!=A.meridiemHour?A.meridiemHour(M,t):null!=A.isPM?(I=A.isPM(t),I&&M<12&&(M+=12),I||12!==M||(M=0),M):M}function CM(A){var M,t,I,g,e;if(0===A._f.length)return C(A).invalidFormat=!0,void(A._d=new Date(NaN));for(g=0;gthis.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()}function FM(){if(!r(this._isDSTShifted))return this._isDSTShifted;var A={};if(B(A,this),A=rM(A),A._a){var M=A._isUTC?o(A._a):sM(A._a);this._isDSTShifted=this.isValid()&&y(A._a,M.toArray())>0}else this._isDSTShifted=!1;return this._isDSTShifted}function kM(){return!!this.isValid()&&!this._isUTC}function 
RM(){return!!this.isValid()&&this._isUTC}function JM(){return!!this.isValid()&&(this._isUTC&&0===this._offset)}function GM(A,M){var t,I,g,e=A,T=null;return lM(A)?e={ms:A._milliseconds,d:A._days,M:A._months}:i(A)?(e={},M?e[M]=A:e.milliseconds=A):(T=Jg.exec(A))?(t="-"===T[1]?-1:1,e={y:0,d:x(T[Ig])*t,h:x(T[gg])*t,m:x(T[eg])*t,s:x(T[ig])*t,ms:x(wM(1e3*T[Tg]))*t}):(T=Gg.exec(A))?(t="-"===T[1]?-1:1,e={y:HM(T[2],t),M:HM(T[3],t),w:HM(T[4],t),d:HM(T[5],t),h:HM(T[6],t),m:HM(T[7],t),s:HM(T[8],t)}):null==e?e={}:"object"==typeof e&&("from"in e||"to"in e)&&(g=bM(sM(e.from),sM(e.to)),e={},e.ms=g.milliseconds,e.M=g.months),I=new jM(e),lM(A)&&N(A,"_locale")&&(I._locale=A._locale),I}function HM(A,M){var t=A&&parseFloat(A.replace(",","."));return(isNaN(t)?0:t)*M}function vM(A,M){var t={milliseconds:0,months:0};return t.months=M.month()-A.month()+12*(M.year()-A.year()),A.clone().add(t.months,"M").isAfter(M)&&--t.months,t.milliseconds=+M-+A.clone().add(t.months,"M"),t}function bM(A,M){var t;return A.isValid()&&M.isValid()?(M=dM(M,A),A.isBefore(M)?t=vM(A,M):(t=vM(M,A),t.milliseconds=-t.milliseconds,t.months=-t.months),t):{milliseconds:0,months:0}}function XM(A,M){return function(t,I){var g,e;return null===I||isNaN(+I)||(w(M,"moment()."+M+"(period, number) is deprecated. Please use moment()."+M+"(number, period). See http://momentjs.com/guides/#/warnings/add-inverted-param/ for more info."),e=t,t=I,I=e),t="string"==typeof t?+t:t,g=GM(t,I),WM(this,g,A),this}}function WM(A,t,I,g){var e=t._milliseconds,i=wM(t._days),T=wM(t._months);A.isValid()&&(g=null==g||g,e&&A._d.setTime(A._d.valueOf()+e*I),i&&v(A,"Date",H(A,"Date")+i*I),T&&nA(A,H(A,"Month")+T*I),g&&M.updateOffset(A,i||T))}function VM(A,M){var t=A.diff(M,"days",!0);return t<-6?"sameElse":t<-1?"lastWeek":t<0?"lastDay":t<1?"sameDay":t<2?"nextDay":t<7?"nextWeek":"sameElse"}function PM(A,t){var I=A||sM(),g=dM(I,this).startOf("day"),e=M.calendarFormat(this,g)||"sameElse",i=t&&(L(t[e])?t[e].call(this,I):t[e]);return this.format(i||this.localeData().calendar(e,this,sM(I)))}function ZM(){return new Q(this)}function KM(A,M){var t=s(A)?A:sM(A);return!(!this.isValid()||!t.isValid())&&(M=F(r(M)?"millisecond":M),"millisecond"===M?this.valueOf()>t.valueOf():t.valueOf()e&&(M=e),pt.call(this,A,M,t,I,g))}function pt(A,M,t,I,g){var e=yA(A,M,t,I,g),i=uA(e.year,0,e.dayOfYear);return this.year(i.getUTCFullYear()),this.month(i.getUTCMonth()),this.date(i.getUTCDate()),this}function Ut(A){return null==A?Math.ceil((this.month()+1)/3):this.month(3*(A-1)+this.month()%3)}function Ot(A){var M=Math.round((this.clone().startOf("day")-this.clone().startOf("year"))/864e5)+1;return null==A?M:this.add(A-M,"d")}function ft(A,M){M[Tg]=x(1e3*("0."+A))}function mt(){return this._isUTC?"UTC":""}function Ft(){return this._isUTC?"Coordinated Universal Time":""}function kt(A){return sM(1e3*A)}function Rt(){return sM.apply(null,arguments).parseZone()}function Jt(A){return A}function Gt(A,M,t,I){var g=tM(),e=o().set(I,M);return g[t](e,A)}function Ht(A,M,t){if(i(A)&&(M=A,A=void 0),A=A||"",null!=M)return Gt(A,M,t,"month");var I,g=[];for(I=0;I<12;I++)g[I]=Gt(A,I,t,"month");return g}function vt(A,M,t,I){"boolean"==typeof A?(i(M)&&(t=M,M=void 0),M=M||""):(M=A,t=M,A=!1,i(M)&&(t=M,M=void 0),M=M||"");var g=tM(),e=A?g._week.dow:0;if(null!=t)return Gt(M,(t+e)%7,I,"day");var T,E=[];for(T=0;T<7;T++)E[T]=Gt(M,(T+e)%7,I,"day");return E}function bt(A,M){return Ht(A,M,"months")}function Xt(A,M){return Ht(A,M,"monthsShort")}function Wt(A,M,t){return vt(A,M,t,"weekdays")}function Vt(A,M,t){return 
vt(A,M,t,"weekdaysShort")}function Pt(A,M,t){return vt(A,M,t,"weekdaysMin")}function Zt(){var A=this._data;return this._milliseconds=_g(this._milliseconds),this._days=_g(this._days),this._months=_g(this._months),A.milliseconds=_g(A.milliseconds),A.seconds=_g(A.seconds),A.minutes=_g(A.minutes),A.hours=_g(A.hours),A.months=_g(A.months),A.years=_g(A.years),this}function Kt(A,M,t,I){var g=GM(M,t);return A._milliseconds+=I*g._milliseconds,A._days+=I*g._days,A._months+=I*g._months,A._bubble()}function qt(A,M){return Kt(this,A,M,1)}function _t(A,M){return Kt(this,A,M,-1)}function $t(A){return A<0?Math.floor(A):Math.ceil(A)}function AI(){var A,M,t,I,g,e=this._milliseconds,i=this._days,T=this._months,E=this._data;return e>=0&&i>=0&&T>=0||e<=0&&i<=0&&T<=0||(e+=864e5*$t(tI(T)+i),i=0,T=0),E.milliseconds=e%1e3,A=u(e/1e3),E.seconds=A%60,M=u(A/60),E.minutes=M%60,t=u(M/60),E.hours=t%24,i+=u(t/24),g=u(MI(i)),T+=g,i-=$t(tI(g)),I=u(T/12),T%=12,E.days=i,E.months=T,E.years=I,this}function MI(A){return 4800*A/146097}function tI(A){return 146097*A/4800}function II(A){var M,t,I=this._milliseconds;if(A=F(A),"month"===A||"year"===A)return M=this._days+I/864e5,t=this._months+MI(M),"month"===A?t:t/12;switch(M=this._days+Math.round(tI(this._months)),A){case"week":return M/7+I/6048e5;case"day":return M+I/864e5;case"hour":return 24*M+I/36e5;case"minute":return 1440*M+I/6e4;case"second":return 86400*M+I/1e3;case"millisecond":return Math.floor(864e5*M)+I;default:throw new Error("Unknown unit "+A)}}function gI(){return this._milliseconds+864e5*this._days+this._months%12*2592e6+31536e6*x(this._months/12)}function eI(A){return function(){return this.as(A)}}function iI(A){return A=F(A),this[A+"s"]()}function TI(A){return function(){return this._data[A]}}function EI(){return u(this.days()/7)}function NI(A,M,t,I,g){return g.relativeTime(M||1,!!t,A,I)}function nI(A,M,t){var I=GM(A).abs(),g=ae(I.as("s")),e=ae(I.as("m")),i=ae(I.as("h")),T=ae(I.as("d")),E=ae(I.as("M")),N=ae(I.as("y")),n=g0,n[4]=t,NI.apply(null,n)}function oI(A){return void 0===A?ae:"function"==typeof A&&(ae=A,!0)}function cI(A,M){return void 0!==De[A]&&(void 0===M?De[A]:(De[A]=M,!0))}function CI(A){var M=this.localeData(),t=nI(this,!A,M);return A&&(t=M.pastFuture(+this,t)),M.postformat(t)}function aI(){var A,M,t,I=re(this._milliseconds)/1e3,g=re(this._days),e=re(this._months);A=u(I/60),M=u(A/60),I%=60,A%=60,t=u(e/12),e%=12;var i=t,T=e,E=g,N=M,n=A,o=I,c=this.asSeconds();return c?(c<0?"-":"")+"P"+(i?i+"Y":"")+(T?T+"M":"")+(E?E+"D":"")+(N||n||o?"T":"")+(N?N+"H":"")+(n?n+"M":"")+(o?o+"S":""):"P0D"}var DI,rI;rI=Array.prototype.some?Array.prototype.some:function(A){for(var M=Object(this),t=M.length>>>0,I=0;I68?1900:2e3)};var rg=G("FullYear",!0);V("w",["ww",2],"wo","week"),V("W",["WW",2],"Wo","isoWeek"),m("week","w"),m("isoWeek","W"),R("week",5),R("isoWeek",5),_("w",GI),_("ww",GI,FI),_("W",GI),_("WW",GI,FI),IA(["w","ww","W","WW"],function(A,M,t,I){M[I.substr(0,1)]=x(A)});var Bg={dow:0,doy:6};V("d",0,"do","day"),V("dd",0,0,function(A){return this.localeData().weekdaysMin(this,A)}),V("ddd",0,0,function(A){return this.localeData().weekdaysShort(this,A)}),V("dddd",0,0,function(A){return this.localeData().weekdays(this,A)}),V("e",0,0,"weekday"),V("E",0,0,"isoWeekday"),m("day","d"),m("weekday","e"),m("isoWeekday","E"),R("day",11),R("weekday",11),R("isoWeekday",11),_("d",GI),_("e",GI),_("E",GI),_("dd",function(A,M){return M.weekdaysMinRegex(A)}),_("ddd",function(A,M){return M.weekdaysShortRegex(A)}),_("dddd",function(A,M){return 
M.weekdaysRegex(A)}),IA(["dd","ddd","dddd"],function(A,M,t,I){var g=t._locale.weekdaysParse(A,I,t._strict);null!=g?M.d=g:C(t).invalidWeekday=A}),IA(["d","e","E"],function(A,M,t,I){M[I]=x(A)});var Qg="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),sg="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),ug="Su_Mo_Tu_We_Th_Fr_Sa".split("_"),xg=_I,yg=_I,jg=_I;V("H",["HH",2],0,"hour"),V("h",["hh",2],0,bA),V("k",["kk",2],0,XA),V("hmm",0,0,function(){return""+bA.apply(this)+W(this.minutes(),2)}),V("hmmss",0,0,function(){return""+bA.apply(this)+W(this.minutes(),2)+W(this.seconds(),2)}),V("Hmm",0,0,function(){return""+this.hours()+W(this.minutes(),2)}),V("Hmmss",0,0,function(){return""+this.hours()+W(this.minutes(),2)+W(this.seconds(),2)}),WA("a",!0),WA("A",!1),m("hour","h"),R("hour",13),_("a",VA),_("A",VA),_("H",GI),_("h",GI),_("HH",GI,FI),_("hh",GI,FI),_("hmm",HI),_("hmmss",vI),_("Hmm",HI),_("Hmmss",vI),tA(["H","HH"],gg),tA(["a","A"],function(A,M,t){t._isPm=t._locale.isPM(A),t._meridiem=A}),tA(["h","hh"],function(A,M,t){M[gg]=x(A),C(t).bigHour=!0}),tA("hmm",function(A,M,t){var I=A.length-2;M[gg]=x(A.substr(0,I)),M[eg]=x(A.substr(I)),C(t).bigHour=!0}),tA("hmmss",function(A,M,t){var I=A.length-4,g=A.length-2;M[gg]=x(A.substr(0,I)),M[eg]=x(A.substr(I,2)),M[ig]=x(A.substr(g)),C(t).bigHour=!0}),tA("Hmm",function(A,M,t){var I=A.length-2;M[gg]=x(A.substr(0,I)),M[eg]=x(A.substr(I))}),tA("Hmmss",function(A,M,t){var I=A.length-4,g=A.length-2;M[gg]=x(A.substr(0,I)),M[eg]=x(A.substr(I,2)),M[ig]=x(A.substr(g))});var lg,wg=/[ap]\.?m?\.?/i,Lg=G("Hours",!0),Yg={calendar:lI,longDateFormat:wI,invalidDate:LI,ordinal:YI,ordinalParse:dI,relativeTime:hI,months:cg,monthsShort:Cg,week:Bg,weekdays:Qg,weekdaysMin:ug,weekdaysShort:sg,meridiemParse:wg},dg={},hg={},Sg=/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,zg=/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,pg=/Z|[+-]\d\d(?::?\d\d)?/,Ug=[["YYYYYY-MM-DD",/[+-]\d{6}-\d\d-\d\d/],["YYYY-MM-DD",/\d{4}-\d\d-\d\d/],["GGGG-[W]WW-E",/\d{4}-W\d\d-\d/],["GGGG-[W]WW",/\d{4}-W\d\d/,!1],["YYYY-DDD",/\d{4}-\d{3}/],["YYYY-MM",/\d{4}-\d\d/,!1],["YYYYYYMMDD",/[+-]\d{10}/],["YYYYMMDD",/\d{8}/],["GGGG[W]WWE",/\d{4}W\d{3}/],["GGGG[W]WW",/\d{4}W\d{2}/,!1],["YYYYDDD",/\d{7}/]],Og=[["HH:mm:ss.SSSS",/\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss,SSSS",/\d\d:\d\d:\d\d,\d+/],["HH:mm:ss",/\d\d:\d\d:\d\d/],["HH:mm",/\d\d:\d\d/],["HHmmss.SSSS",/\d\d\d\d\d\d\.\d+/],["HHmmss,SSSS",/\d\d\d\d\d\d,\d+/],["HHmmss",/\d\d\d\d\d\d/],["HHmm",/\d\d\d\d/],["HH",/\d\d/]],fg=/^\/?Date\((\-?\d+)/i;M.createFromInputFallback=l("value provided is not in a recognized ISO format. moment construction falls back to js Date(), which is not reliable across all browsers and versions. Non ISO date formats are discouraged and will be removed in an upcoming major release. Please refer to http://momentjs.com/guides/#/warnings/js-date/ for more info.",function(A){A._d=new Date(A._i+(A._useUTC?" UTC":""))}),M.ISO_8601=function(){};var mg=l("moment().min is deprecated, use moment.max instead. 
http://momentjs.com/guides/#/warnings/min-max/",function(){var A=sM.apply(null,arguments);return this.isValid()&&A.isValid()?Athis?this:A:D()}),kg=function(){return Date.now?Date.now():+new Date};LM("Z",":"),LM("ZZ",""),_("Z",KI),_("ZZ",KI),tA(["Z","ZZ"],function(A,M,t){t._useUTC=!0,t._tzm=YM(KI,A)});var Rg=/([\+\-]|\d\d)/gi;M.updateOffset=function(){};var Jg=/^(\-)?(?:(\d*)[. ])?(\d+)\:(\d+)(?:\:(\d+)(\.\d*)?)?$/,Gg=/^(-)?P(?:(-?[0-9,.]*)Y)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)W)?(?:(-?[0-9,.]*)D)?(?:T(?:(-?[0-9,.]*)H)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)S)?)?$/;GM.fn=jM.prototype;var Hg=XM(1,"add"),vg=XM(-1,"subtract");M.defaultFormat="YYYY-MM-DDTHH:mm:ssZ",M.defaultFormatUtc="YYYY-MM-DDTHH:mm:ss[Z]";var bg=l("moment().lang() is deprecated. Instead, use moment().localeData() to get the language configuration. Use moment().locale() to change languages.",function(A){return void 0===A?this.localeData():this.locale(A)});V(0,["gg",2],0,function(){return this.weekYear()%100}),V(0,["GG",2],0,function(){return this.isoWeekYear()%100}),Lt("gggg","weekYear"),Lt("ggggg","weekYear"),Lt("GGGG","isoWeekYear"),Lt("GGGGG","isoWeekYear"),m("weekYear","gg"),m("isoWeekYear","GG"),R("weekYear",1),R("isoWeekYear",1),_("G",PI),_("g",PI),_("GG",GI,FI),_("gg",GI,FI),_("GGGG",XI,RI),_("gggg",XI,RI),_("GGGGG",WI,JI),_("ggggg",WI,JI),IA(["gggg","ggggg","GGGG","GGGGG"],function(A,M,t,I){M[I.substr(0,2)]=x(A)}),IA(["gg","GG"],function(A,t,I,g){t[g]=M.parseTwoDigitYear(A)}),V("Q",0,"Qo","quarter"),m("quarter","Q"),R("quarter",7),_("Q",mI),tA("Q",function(A,M){M[tg]=3*(x(A)-1)}),V("D",["DD",2],"Do","date"),m("date","D"),R("date",9),_("D",GI),_("DD",GI,FI),_("Do",function(A,M){return A?M._ordinalParse:M._ordinalParseLenient}),tA(["D","DD"],Ig),tA("Do",function(A,M){M[Ig]=x(A.match(GI)[0],10)});var Xg=G("Date",!0);V("DDD",["DDDD",3],"DDDo","dayOfYear"),m("dayOfYear","DDD"),R("dayOfYear",4),_("DDD",bI),_("DDDD",kI),tA(["DDD","DDDD"],function(A,M,t){t._dayOfYear=x(A)}),V("m",["mm",2],0,"minute"),m("minute","m"),R("minute",14),_("m",GI),_("mm",GI,FI),tA(["m","mm"],eg);var Wg=G("Minutes",!1);V("s",["ss",2],0,"second"),m("second","s"),R("second",15),_("s",GI),_("ss",GI,FI),tA(["s","ss"],ig);var Vg=G("Seconds",!1);V("S",0,0,function(){return~~(this.millisecond()/100)}),V(0,["SS",2],0,function(){return~~(this.millisecond()/10)}),V(0,["SSS",3],0,"millisecond"),V(0,["SSSS",4],0,function(){return 10*this.millisecond()}),V(0,["SSSSS",5],0,function(){return 100*this.millisecond()}),V(0,["SSSSSS",6],0,function(){return 1e3*this.millisecond(); -}),V(0,["SSSSSSS",7],0,function(){return 1e4*this.millisecond()}),V(0,["SSSSSSSS",8],0,function(){return 1e5*this.millisecond()}),V(0,["SSSSSSSSS",9],0,function(){return 1e6*this.millisecond()}),m("millisecond","ms"),R("millisecond",16),_("S",bI,mI),_("SS",bI,FI),_("SSS",bI,kI);var Pg;for(Pg="SSSS";Pg.length<=9;Pg+="S")_(Pg,VI);for(Pg="S";Pg.length<=9;Pg+="S")tA(Pg,ft);var Zg=G("Milliseconds",!1);V("z",0,0,"zoneAbbr"),V("zz",0,0,"zoneName");var 
Kg=Q.prototype;Kg.add=Hg,Kg.calendar=PM,Kg.clone=ZM,Kg.diff=tt,Kg.endOf=Dt,Kg.format=Tt,Kg.from=Et,Kg.fromNow=Nt,Kg.to=nt,Kg.toNow=ot,Kg.get=b,Kg.invalidAt=lt,Kg.isAfter=KM,Kg.isBefore=qM,Kg.isBetween=_M,Kg.isSame=$M,Kg.isSameOrAfter=At,Kg.isSameOrBefore=Mt,Kg.isValid=yt,Kg.lang=bg,Kg.locale=ct,Kg.localeData=Ct,Kg.max=Fg,Kg.min=mg,Kg.parsingFlags=jt,Kg.set=X,Kg.startOf=at,Kg.subtract=vg,Kg.toArray=st,Kg.toObject=ut,Kg.toDate=Qt,Kg.toISOString=et,Kg.inspect=it,Kg.toJSON=xt,Kg.toString=gt,Kg.unix=Bt,Kg.valueOf=rt,Kg.creationData=wt,Kg.year=rg,Kg.isLeapYear=QA,Kg.weekYear=Yt,Kg.isoWeekYear=dt,Kg.quarter=Kg.quarters=Ut,Kg.month=oA,Kg.daysInMonth=cA,Kg.week=Kg.weeks=dA,Kg.isoWeek=Kg.isoWeeks=hA,Kg.weeksInYear=St,Kg.isoWeeksInYear=ht,Kg.date=Xg,Kg.day=Kg.days=FA,Kg.weekday=kA,Kg.isoWeekday=RA,Kg.dayOfYear=Ot,Kg.hour=Kg.hours=Lg,Kg.minute=Kg.minutes=Wg,Kg.second=Kg.seconds=Vg,Kg.millisecond=Kg.milliseconds=Zg,Kg.utcOffset=SM,Kg.utc=pM,Kg.local=UM,Kg.parseZone=OM,Kg.hasAlignedHourOffset=fM,Kg.isDST=mM,Kg.isLocal=kM,Kg.isUtcOffset=RM,Kg.isUtc=JM,Kg.isUTC=JM,Kg.zoneAbbr=mt,Kg.zoneName=Ft,Kg.dates=l("dates accessor is deprecated. Use date instead.",Xg),Kg.months=l("months accessor is deprecated. Use month instead",oA),Kg.years=l("years accessor is deprecated. Use year instead",rg),Kg.zone=l("moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/",zM),Kg.isDSTShifted=l("isDSTShifted is deprecated. See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information",FM);var qg=h.prototype;qg.calendar=S,qg.longDateFormat=z,qg.invalidDate=p,qg.ordinal=U,qg.preparse=Jt,qg.postformat=Jt,qg.relativeTime=O,qg.pastFuture=f,qg.set=Y,qg.months=iA,qg.monthsShort=TA,qg.monthsParse=NA,qg.monthsRegex=aA,qg.monthsShortRegex=CA,qg.week=wA,qg.firstDayOfYear=YA,qg.firstDayOfWeek=LA,qg.weekdays=pA,qg.weekdaysMin=OA,qg.weekdaysShort=UA,qg.weekdaysParse=mA,qg.weekdaysRegex=JA,qg.weekdaysShortRegex=GA,qg.weekdaysMinRegex=HA,qg.isPM=PA,qg.meridiem=ZA,$A("en",{ordinalParse:/\d{1,2}(th|st|nd|rd)/,ordinal:function(A){var M=A%10,t=1===x(A%100/10)?"th":1===M?"st":2===M?"nd":3===M?"rd":"th";return A+t}}),M.lang=l("moment.lang is deprecated. Use moment.locale instead.",$A),M.langData=l("moment.langData is deprecated. Use moment.localeData instead.",tM);var _g=Math.abs,$g=eI("ms"),Ae=eI("s"),Me=eI("m"),te=eI("h"),Ie=eI("d"),ge=eI("w"),ee=eI("M"),ie=eI("y"),Te=TI("milliseconds"),Ee=TI("seconds"),Ne=TI("minutes"),ne=TI("hours"),oe=TI("days"),ce=TI("months"),Ce=TI("years"),ae=Math.round,De={s:45,m:45,h:22,d:26,M:11},re=Math.abs,Be=jM.prototype;return Be.abs=Zt,Be.add=qt,Be.subtract=_t,Be.as=II,Be.asMilliseconds=$g,Be.asSeconds=Ae,Be.asMinutes=Me,Be.asHours=te,Be.asDays=Ie,Be.asWeeks=ge,Be.asMonths=ee,Be.asYears=ie,Be.valueOf=gI,Be._bubble=AI,Be.get=iI,Be.milliseconds=Te,Be.seconds=Ee,Be.minutes=Ne,Be.hours=ne,Be.days=oe,Be.weeks=EI,Be.months=ce,Be.years=Ce,Be.humanize=CI,Be.toISOString=aI,Be.toString=aI,Be.toJSON=aI,Be.locale=ct,Be.localeData=Ct,Be.toIsoString=l("toIsoString() is deprecated. 
Please use toISOString() instead (notice the capitals)",aI),Be.lang=bg,V("X",0,0,"unix"),V("x",0,0,"valueOf"),_("x",PI),_("X",qI),tA("X",function(A,M,t){t._d=new Date(1e3*parseFloat(A,10))}),tA("x",function(A,M,t){t._d=new Date(x(A))}),M.version="2.17.1",t(sM),M.fn=Kg,M.min=xM,M.max=yM,M.now=kg,M.utc=o,M.unix=kt,M.months=bt,M.isDate=T,M.locale=$A,M.invalid=D,M.duration=GM,M.isMoment=s,M.weekdays=Wt,M.parseZone=Rt,M.localeData=tM,M.isDuration=lM,M.monthsShort=Xt,M.weekdaysMin=Pt,M.defineLocale=AM,M.updateLocale=MM,M.locales=IM,M.weekdaysShort=Vt,M.normalizeUnits=F,M.relativeTimeRounding=oI,M.relativeTimeThreshold=cI,M.calendarFormat=VM,M.prototype=Kg,M})}).call(M,t(215)(A))},function(A,M,t){"use strict";var I=t(284).default,g=t(285).default,e=t(180).default;M.__esModule=!0;var i=function(A){return I(g({values:function(){var A=this;return e(this).map(function(M){return A[M]})}}),A)},T={SIZES:{large:"lg",medium:"md",small:"sm",xsmall:"xs",lg:"lg",md:"md",sm:"sm",xs:"xs"},GRID_COLUMNS:12},E=i({LARGE:"large",MEDIUM:"medium",SMALL:"small",XSMALL:"xsmall"});M.Sizes=E;var N=i({SUCCESS:"success",WARNING:"warning",DANGER:"danger",INFO:"info"});M.State=N;var n="default";M.DEFAULT=n;var o="primary";M.PRIMARY=o;var c="link";M.LINK=c;var C="inverse";M.INVERSE=C,M.default=T},function(A,M){"use strict";function t(){for(var A=arguments.length,M=Array(A),t=0;t=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t},M.__esModule=!0},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){return A.replace(/[.*+?^${}()|[\]\\]/g,"\\$&")}function e(A){for(var M="",t=[],I=[],e=void 0,i=0,T=/:([a-zA-Z_$][a-zA-Z0-9_$]*)|\*\*|\*|\(|\)/g;e=T.exec(A);)e.index!==i&&(I.push(A.slice(i,e.index)),M+=g(A.slice(i,e.index))),e[1]?(M+="([^/]+)",t.push(e[1])):"**"===e[0]?(M+="(.*)",t.push("splat")):"*"===e[0]?(M+="(.*?)",t.push("splat")):"("===e[0]?M+="(?:":")"===e[0]&&(M+=")?"),I.push(e[0]),i=T.lastIndex;return i!==A.length&&(I.push(A.slice(i,A.length)),M+=g(A.slice(i,A.length))),{pattern:A,regexpSource:M,paramNames:t,tokens:I}}function i(A){return C[A]||(C[A]=e(A)),C[A]}function T(A,M){"/"!==A.charAt(0)&&(A="/"+A);var t=i(A),I=t.regexpSource,g=t.paramNames,e=t.tokens;"/"!==A.charAt(A.length-1)&&(I+="/?"),"*"===e[e.length-1]&&(I+="$");var T=M.match(new RegExp("^"+I,"i"));if(null==T)return null;var E=T[0],N=M.substr(E.length);if(N){if("/"!==E.charAt(E.length-1))return null;N="/"+N}return{remainingPathname:N,paramNames:g,paramValues:T.slice(1).map(function(A){return A&&decodeURIComponent(A)})}}function E(A){return i(A).paramNames}function N(A,M){var t=T(A,M);if(!t)return null;var I=t.paramNames,g=t.paramValues,e={};return I.forEach(function(A,M){e[A]=g[M]}),e}function n(A,M){M=M||{};for(var t=i(A),I=t.tokens,g=0,e="",T=0,E=void 0,N=void 0,n=void 0,o=0,C=I.length;o0?void 0:(0,c.default)(!1),null!=n&&(e+=encodeURI(n))):"("===E?g+=1:")"===E?g-=1:":"===E.charAt(0)?(N=E.substring(1),n=M[N],null!=n||g>0?void 0:(0,c.default)(!1),null!=n&&(e+=encodeURIComponent(n))):e+=E;return e.replace(/\/+/g,"/")}M.__esModule=!0,M.compilePattern=i,M.matchPattern=T,M.getParamNames=E,M.getParams=N,M.formatPattern=n;var o=t(16),c=I(o),C=Object.create(null)},function(A,M){"use strict";M.__esModule=!0;var t="PUSH";M.PUSH=t;var I="REPLACE";M.REPLACE=I;var g="POP";M.POP=g,M.default={PUSH:t,REPLACE:I,POP:g}},function(A,M,t){"use strict";function I(A,M){return(A&M)===M}var 
g=t(3),e={MUST_USE_ATTRIBUTE:1,MUST_USE_PROPERTY:2,HAS_SIDE_EFFECTS:4,HAS_BOOLEAN_VALUE:8,HAS_NUMERIC_VALUE:16,HAS_POSITIVE_NUMERIC_VALUE:48,HAS_OVERLOADED_BOOLEAN_VALUE:64,injectDOMPropertyConfig:function(A){var M=e,t=A.Properties||{},i=A.DOMAttributeNamespaces||{},E=A.DOMAttributeNames||{},N=A.DOMPropertyNames||{},n=A.DOMMutationMethods||{};A.isCustomAttribute&&T._isCustomAttributeFunctions.push(A.isCustomAttribute);for(var o in t){T.properties.hasOwnProperty(o)?g(!1):void 0;var c=o.toLowerCase(),C=t[o],a={attributeName:c,attributeNamespace:null,propertyName:o,mutationMethod:null,mustUseAttribute:I(C,M.MUST_USE_ATTRIBUTE),mustUseProperty:I(C,M.MUST_USE_PROPERTY),hasSideEffects:I(C,M.HAS_SIDE_EFFECTS),hasBooleanValue:I(C,M.HAS_BOOLEAN_VALUE),hasNumericValue:I(C,M.HAS_NUMERIC_VALUE),hasPositiveNumericValue:I(C,M.HAS_POSITIVE_NUMERIC_VALUE),hasOverloadedBooleanValue:I(C,M.HAS_OVERLOADED_BOOLEAN_VALUE)};if(a.mustUseAttribute&&a.mustUseProperty?g(!1):void 0,!a.mustUseProperty&&a.hasSideEffects?g(!1):void 0,a.hasBooleanValue+a.hasNumericValue+a.hasOverloadedBooleanValue<=1?void 0:g(!1),E.hasOwnProperty(o)){var D=E[o];a.attributeName=D}i.hasOwnProperty(o)&&(a.attributeNamespace=i[o]),N.hasOwnProperty(o)&&(a.propertyName=N[o]),n.hasOwnProperty(o)&&(a.mutationMethod=n[o]),T.properties[o]=a}}},i={},T={ID_ATTRIBUTE_NAME:"data-reactid",properties:{},getPossibleStandardName:null,_isCustomAttributeFunctions:[],isCustomAttribute:function(A){for(var M=0;M1){var M=A.indexOf(C,1);return M>-1?A.substr(0,M):A}return null},traverseEnterLeave:function(A,M,t,I,g){var e=N(A,M);e!==A&&n(A,e,t,I,!1,!0),e!==M&&n(e,M,t,g,!0,!1)},traverseTwoPhase:function(A,M,t){A&&(n("",A,M,t,!0,!1),n(A,"",M,t,!1,!0))},traverseTwoPhaseSkipTarget:function(A,M,t){A&&(n("",A,M,t,!0,!0),n(A,"",M,t,!0,!0))},traverseAncestors:function(A,M,t){n("",A,M,t,!0,!1)},getFirstCommonAncestorID:N,_getNextDescendantID:E,isAncestorIDOf:i,SEPARATOR:C};A.exports=r},function(A,M,t){var I=t(35),g=t(11)("toStringTag"),e="Arguments"==I(function(){return arguments}()),i=function(A,M){try{return A[M]}catch(A){}};A.exports=function(A){var M,t,T;return void 0===A?"Undefined":null===A?"Null":"string"==typeof(t=i(M=Object(A),g))?t:e?I(M):"Object"==(T=I(M))&&"function"==typeof M.callee?"Arguments":T}},function(A,M,t){var I=t(35);A.exports=Object("z").propertyIsEnumerable(0)?Object:function(A){return"String"==I(A)?A.split(""):Object(A)}},function(A,M){M.f={}.propertyIsEnumerable},function(A,M,t){"use strict";var I={};A.exports=I},function(A,M,t){"use strict";function I(A,M,t){var I=0;return o.default.Children.map(A,function(A){if(o.default.isValidElement(A)){var g=I;return I++,M.call(t,A,g)}return A})}function g(A,M,t){var I=0;return o.default.Children.forEach(A,function(A){o.default.isValidElement(A)&&(M.call(t,A,I),I++)})}function e(A){var M=0;return o.default.Children.forEach(A,function(A){o.default.isValidElement(A)&&M++}),M}function i(A){var M=!1;return o.default.Children.forEach(A,function(A){!M&&o.default.isValidElement(A)&&(M=!0)}),M}function T(A,M){var t=void 0;return g(A,function(I,g){!t&&M(I,g,A)&&(t=I)}),t}function E(A,M,t){var I=0,g=[];return o.default.Children.forEach(A,function(A){o.default.isValidElement(A)&&(M.call(t,A,I)&&g.push(A),I++)}),g}var N=t(12).default;M.__esModule=!0;var n=t(2),o=N(n);M.default={map:I,forEach:g,numberOf:e,find:T,findValidComponents:E,hasValidComponent:i},A.exports=M.default},function(A,M){var t=A.exports={version:"1.2.6"};"number"==typeof __e&&(__e=t)},function(A,M,t){"use strict";function I(A){return 
A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0}),M.default=function(A){return(0,T.default)(e.default.findDOMNode(A))};var g=t(34),e=I(g),i=t(83),T=I(i);A.exports=M.default},function(A,M,t){"use strict";var I=t(316),g=t(729),e=t(329),i=t(338),T=t(339),E=t(3),N=(t(7),{}),n=null,o=function(A,M){A&&(g.executeDispatchesInOrder(A,M),A.isPersistent()||A.constructor.release(A))},c=function(A){return o(A,!0)},C=function(A){return o(A,!1)},a=null,D={injection:{injectMount:g.injection.injectMount,injectInstanceHandle:function(A){a=A},getInstanceHandle:function(){return a},injectEventPluginOrder:I.injectEventPluginOrder,injectEventPluginsByName:I.injectEventPluginsByName},eventNameDispatchConfigs:I.eventNameDispatchConfigs,registrationNameModules:I.registrationNameModules,putListener:function(A,M,t){"function"!=typeof t?E(!1):void 0;var g=N[M]||(N[M]={});g[A]=t;var e=I.registrationNameModules[M];e&&e.didPutListener&&e.didPutListener(A,M,t)},getListener:function(A,M){var t=N[M];return t&&t[A]},deleteListener:function(A,M){var t=I.registrationNameModules[M];t&&t.willDeleteListener&&t.willDeleteListener(A,M);var g=N[M];g&&delete g[A]},deleteAllListeners:function(A){for(var M in N)if(N[M][A]){var t=I.registrationNameModules[M];t&&t.willDeleteListener&&t.willDeleteListener(A,M),delete N[M][A]}},extractEvents:function(A,M,t,g,e){for(var T,E=I.plugins,N=0;Nn;)if(T=E[n++],T!=T)return!0}else for(;N>n;n++)if((A||n in E)&&E[n]===t)return A||n||0;return!A&&-1}}},function(A,M,t){"use strict";var I=t(5),g=t(1),e=t(26),i=t(68),T=t(55),E=t(79),N=t(63),n=t(9),o=t(6),c=t(111),C=t(81),a=t(143);A.exports=function(A,M,t,D,r,B){var Q=I[A],s=Q,u=r?"set":"add",x=s&&s.prototype,y={},j=function(A){var M=x[A];e(x,A,"delete"==A?function(A){return!(B&&!n(A))&&M.call(this,0===A?0:A)}:"has"==A?function(A){return!(B&&!n(A))&&M.call(this,0===A?0:A)}:"get"==A?function(A){return B&&!n(A)?void 0:M.call(this,0===A?0:A)}:"add"==A?function(A){return M.call(this,0===A?0:A),this}:function(A,t){return M.call(this,0===A?0:A,t),this})};if("function"==typeof s&&(B||x.forEach&&!o(function(){(new s).entries().next()}))){var l=new s,w=l[u](B?{}:-0,1)!=l,L=o(function(){l.has(1)}),Y=c(function(A){new s(A)}),d=!B&&o(function(){for(var A=new s,M=5;M--;)A[u](M,M);return!A.has(-0)});Y||(s=M(function(M,t){N(M,s,A);var I=a(new Q,M,s);return void 0!=t&&E(t,r,I[u],I),I}),s.prototype=x,x.constructor=s),(L||d)&&(j("delete"),j("has"),r&&j("get")),(d||w)&&j(u),B&&x.clear&&delete x.clear}else s=D.getConstructor(M,A,r,u),i(s.prototype,t),T.NEED=!0;return C(s,A),y[A]=s,g(g.G+g.W+g.F*(s!=Q),y),B||D.setStrong(s,A,r),s}},function(A,M,t){"use strict";var I=t(25),g=t(26),e=t(6),i=t(36),T=t(11);A.exports=function(A,M,t){var E=T(A),N=t(i,E,""[A]),n=N[0],o=N[1];e(function(){var M={};return M[E]=function(){return 7},7!=""[A](M)})&&(g(String.prototype,A,n),I(RegExp.prototype,E,2==M?function(A,M){return o.call(A,this,M)}:function(A){return o.call(A,this)}))}},function(A,M,t){"use strict";var I=t(4);A.exports=function(){var A=I(this),M="";return A.global&&(M+="g"),A.ignoreCase&&(M+="i"),A.multiline&&(M+="m"),A.unicode&&(M+="u"),A.sticky&&(M+="y"),M}},function(A,M){A.exports=function(A,M,t){var I=void 0===t;switch(M.length){case 0:return I?A():A.call(t);case 1:return I?A(M[0]):A.call(t,M[0]);case 2:return I?A(M[0],M[1]):A.call(t,M[0],M[1]);case 3:return I?A(M[0],M[1],M[2]):A.call(t,M[0],M[1],M[2]);case 4:return I?A(M[0],M[1],M[2],M[3]):A.call(t,M[0],M[1],M[2],M[3])}return A.apply(t,M)}},function(A,M,t){var 
I=t(9),g=t(35),e=t(11)("match");A.exports=function(A){var M;return I(A)&&(void 0!==(M=A[e])?!!M:"RegExp"==g(A))}},function(A,M,t){var I=t(11)("iterator"),g=!1;try{var e=[7][I]();e.return=function(){g=!0},Array.from(e,function(){throw 2})}catch(A){}A.exports=function(A,M){if(!M&&!g)return!1;var t=!1;try{var e=[7],i=e[I]();i.next=function(){return{done:t=!0}},e[I]=function(){return i},A(e)}catch(A){}return t}},function(A,M,t){A.exports=t(64)||!t(6)(function(){var A=Math.random();__defineSetter__.call(null,A,function(){}),delete t(5)[A]})},function(A,M){M.f=Object.getOwnPropertySymbols},function(A,M,t){var I=t(5),g="__core-js_shared__",e=I[g]||(I[g]={});A.exports=function(A){return e[A]||(e[A]={})}},function(A,M,t){for(var I,g=t(5),e=t(25),i=t(71),T=i("typed_array"),E=i("view"),N=!(!g.ArrayBuffer||!g.DataView),n=N,o=0,c=9,C="Int8Array,Uint8Array,Uint8ClampedArray,Int16Array,Uint16Array,Int32Array,Uint32Array,Float32Array,Float64Array".split(",");o1?I-1:0),e=1;e":">","<":"<",'"':""","'":"'"},e=/[&><"']/g;A.exports=I},function(A,M,t){"use strict";var I=t(20),g=/^[ \r\n\t\f]/,e=/<(!--|link|noscript|meta|script|style)[ \r\n\t\f\/>]/,i=function(A,M){A.innerHTML=M};if("undefined"!=typeof MSApp&&MSApp.execUnsafeLocalFunction&&(i=function(A,M){MSApp.execUnsafeLocalFunction(function(){A.innerHTML=M})}),I.canUseDOM){var T=document.createElement("div");T.innerHTML=" ",""===T.innerHTML&&(i=function(A,M){if(A.parentNode&&A.parentNode.replaceChild(A,A),g.test(M)||"<"===M[0]&&e.test(M)){ -A.innerHTML=String.fromCharCode(65279)+M;var t=A.firstChild;1===t.data.length?A.removeChild(t):t.deleteData(0,1)}else A.innerHTML=M})}A.exports=i},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(2),e=I(g),i=function(A){var M=A.label,t=A.id,I=A.name,g=A.value,i=A.onChange,T=A.type,E=A.spellCheck,N=A.required,n=A.readonly,o=A.autoComplete,c=A.align,C=A.className,a=e.default.createElement("input",{id:t,name:I,value:g,onChange:i,className:"ig-text",type:T,spellCheck:E,required:N,autoComplete:o});return n&&(a=e.default.createElement("input",{id:t,name:I,value:g,onChange:i,className:"ig-text",type:T,spellCheck:E,required:N,autoComplete:o,disabled:!0})),e.default.createElement("div",{className:"input-group "+c+" "+C},a,e.default.createElement("i",{className:"ig-helpers"}),e.default.createElement("label",{className:"ig-label"},M))};M.default=i},function(A,M,t){"use strict";var I=t(19),g=t(70),e=t(17);A.exports=function(A){for(var M=I(this),t=e(M.length),i=arguments.length,T=g(i>1?arguments[1]:void 0,t),E=i>2?arguments[2]:void 0,N=void 0===E?t:g(E,t);N>T;)M[T++]=A;return M}},function(A,M,t){"use strict";var I=t(14),g=t(56);A.exports=function(A,M,t){M in A?I.f(A,M,g(0,t)):A[M]=t}},function(A,M,t){var I=t(9),g=t(5).document,e=I(g)&&I(g.createElement);A.exports=function(A){return e?g.createElement(A):{}}},function(A,M){A.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(A,M,t){var I=t(11)("match");A.exports=function(A){var M=/./;try{"/./"[A](M)}catch(t){try{return M[I]=!1,!"/./"[A](M)}catch(A){}}return!0}},function(A,M,t){A.exports=t(5).document&&document.documentElement},function(A,M,t){var I=t(9),g=t(151).set;A.exports=function(A,M,t){var e,i=M.constructor;return i!==t&&"function"==typeof i&&(e=i.prototype)!==t.prototype&&I(e)&&g&&g(A,e),A}},function(A,M,t){var I=t(80),g=t(11)("iterator"),e=Array.prototype;A.exports=function(A){return void 
0!==A&&(I.Array===A||e[g]===A)}},function(A,M,t){var I=t(35);A.exports=Array.isArray||function(A){return"Array"==I(A)}},function(A,M,t){"use strict";var I=t(65),g=t(56),e=t(81),i={};t(25)(i,t(11)("iterator"),function(){return this}),A.exports=function(A,M,t){A.prototype=I(i,{next:g(1,t)}),e(A,M+" Iterator")}},function(A,M,t){"use strict";var I=t(64),g=t(1),e=t(26),i=t(25),T=t(21),E=t(80),N=t(146),n=t(81),o=t(31),c=t(11)("iterator"),C=!([].keys&&"next"in[].keys()),a="@@iterator",D="keys",r="values",B=function(){return this};A.exports=function(A,M,t,Q,s,u,x){N(t,M,Q);var y,j,l,w=function(A){if(!C&&A in h)return h[A];switch(A){case D:return function(){return new t(this,A)};case r:return function(){return new t(this,A)}}return function(){return new t(this,A)}},L=M+" Iterator",Y=s==r,d=!1,h=A.prototype,S=h[c]||h[a]||s&&h[s],z=S||w(s),p=s?Y?w("entries"):z:void 0,U="Array"==M?h.entries||S:S;if(U&&(l=o(U.call(new A)),l!==Object.prototype&&(n(l,L,!0),I||T(l,c)||i(l,c,B))),Y&&S&&S.name!==r&&(d=!0,z=function(){return S.call(this)}),I&&!x||!C&&!d&&h[c]||i(h,c,z),E[M]=z,E[L]=B,s)if(y={values:Y?z:w(r),keys:u?z:w(D),entries:p},x)for(j in y)j in h||e(h,j,y[j]);else g(g.P+g.F*(C||d),M,y);return y}},function(A,M){var t=Math.expm1;A.exports=!t||t(10)>22025.465794806718||t(10)<22025.465794806718||t(-2e-17)!=-2e-17?function(A){return 0==(A=+A)?A:A>-1e-6&&A<1e-6?A+A*A/2:Math.exp(A)-1}:t},function(A,M){A.exports=Math.sign||function(A){return 0==(A=+A)||A!=A?A:A<0?-1:1}},function(A,M,t){var I=t(5),g=t(158).set,e=I.MutationObserver||I.WebKitMutationObserver,i=I.process,T=I.Promise,E="process"==t(35)(i);A.exports=function(){var A,M,t,N=function(){var I,g;for(E&&(I=i.domain)&&I.exit();A;){g=A.fn,A=A.next;try{g()}catch(I){throw A?t():M=void 0,I}}M=void 0,I&&I.enter()};if(E)t=function(){i.nextTick(N)};else if(e){var n=!0,o=document.createTextNode("");new e(N).observe(o,{characterData:!0}),t=function(){o.data=n=!n}}else if(T&&T.resolve){var c=T.resolve();t=function(){c.then(N)}}else t=function(){g.call(I,N)};return function(I){var g={fn:I,next:void 0};M&&(M.next=g),A||(A=g,t()),M=g}}},function(A,M,t){var I=t(9),g=t(4),e=function(A,M){if(g(A),!I(M)&&null!==M)throw TypeError(M+": can't set as prototype!")};A.exports={set:Object.setPrototypeOf||("__proto__"in{}?function(A,M,I){try{I=t(49)(Function.call,t(30).f(Object.prototype,"__proto__").set,2),I(A,[]),M=!(A instanceof Array)}catch(A){M=!0}return function(A,t){return e(A,t),M?A.__proto__=t:I(A,t),A}}({},!1):void 0),check:e}},function(A,M,t){var I=t(114)("keys"),g=t(71);A.exports=function(A){return I[A]||(I[A]=g(A))}},function(A,M,t){var I=t(4),g=t(24),e=t(11)("species");A.exports=function(A,M){var t,i=I(A).constructor;return void 0===i||void 0==(t=I(i)[e])?M:g(t)}},function(A,M,t){var I=t(57),g=t(36);A.exports=function(A){return function(M,t){var e,i,T=String(g(M)),E=I(t),N=T.length;return E<0||E>=N?A?"":void 0:(e=T.charCodeAt(E),e<55296||e>56319||E+1===N||(i=T.charCodeAt(E+1))<56320||i>57343?A?T.charAt(E):e:A?T.slice(E,E+2):(e-55296<<10)+(i-56320)+65536)}}},function(A,M,t){var I=t(110),g=t(36);A.exports=function(A,M,t){if(I(M))throw TypeError("String#"+t+" doesn't accept regex!");return String(g(A))}},function(A,M,t){"use strict";var I=t(57),g=t(36);A.exports=function(A){var M=String(g(this)),t="",e=I(A);if(e<0||e==1/0)throw RangeError("Count can't be negative");for(;e>0;(e>>>=1)&&(M+=M))1&e&&(t+=M);return t}},function(A,M){A.exports="\t\n\v\f\r   ᠎              \u2028\u2029\ufeff"},function(A,M,t){var 
I,g,e,i=t(49),T=t(109),E=t(142),N=t(139),n=t(5),o=n.process,c=n.setImmediate,C=n.clearImmediate,a=n.MessageChannel,D=0,r={},B="onreadystatechange",Q=function(){var A=+this;if(r.hasOwnProperty(A)){var M=r[A];delete r[A],M()}},s=function(A){Q.call(A.data)};c&&C||(c=function(A){for(var M=[],t=1;arguments.length>t;)M.push(arguments[t++]);return r[++D]=function(){T("function"==typeof A?A:Function(A),M)},I(D),D},C=function(A){delete r[A]},"process"==t(35)(o)?I=function(A){o.nextTick(i(Q,A,1))}:a?(g=new a,e=g.port2,g.port1.onmessage=s,I=i(e.postMessage,e,1)):n.addEventListener&&"function"==typeof postMessage&&!n.importScripts?(I=function(A){n.postMessage(A+"","*")},n.addEventListener("message",s,!1)):I=B in N("script")?function(A){E.appendChild(N("script"))[B]=function(){E.removeChild(this),Q.call(A)}}:function(A){setTimeout(i(Q,A,1),0)}),A.exports={set:c,clear:C}},function(A,M,t){"use strict";var I=t(5),g=t(13),e=t(64),i=t(115),T=t(25),E=t(68),N=t(6),n=t(63),o=t(57),c=t(17),C=t(66).f,a=t(14).f,D=t(137),r=t(81),B="ArrayBuffer",Q="DataView",s="prototype",u="Wrong length!",x="Wrong index!",y=I[B],j=I[Q],l=I.Math,w=I.RangeError,L=I.Infinity,Y=y,d=l.abs,h=l.pow,S=l.floor,z=l.log,p=l.LN2,U="buffer",O="byteLength",f="byteOffset",m=g?"_b":U,F=g?"_l":O,k=g?"_o":f,R=function(A,M,t){var I,g,e,i=Array(t),T=8*t-M-1,E=(1<>1,n=23===M?h(2,-24)-h(2,-77):0,o=0,c=A<0||0===A&&1/A<0?1:0;for(A=d(A),A!=A||A===L?(g=A!=A?1:0,I=E):(I=S(z(A)/p),A*(e=h(2,-I))<1&&(I--,e*=2),A+=I+N>=1?n/e:n*h(2,1-N),A*e>=2&&(I++,e/=2),I+N>=E?(g=0,I=E):I+N>=1?(g=(A*e-1)*h(2,M),I+=N):(g=A*h(2,N-1)*h(2,M),I=0));M>=8;i[o++]=255&g,g/=256,M-=8);for(I=I<0;i[o++]=255&I,I/=256,T-=8);return i[--o]|=128*c,i},J=function(A,M,t){var I,g=8*t-M-1,e=(1<>1,T=g-7,E=t-1,N=A[E--],n=127&N;for(N>>=7;T>0;n=256*n+A[E],E--,T-=8);for(I=n&(1<<-T)-1,n>>=-T,T+=M;T>0;I=256*I+A[E],E--,T-=8);if(0===n)n=1-i;else{if(n===e)return I?NaN:N?-L:L;I+=h(2,M),n-=i}return(N?-1:1)*I*h(2,n-M)},G=function(A){return A[3]<<24|A[2]<<16|A[1]<<8|A[0]},H=function(A){return[255&A]},v=function(A){return[255&A,A>>8&255]},b=function(A){return[255&A,A>>8&255,A>>16&255,A>>24&255]},X=function(A){return R(A,52,8)},W=function(A){return R(A,23,4)},V=function(A,M,t){a(A[s],M,{get:function(){return this[t]}})},P=function(A,M,t,I){var g=+t,e=o(g);if(g!=e||e<0||e+M>A[F])throw w(x);var i=A[m]._b,T=e+A[k],E=i.slice(T,T+M);return I?E:E.reverse()},Z=function(A,M,t,I,g,e){var i=+t,T=o(i);if(i!=T||T<0||T+M>A[F])throw w(x);for(var E=A[m]._b,N=T+A[k],n=I(+g),c=0;cAA;)(q=$[AA++])in y||T(y,q,Y[q]);e||(_.constructor=y)}var MA=new j(new y(2)),tA=j[s].setInt8;MA.setInt8(0,2147483648),MA.setInt8(1,2147483649),!MA.getInt8(0)&&MA.getInt8(1)||E(j[s],{setInt8:function(A,M){tA.call(this,A,M<<24>>24)},setUint8:function(A,M){tA.call(this,A,M<<24>>24)}},!0)}else y=function(A){var M=K(this,A);this._b=D.call(Array(M),0),this[F]=M},j=function(A,M,t){n(this,j,Q),n(A,y,Q);var I=A[F],g=o(M);if(g<0||g>I)throw w("Wrong offset!");if(t=void 0===t?I-g:c(t),g+t>I)throw w(u);this[m]=A,this[k]=g,this[F]=t},g&&(V(y,O,"_l"),V(j,U,"_b"),V(j,O,"_l"),V(j,f,"_o")),E(j[s],{getInt8:function(A){return P(this,1,A)[0]<<24>>24},getUint8:function(A){return P(this,1,A)[0]},getInt16:function(A){var M=P(this,2,A,arguments[1]);return(M[1]<<8|M[0])<<16>>16},getUint16:function(A){var M=P(this,2,A,arguments[1]);return M[1]<<8|M[0]},getInt32:function(A){return G(P(this,4,A,arguments[1]))},getUint32:function(A){return G(P(this,4,A,arguments[1]))>>>0},getFloat32:function(A){return J(P(this,4,A,arguments[1]),23,4)},getFloat64:function(A){return 
J(P(this,8,A,arguments[1]),52,8)},setInt8:function(A,M){Z(this,1,A,H,M)},setUint8:function(A,M){Z(this,1,A,H,M)},setInt16:function(A,M){Z(this,2,A,v,M,arguments[2])},setUint16:function(A,M){Z(this,2,A,v,M,arguments[2])},setInt32:function(A,M){Z(this,4,A,b,M,arguments[2])},setUint32:function(A,M){Z(this,4,A,b,M,arguments[2])},setFloat32:function(A,M){Z(this,4,A,W,M,arguments[2])},setFloat64:function(A,M){Z(this,8,A,X,M,arguments[2])}});r(y,B),r(j,Q),T(j[s],i.VIEW,!0),M[B]=y,M[Q]=j},function(A,M,t){var I=t(5),g=t(48),e=t(64),i=t(240),T=t(14).f;A.exports=function(A){var M=g.Symbol||(g.Symbol=e?{}:I.Symbol||{});"_"==A.charAt(0)||A in M||T(M,A,{value:i.f(A)})}},function(A,M,t){var I=t(94),g=t(11)("iterator"),e=t(80);A.exports=t(48).getIteratorMethod=function(A){if(void 0!=A)return A[g]||A["@@iterator"]||e[I(A)]}},function(A,M,t){"use strict";var I=t(78),g=t(228),e=t(80),i=t(28);A.exports=t(147)(Array,"Array",function(A,M){this._t=i(A),this._i=0,this._k=M},function(){var A=this._t,M=this._k,t=this._i++;return!A||t>=A.length?(this._t=void 0,g(1)):"keys"==M?g(0,t):"values"==M?g(0,A[t]):g(0,[t,A[t]])},"values"),e.Arguments=e.Array,I("keys"),I("values"),I("entries")},function(A,M,t){"use strict";var I=t(72),g=function(){};I&&(g=function(){return document.addEventListener?function(A,M,t,I){return A.addEventListener(M,t,I||!1)}:document.attachEvent?function(A,M,t){return A.attachEvent("on"+M,t)}:void 0}()),A.exports=g},function(A,M,t){"use strict";var I=t(252),g=t(571),e=t(566),i=t(567),T=Object.prototype.hasOwnProperty;A.exports=function(A,M,t){var E="",N=M;if("string"==typeof M){if(void 0===t)return A.style[I(M)]||e(A).getPropertyValue(g(M));(N={})[M]=t}for(var n in N)T.call(N,n)&&(N[n]||0===N[n]?E+=g(n)+":"+N[n]+";":i(A,g(n)));A.style.cssText+=";"+E}},function(A,M,t){(function(){var t=this,I=t.humanize,g={};"undefined"!=typeof A&&A.exports&&(M=A.exports=g),M.humanize=g,g.noConflict=function(){return t.humanize=I,this},g.pad=function(A,M,t,I){if(A+="",t?t.length>1&&(t=t.charAt(0)):t=" ",I=void 0===I?"left":"right","right"===I)for(;A.length4&&A<21?"th":{1:"st",2:"nd",3:"rd"}[A%10]||"th"},w:function(){return t.getDay()},z:function(){return(n.L()?i[n.n()]:e[n.n()])+n.j()-1},W:function(){var A=n.z()-n.N()+1.5;return g.pad(1+Math.floor(Math.abs(A)/7)+(A%7>3.5?1:0),2,"0")},F:function(){return N[t.getMonth()]},m:function(){return g.pad(n.n(),2,"0")},M:function(){return n.F().slice(0,3)},n:function(){return t.getMonth()+1},t:function(){return new Date(n.Y(),n.n(),0).getDate()},L:function(){return 1===new Date(n.Y(),1,29).getMonth()?1:0},o:function(){var A=n.n(),M=n.W();return n.Y()+(12===A&&M<9?-1:1===A&&M>9)},Y:function(){return t.getFullYear()},y:function(){return String(n.Y()).slice(-2)},a:function(){return t.getHours()>11?"pm":"am"},A:function(){return n.a().toUpperCase()},B:function(){var A=t.getTime()/1e3,M=A%86400+3600;M<0&&(M+=86400);var I=M/86.4%1e3;return A<0?Math.ceil(I):Math.floor(I)},g:function(){return n.G()%12||12},G:function(){return t.getHours()},h:function(){return g.pad(n.g(),2,"0")},H:function(){return g.pad(n.G(),2,"0")},i:function(){return g.pad(t.getMinutes(),2,"0")},s:function(){return g.pad(t.getSeconds(),2,"0")},u:function(){return g.pad(1e3*t.getMilliseconds(),6,"0")},O:function(){var A=t.getTimezoneOffset(),M=Math.abs(A);return(A>0?"-":"+")+g.pad(100*Math.floor(M/60)+M%60,4,"0")},P:function(){var A=n.O();return A.substr(0,3)+":"+A.substr(3,2)},Z:function(){return 60*-t.getTimezoneOffset()},c:function(){return"Y-m-d\\TH:i:sP".replace(I,T)},r:function(){return"D, d M Y H:i:s 
O".replace(I,T)},U:function(){return t.getTime()/1e3||0}};return A.replace(I,T)},g.numberFormat=function(A,M,t,I){M=isNaN(M)?2:Math.abs(M),t=void 0===t?".":t,I=void 0===I?",":I;var g=A<0?"-":"";A=Math.abs(+A||0);var e=parseInt(A.toFixed(M),10)+"",i=e.length>3?e.length%3:0;return g+(i?e.substr(0,i)+I:"")+e.substr(i).replace(/(\d{3})(?=\d)/g,"$1"+I)+(M?t+Math.abs(A-e).toFixed(M).slice(2):"")},g.naturalDay=function(A,M){A=void 0===A?g.time():A,M=void 0===M?"Y-m-d":M;var t=86400,I=new Date,e=new Date(I.getFullYear(),I.getMonth(),I.getDate()).getTime()/1e3;return A=e-t?"yesterday":A>=e&&A=e+t&&A-2)return(t>=0?"just ":"")+"now";if(t<60&&t>-60)return t>=0?Math.floor(t)+" seconds ago":"in "+Math.floor(-t)+" seconds";if(t<120&&t>-120)return t>=0?"about a minute ago":"in about a minute";if(t<3600&&t>-3600)return t>=0?Math.floor(t/60)+" minutes ago":"in "+Math.floor(-t/60)+" minutes";if(t<7200&&t>-7200)return t>=0?"about an hour ago":"in about an hour";if(t<86400&&t>-86400)return t>=0?Math.floor(t/3600)+" hours ago":"in "+Math.floor(-t/3600)+" hours";var I=172800;if(t-I)return t>=0?"1 day ago":"in 1 day";var e=2505600;if(t-e)return t>=0?Math.floor(t/86400)+" days ago":"in "+Math.floor(-t/86400)+" days";var i=5184e3;if(t-i)return t>=0?"about a month ago":"in about a month";var T=parseInt(g.date("Y",M),10),E=parseInt(g.date("Y",A),10),N=12*T+parseInt(g.date("n",M),10),n=12*E+parseInt(g.date("n",A),10),o=N-n;if(o<12&&o>-12)return o>=0?o+" months ago":"in "+-o+" months";var c=T-E;return c<2&&c>-2?c>=0?"a year ago":"in a year":c>=0?c+" years ago":"in "+-c+" years"},g.ordinal=function(A){A=parseInt(A,10),A=isNaN(A)?0:A;var M=A<0?"-":"";A=Math.abs(A);var t=A%100;return M+A+(t>4&&t<21?"th":{1:"st",2:"nd",3:"rd"}[A%10]||"th")},g.filesize=function(A,M,t,I,e,i){return M=void 0===M?1024:M,A<=0?"0 bytes":(A

"),A=A.replace(/\n/g,"
"),"

"+A+"

"},g.nl2br=function(A){return A.replace(/(\r\n|\n|\r)/g,"
")},g.truncatechars=function(A,M){return A.length<=M?A:A.substr(0,M)+"…"},g.truncatewords=function(A,M){var t=A.split(" ");return t.length0,B=c.enumErrorProps&&(A===j||A instanceof Error),Q=c.enumPrototypes&&T(A);++I1)for(var t=1;tM.documentElement.clientHeight;return{modalStyles:{paddingRight:I&&!g?B.default():void 0,paddingLeft:!I&&g?B.default():void 0}}}});b.Body=z.default,b.Header=U.default,b.Title=f.default,b.Footer=F.default,b.Dialog=h.default,b.TRANSITION_DURATION=300,b.BACKDROP_TRANSITION_DURATION=150,M.default=C.bsSizes([D.Sizes.LARGE,D.Sizes.SMALL],C.bsClass("modal",b)),A.exports=M.default},function(A,M,t){"use strict";var I=t(33).default,g=t(32).default,e=t(89).default,i=t(15).default,T=t(12).default;M.__esModule=!0;var E=t(2),N=T(E),n=t(10),o=T(n),c=t(22),C=T(c),a=t(88),D=T(a),r=function(A){function M(){g(this,M),A.apply(this,arguments)}return I(M,A),M.prototype.render=function(){var A=this.props,M=A["aria-label"],t=e(A,["aria-label"]),I=D.default(this.context.$bs_onModalHide,this.props.onHide);return N.default.createElement("div",i({},t,{className:o.default(this.props.className,C.default.prefix(this.props,"header"))}),this.props.closeButton&&N.default.createElement("button",{type:"button",className:"close","aria-label":M,onClick:I},N.default.createElement("span",{"aria-hidden":"true"},"×")),this.props.children)},M}(N.default.Component);r.propTypes={"aria-label":N.default.PropTypes.string,bsClass:N.default.PropTypes.string,closeButton:N.default.PropTypes.bool,onHide:N.default.PropTypes.func},r.contextTypes={$bs_onModalHide:N.default.PropTypes.func},r.defaultProps={"aria-label":"Close",closeButton:!1},M.default=c.bsClass("modal",r),A.exports=M.default},function(A,M,t){"use strict";function I(A,M){return Array.isArray(M)?M.indexOf(A)>=0:A===M}var g=t(15).default,e=t(180).default,i=t(12).default;M.__esModule=!0;var T=t(84),E=i(T),N=t(277),n=i(N),o=t(2),c=i(o),C=t(34),a=i(C),D=t(214),r=(i(D),t(654)),B=i(r),Q=t(88),s=i(Q),u=c.default.createClass({displayName:"OverlayTrigger",propTypes:g({},B.default.propTypes,{trigger:c.default.PropTypes.oneOfType([c.default.PropTypes.oneOf(["click","hover","focus"]),c.default.PropTypes.arrayOf(c.default.PropTypes.oneOf(["click","hover","focus"]))]),delay:c.default.PropTypes.number,delayShow:c.default.PropTypes.number,delayHide:c.default.PropTypes.number,defaultOverlayShown:c.default.PropTypes.bool,overlay:c.default.PropTypes.node.isRequired,onBlur:c.default.PropTypes.func,onClick:c.default.PropTypes.func,onFocus:c.default.PropTypes.func,onMouseEnter:c.default.PropTypes.func,onMouseLeave:c.default.PropTypes.func,target:function(){},onHide:function(){},show:function(){}}),getDefaultProps:function(){return{defaultOverlayShown:!1,trigger:["hover","focus"]}},getInitialState:function(){return{isOverlayShown:this.props.defaultOverlayShown}},show:function(){this.setState({isOverlayShown:!0})},hide:function(){this.setState({isOverlayShown:!1})},toggle:function(){this.state.isOverlayShown?this.hide():this.show()},componentWillMount:function(){this.handleMouseOver=this.handleMouseOverOut.bind(null,this.handleDelayedShow),this.handleMouseOut=this.handleMouseOverOut.bind(null,this.handleDelayedHide)},componentDidMount:function(){this._mountNode=document.createElement("div"),this.renderOverlay()},renderOverlay:function(){a.default.unstable_renderSubtreeIntoContainer(this,this._overlay,this._mountNode)},componentWillUnmount:function(){a.default.unmountComponentAtNode(this._mountNode), 
-this._mountNode=null,clearTimeout(this._hoverShowDelay),clearTimeout(this._hoverHideDelay)},componentDidUpdate:function(){this._mountNode&&this.renderOverlay()},getOverlayTarget:function(){return a.default.findDOMNode(this)},getOverlay:function(){var A=g({},n.default(this.props,e(B.default.propTypes)),{show:this.state.isOverlayShown,onHide:this.hide,target:this.getOverlayTarget,onExit:this.props.onExit,onExiting:this.props.onExiting,onExited:this.props.onExited,onEnter:this.props.onEnter,onEntering:this.props.onEntering,onEntered:this.props.onEntered}),M=o.cloneElement(this.props.overlay,{placement:A.placement,container:A.container});return c.default.createElement(B.default,A,M)},render:function(){var A=c.default.Children.only(this.props.children),M=A.props,t={"aria-describedby":this.props.overlay.props.id};return this._overlay=this.getOverlay(),t.onClick=s.default(M.onClick,this.props.onClick),I("click",this.props.trigger)&&(t.onClick=s.default(this.toggle,t.onClick)),I("hover",this.props.trigger)&&(t.onMouseOver=s.default(this.handleMouseOver,this.props.onMouseOver,M.onMouseOver),t.onMouseOut=s.default(this.handleMouseOut,this.props.onMouseOut,M.onMouseOut)),I("focus",this.props.trigger)&&(t.onFocus=s.default(this.handleDelayedShow,this.props.onFocus,M.onFocus),t.onBlur=s.default(this.handleDelayedHide,this.props.onBlur,M.onBlur)),o.cloneElement(A,t)},handleDelayedShow:function(){var A=this;if(null!=this._hoverHideDelay)return clearTimeout(this._hoverHideDelay),void(this._hoverHideDelay=null);if(!this.state.isOverlayShown&&null==this._hoverShowDelay){var M=null!=this.props.delayShow?this.props.delayShow:this.props.delay;return M?void(this._hoverShowDelay=setTimeout(function(){A._hoverShowDelay=null,A.show()},M)):void this.show()}},handleDelayedHide:function(){var A=this;if(null!=this._hoverShowDelay)return clearTimeout(this._hoverShowDelay),void(this._hoverShowDelay=null);if(this.state.isOverlayShown&&null==this._hoverHideDelay){var M=null!=this.props.delayHide?this.props.delayHide:this.props.delay;return M?void(this._hoverHideDelay=setTimeout(function(){A._hoverHideDelay=null,A.hide()},M)):void this.hide()}},handleMouseOverOut:function(A,M){var t=M.currentTarget,I=M.relatedTarget||M.nativeEvent.toElement;I&&(I===t||E.default(t,I))||A(M)}});M.default=u,A.exports=M.default},function(A,M,t){"use strict";var I=t(15).default,g=t(12).default;M.__esModule=!0;var e=t(2),i=g(e),T=t(10),E=g(T),N=t(22),n=g(N),o=t(296),c=g(o),C=i.default.createClass({displayName:"Tooltip",propTypes:{id:c.default(i.default.PropTypes.oneOfType([i.default.PropTypes.string,i.default.PropTypes.number])),placement:i.default.PropTypes.oneOf(["top","right","bottom","left"]),positionLeft:i.default.PropTypes.number,positionTop:i.default.PropTypes.number,arrowOffsetLeft:i.default.PropTypes.oneOfType([i.default.PropTypes.number,i.default.PropTypes.string]),arrowOffsetTop:i.default.PropTypes.oneOfType([i.default.PropTypes.number,i.default.PropTypes.string]),title:i.default.PropTypes.node},getDefaultProps:function(){return{bsClass:"tooltip",placement:"right"}},render:function(){var A,M=(A={},A[n.default.prefix(this.props)]=!0,A[this.props.placement]=!0,A),t=I({left:this.props.positionLeft,top:this.props.positionTop},this.props.style),g={left:this.props.arrowOffsetLeft,top:this.props.arrowOffsetTop};return 
i.default.createElement("div",I({role:"tooltip"},this.props,{className:E.default(this.props.className,M),style:t}),i.default.createElement("div",{className:n.default.prefix(this.props,"arrow"),style:g}),i.default.createElement("div",{className:n.default.prefix(this.props,"inner")},this.props.children))}});M.default=C,A.exports=M.default},function(A,M,t){A.exports={default:t(661),__esModule:!0}},function(A,M,t){var I=t(667),g=t(99),e=t(286),i="prototype",T=function(A,M,t){var E,N,n,o=A&T.F,c=A&T.G,C=A&T.S,a=A&T.P,D=A&T.B,r=A&T.W,B=c?g:g[M]||(g[M]={}),Q=c?I:C?I[M]:(I[M]||{})[i];c&&(t=M);for(E in t)N=!o&&Q&&E in Q,N&&E in B||(n=N?Q[E]:t[E],B[E]=c&&"function"!=typeof Q[E]?t[E]:D&&N?e(n,I):r&&Q[E]==n?function(A){var M=function(M){return this instanceof A?new A(M):A(M)};return M[i]=A[i],M}(n):a&&"function"==typeof n?e(Function.call,n):n,a&&((B[i]||(B[i]={}))[E]=n))};T.F=1,T.G=2,T.S=4,T.P=8,T.B=16,T.W=32,A.exports=T},function(A,M){var t=Object;A.exports={create:t.create,getProto:t.getPrototypeOf,isEnum:{}.propertyIsEnumerable,getDesc:t.getOwnPropertyDescriptor,setDesc:t.defineProperty,setDescs:t.defineProperties,getKeys:t.keys,getNames:t.getOwnPropertyNames,getSymbols:t.getOwnPropertySymbols,each:[].forEach}},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){return A="function"==typeof A?A():A,i.default.findDOMNode(A)||M}Object.defineProperty(M,"__esModule",{value:!0}),M.default=g;var e=t(34),i=I(e);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M,t,I,g){var i=A[M],E="undefined"==typeof i?"undefined":e(i);return T.default.isValidElement(i)?new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of type ReactElement "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected a ReactComponent or a ")+"DOMElement. 
You can usually obtain a ReactComponent or DOMElement from a ReactElement by attaching a ref to it."):"object"===E&&"function"==typeof i.render||1===i.nodeType?null:new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of value ` + "`" + `"+i+"` + "`" + ` "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected a ReactComponent or a ")+"DOMElement.")}M.__esModule=!0;var e="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(A){return typeof A}:function(A){return A&&"function"==typeof Symbol&&A.constructor===Symbol?"symbol":typeof A},i=t(2),T=I(i),E=t(295),N=I(E);M.default=(0,N.default)(g)},function(A,M,t){"use strict";function I(){function A(A,M,I){for(var g=0;g>",null!=t[I]?A(t,I,g):M?new Error("Required prop '"+I+"' was not specified in '"+g+"'."):void 0}var t=M.bind(null,!1);return t.isRequired=M.bind(null,!0),t}M.__esModule=!0,M.errMsg=t,M.createChainableTypeChecker=I},function(A,M){"use strict";function t(A,M,t){function I(){return i=!0,T?void(N=[].concat(Array.prototype.slice.call(arguments))):void t.apply(this,arguments)}function g(){if(!i&&(E=!0,!T)){for(T=!0;!i&&e=A&&E&&(i=!0,t()))}}var e=0,i=!1,T=!1,E=!1,N=void 0;g()}function I(A,M,t){function I(A,M,I){i||(M?(i=!0,t(M)):(e[A]=I,i=++T===g,i&&t(null,e)))}var g=A.length,e=[];if(0===g)return t(null,e);var i=!1,T=0;A.forEach(function(A,t){M(A,t,function(A,M){I(t,A,M)})})}M.__esModule=!0,M.loopAsync=t,M.mapAsync=I},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}M.__esModule=!0,M.router=M.routes=M.route=M.components=M.component=M.location=M.history=M.falsy=M.locationShape=M.routerShape=void 0;var e=t(2),i=t(125),T=(g(i),t(74)),E=I(T),N=t(18),n=(g(N),e.PropTypes.func),o=e.PropTypes.object,c=e.PropTypes.shape,C=e.PropTypes.string,a=M.routerShape=c({push:n.isRequired,replace:n.isRequired,go:n.isRequired,goBack:n.isRequired,goForward:n.isRequired,setRouteLeaveHook:n.isRequired,isActive:n.isRequired}),D=M.locationShape=c({pathname:C.isRequired,search:C.isRequired,state:o,action:C.isRequired,key:C}),r=M.falsy=E.falsy,B=M.history=E.history,Q=M.location=D,s=M.component=E.component,u=M.components=E.components,x=M.route=E.route,y=(M.routes=E.routes,M.router=a),j={falsy:r,history:B,location:Q,component:s,components:u,route:x,router:y};M.default=j},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){for(var M in A)if(Object.prototype.hasOwnProperty.call(A,M))return!0;return!1}function e(A,M){function t(M){var t=!(arguments.length<=1||void 0===arguments[1])&&arguments[1],I=arguments.length<=2||void 0===arguments[2]?null:arguments[2],g=void 0;return t&&t!==!0||null!==I?(M={pathname:M,query:t},g=I||!1):(M=A.createLocation(M),g=t),(0,c.default)(M,g,s.location,s.routes,s.params)}function I(A,t){u&&u.location===A?e(u,t):(0,r.default)(M,A,function(M,I){M?t(M):I?e(i({},I,{location:A}),t):t()})}function e(A,M){function t(t,g){return t||g?I(t,g):void(0,a.default)(A,function(t,I){t?M(t):M(null,null,s=i({},A,{components:I}))})}function I(A,t){A?M(A):M(null,t)}var g=(0,N.default)(s,A),e=g.leaveRoutes,T=g.changeRoutes,E=g.enterRoutes;(0,n.runLeaveHooks)(e,s),e.filter(function(A){return E.indexOf(A)===-1}).forEach(D),(0,n.runChangeHooks)(T,s,A,function(M,g){return M||g?I(M,g):void(0,n.runEnterHooks)(E,A,t)})}function T(A){var M=arguments.length<=1||void 0===arguments[1]||arguments[1];return A.__id__||M&&(A.__id__=x++)}function 
E(A){return A.reduce(function(A,M){return A.push.apply(A,y[T(M)]),A},[])}function o(A,t){(0,r.default)(M,A,function(M,I){if(null==I)return void t();u=i({},I,{location:A});for(var g=E((0,N.default)(s,u).leaveRoutes),e=void 0,T=0,n=g.length;null==e&&T=32||13===M?M:0}A.exports=t},function(A,M){"use strict";function t(A){var M=this,t=M.nativeEvent;if(t.getModifierState)return t.getModifierState(A);var I=g[A];return!!I&&!!t[I]}function I(A){return t}var g={Alt:"altKey",Control:"ctrlKey",Meta:"metaKey",Shift:"shiftKey"};A.exports=I},function(A,M){"use strict";function t(A){var M=A.target||A.srcElement||window;return 3===M.nodeType?M.parentNode:M}A.exports=t},function(A,M){"use strict";function t(A){var M=A&&(I&&A[I]||A[g]);if("function"==typeof M)return M}var I="function"==typeof Symbol&&Symbol.iterator,g="@@iterator";A.exports=t},function(A,M,t){"use strict";function I(A){return"function"==typeof A&&"undefined"!=typeof A.prototype&&"function"==typeof A.prototype.mountComponent&&"function"==typeof A.prototype.receiveComponent}function g(A){var M;if(null===A||A===!1)M=new i(g);else if("object"==typeof A){var t=A;!t||"function"!=typeof t.type&&"string"!=typeof t.type?N(!1):void 0,M="string"==typeof t.type?T.createInternalComponent(t):I(t.type)?new t.type(t):new n}else"string"==typeof A||"number"==typeof A?M=T.createInstanceForText(A):N(!1);return M.construct(A),M._mountIndex=0,M._mountImage=null,M}var e=t(735),i=t(327),T=t(333),E=t(8),N=t(3),n=(t(7),function(){});E(n.prototype,e.Mixin,{_instantiateReactComponent:g}),A.exports=g},function(A,M,t){"use strict";/** +!function(){"use strict";function t(){for(var A=[],M=0;M0?g(I(A),9007199254740991):0}},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){if(M.indexOf("deprecated")!==-1){if(E[M])return;E[M]=!0}M="[react-router] "+M;for(var t=arguments.length,I=Array(t>2?t-2:0),g=2;g"+g+""};A.exports=function(A,M){var t={};t[A]=M(T),I(I.P+I.F*g(function(){var M=""[A]('"');return M!==M.toLowerCase()||M.split('"').length>3}),"String",t)}},function(A,M,t){var I=t(93),g=t(36);A.exports=function(A){return I(g(A))}},function(A,M,t){"use strict";var I=t(53),g=t(7),e=(t(132),"function"==typeof Symbol&&Symbol.for&&Symbol.for("react.element")||60103),i={key:!0,ref:!0,__self:!0,__source:!0},T=function(A,M,t,I,g,i,T){var E={$$typeof:e,type:A,key:M,ref:t,props:T,_owner:i};return E};T.createElement=function(A,M,t){var g,e={},E=null,n=null,N=null,o=null;if(null!=M){n=void 0===M.ref?null:M.ref,E=void 0===M.key?null:""+M.key,N=void 0===M.__self?null:M.__self,o=void 0===M.__source?null:M.__source;for(g in M)M.hasOwnProperty(g)&&!i.hasOwnProperty(g)&&(e[g]=M[g])}var c=arguments.length-2;if(1===c)e.children=t;else if(c>1){for(var C=Array(c),a=0;a1){for(var D=Array(a),r=0;r1?"-":"";g.href=e,g.download=M.bucketName+i+M.prefix.slice(0,-1)+".zip",g.click(),window.URL.revokeObjectURL(e),g.remove()}},t.send(JSON.stringify(M))}},M.uploadFile=function(A,M){return function(t,I){var g=I(),i=g.currentBucket,T=g.currentPath,E=""+T+A.name,N=window.location.origin+"/minio/upload/"+i+"/"+E,o=i+"-"+T+"-"+A.name;M.open("PUT",N,!0),M.withCredentials=!1;var c=n.default.getItem("token");c&&M.setRequestHeader("Authorization","Bearer "+n.default.getItem("token")),M.setRequestHeader("x-amz-date",(0,e.default)().utc().format("YYYYMMDDTHHmmss")+"Z"),t(eA({slug:o,xhr:M,size:A.size,name:A.name})),M.onload=function(I){401!=M.status&&403!=M.status&&500!=M.status||(EA(!1),t(iA({slug:o})),t(_({type:"danger",message:"Unauthorized 
request."}))),200==M.status&&(EA(!1),t(iA({slug:o})),t(_({type:"success",message:"File '"+A.name+"' uploaded successfully."})),t(gA(T)))},M.upload.addEventListener("error",function(M){t(_({type:"danger",message:"Error occurred uploading '"+A.name+"'."})),t(iA({slug:o}))}),M.upload.addEventListener("progress",function(A){if(A.lengthComputable){var M=A.loaded;A.total;t(TA({slug:o,loaded:M}))}}),M.send(A)}},M.showAbout=function(){return{type:h,showAbout:!0}},M.hideAbout=function(){return{type:h,showAbout:!1}},M.setSortNameOrder=function(A){return{type:S,sortNameOrder:A}}),NA=(M.setSortSizeOrder=function(A){return{type:z,sortSizeOrder:A}},M.setSortDateOrder=function(A){return{type:p,sortDateOrder:A}},M.setLatestUIVersion=function(A){return{type:U,latestUiVersion:A}},M.showSettings=function(){return{type:k,showSettings:!0}},M.hideSettings=function(){return{type:k,showSettings:!1}},M.setSettings=function(A){return{type:R,settings:A}},M.showBucketPolicy=function(){return{type:J,showBucketPolicy:!0}},M.hideBucketPolicy=function(){return{type:J,showBucketPolicy:!1}},M.setPolicies=function(A){return{type:G,policies:A}},M.checkedObjectsAdd=function(A){return{type:W,objectName:A}},M.checkedObjectsRemove=function(A){return{type:V,objectName:A}},M.checkedObjectsReset=function(A){return{type:P,objectName:A}})},function(A,M,t){var I=t(49),g=t(93),e=t(19),i=t(17),T=t(375);A.exports=function(A,M){var t=1==A,E=2==A,n=3==A,N=4==A,o=6==A,c=5==A||o,C=M||T;return function(M,T,a){for(var D,r,B=e(M),Q=g(B),s=I(T,a,3),u=i(Q.length),x=0,y=t?C(M,u):E?C(M,0):void 0;u>x;x++)if((c||x in Q)&&(D=Q[x],r=s(D,x,B),A))if(t)y[x]=r;else if(r)switch(A){case 3:return!0;case 5:return D;case 6:return x;case 2:y.push(D)}else if(N)return!1;return o?-1:n||N?N:y}}},function(A,M,t){var I=t(1),g=t(48),e=t(6);A.exports=function(A,M){var t=(g.Object||{})[A]||Object[A],i={};i[A]=M(t),I(I.S+I.F*e(function(){t(1)}),"Object",i)}},function(A,M,t){var I=t(9);A.exports=function(A,M){if(!I(A))return A;var t,g;if(M&&"function"==typeof(t=A.toString)&&!I(g=t.call(A)))return g;if("function"==typeof(t=A.valueOf)&&!I(g=t.call(A)))return g;if(!M&&"function"==typeof(t=A.toString)&&!I(g=t.call(A)))return g;throw TypeError("Can't convert object to primitive value")}},function(A,M,t){function I(A){if(i.unindexedChars&&e(A)){for(var M=-1,t=A.length,I=Object(A);++M3&&void 0!==arguments[3]?arguments[3]:{},n=Boolean(A),c=A||l,a=void 0;a="function"==typeof M?M:M?(0,B.default)(M):w;var r=t||L,Q=I.pure,s=void 0===Q||Q,u=I.withRef,y=void 0!==u&&u,h=s&&r!==L,S=d++;return function(A){function M(A,M,t){var I=r(A,M,t);return I}var t="Connect("+T(A)+")",I=function(I){function T(A,M){g(this,T);var i=e(this,I.call(this,A,M));i.version=S,i.store=A.store||M.store,(0,j.default)(i.store,'Could not find "store" in either the context or '+('props of "'+t+'". 
')+"Either wrap the root component in a , "+('or explicitly pass "store" as a prop to "'+t+'".'));var E=i.store.getState();return i.state={storeState:E},i.clearCache(),i}return i(T,I),T.prototype.shouldComponentUpdate=function(){return!s||this.haveOwnPropsChanged||this.hasStoreStateChanged},T.prototype.computeStateProps=function(A,M){if(!this.finalMapStateToProps)return this.configureFinalMapState(A,M);var t=A.getState(),I=this.doStatePropsDependOnOwnProps?this.finalMapStateToProps(t,M):this.finalMapStateToProps(t);return I},T.prototype.configureFinalMapState=function(A,M){var t=c(A.getState(),M),I="function"==typeof t;return this.finalMapStateToProps=I?t:c,this.doStatePropsDependOnOwnProps=1!==this.finalMapStateToProps.length,I?this.computeStateProps(A,M):t},T.prototype.computeDispatchProps=function(A,M){if(!this.finalMapDispatchToProps)return this.configureFinalMapDispatch(A,M);var t=A.dispatch,I=this.doDispatchPropsDependOnOwnProps?this.finalMapDispatchToProps(t,M):this.finalMapDispatchToProps(t);return I},T.prototype.configureFinalMapDispatch=function(A,M){var t=a(A.dispatch,M),I="function"==typeof t;return this.finalMapDispatchToProps=I?t:a,this.doDispatchPropsDependOnOwnProps=1!==this.finalMapDispatchToProps.length,I?this.computeDispatchProps(A,M):t},T.prototype.updateStatePropsIfNeeded=function(){var A=this.computeStateProps(this.store,this.props);return(!this.stateProps||!(0,D.default)(A,this.stateProps))&&(this.stateProps=A,!0)},T.prototype.updateDispatchPropsIfNeeded=function(){var A=this.computeDispatchProps(this.store,this.props);return(!this.dispatchProps||!(0,D.default)(A,this.dispatchProps))&&(this.dispatchProps=A,!0)},T.prototype.updateMergedPropsIfNeeded=function(){var A=M(this.stateProps,this.dispatchProps,this.props);return!(this.mergedProps&&h&&(0,D.default)(A,this.mergedProps))&&(this.mergedProps=A,!0)},T.prototype.isSubscribed=function(){return"function"==typeof this.unsubscribe},T.prototype.trySubscribe=function(){n&&!this.unsubscribe&&(this.unsubscribe=this.store.subscribe(this.handleChange.bind(this)),this.handleChange())},T.prototype.tryUnsubscribe=function(){this.unsubscribe&&(this.unsubscribe(),this.unsubscribe=null)},T.prototype.componentDidMount=function(){this.trySubscribe()},T.prototype.componentWillReceiveProps=function(A){s&&(0,D.default)(A,this.props)||(this.haveOwnPropsChanged=!0)},T.prototype.componentWillUnmount=function(){this.tryUnsubscribe(),this.clearCache()},T.prototype.clearCache=function(){this.dispatchProps=null,this.stateProps=null,this.mergedProps=null,this.haveOwnPropsChanged=!0,this.hasStoreStateChanged=!0,this.haveStatePropsBeenPrecalculated=!1,this.statePropsPrecalculationError=null,this.renderedElement=null,this.finalMapDispatchToProps=null,this.finalMapStateToProps=null},T.prototype.handleChange=function(){if(this.unsubscribe){var A=this.store.getState(),M=this.state.storeState;if(!s||M!==A){if(s&&!this.doStatePropsDependOnOwnProps){var t=E(this.updateStatePropsIfNeeded,this);if(!t)return;t===Y&&(this.statePropsPrecalculationError=Y.value),this.haveStatePropsBeenPrecalculated=!0}this.hasStoreStateChanged=!0,this.setState({storeState:A})}}},T.prototype.getWrappedInstance=function(){return(0,j.default)(y,"To access the wrapped instance, you need to specify { withRef: true } as the fourth argument of the connect() call."),this.refs.wrappedInstance},T.prototype.render=function(){var 
[Minified web UI JavaScript assets bundle included in this diff — webpack output bundling React, react-redux, react-router, moment.js 2.18.1, core-js polyfills, and humanize.]

"),A=A.replace(/\n/g,"
"),"

"+A+"

"},g.nl2br=function(A){return A.replace(/(\r\n|\n|\r)/g,"
")},g.truncatechars=function(A,M){return A.length<=M?A:A.substr(0,M)+"…"},g.truncatewords=function(A,M){var t=A.split(" ");return t.length0,B=c.enumErrorProps&&(A===j||A instanceof Error),Q=c.enumPrototypes&&T(A);++IM.documentElement.clientHeight;return{modalStyles:{paddingRight:I&&!g?B.default():void 0,paddingLeft:!I&&g?B.default():void 0}}}});b.Body=z.default,b.Header=U.default,b.Title=f.default,b.Footer=F.default,b.Dialog=h.default,b.TRANSITION_DURATION=300,b.BACKDROP_TRANSITION_DURATION=150,M.default=C.bsSizes([D.Sizes.LARGE,D.Sizes.SMALL],C.bsClass("modal",b)),A.exports=M.default},function(A,M,t){"use strict";var I=t(33).default,g=t(32).default,e=t(86).default,i=t(15).default,T=t(12).default;M.__esModule=!0;var E=t(2),n=T(E),N=t(10),o=T(N),c=t(22),C=T(c),a=t(85),D=T(a),r=function(A){function M(){g(this,M),A.apply(this,arguments)}return I(M,A),M.prototype.render=function(){var A=this.props,M=A["aria-label"],t=e(A,["aria-label"]),I=D.default(this.context.$bs_onModalHide,this.props.onHide);return n.default.createElement("div",i({},t,{className:o.default(this.props.className,C.default.prefix(this.props,"header"))}),this.props.closeButton&&n.default.createElement("button",{type:"button",className:"close","aria-label":M,onClick:I},n.default.createElement("span",{"aria-hidden":"true"},"×")),this.props.children)},M}(n.default.Component);r.propTypes={"aria-label":n.default.PropTypes.string,bsClass:n.default.PropTypes.string,closeButton:n.default.PropTypes.bool,onHide:n.default.PropTypes.func},r.contextTypes={$bs_onModalHide:n.default.PropTypes.func},r.defaultProps={"aria-label":"Close",closeButton:!1},M.default=c.bsClass("modal",r),A.exports=M.default},function(A,M,t){"use strict";function I(A,M){return Array.isArray(M)?M.indexOf(A)>=0:A===M}var g=t(15).default,e=t(178).default,i=t(12).default;M.__esModule=!0;var 
T=t(163),E=i(T),n=t(267),N=i(n),o=t(2),c=i(o),C=t(34),a=i(C),D=t(215),r=(i(D),t(644)),B=i(r),Q=t(85),s=i(Q),u=c.default.createClass({displayName:"OverlayTrigger",propTypes:g({},B.default.propTypes,{trigger:c.default.PropTypes.oneOfType([c.default.PropTypes.oneOf(["click","hover","focus"]),c.default.PropTypes.arrayOf(c.default.PropTypes.oneOf(["click","hover","focus"]))]),delay:c.default.PropTypes.number,delayShow:c.default.PropTypes.number,delayHide:c.default.PropTypes.number,defaultOverlayShown:c.default.PropTypes.bool,overlay:c.default.PropTypes.node.isRequired,onBlur:c.default.PropTypes.func,onClick:c.default.PropTypes.func,onFocus:c.default.PropTypes.func,onMouseEnter:c.default.PropTypes.func,onMouseLeave:c.default.PropTypes.func,target:function(){},onHide:function(){},show:function(){}}),getDefaultProps:function(){return{defaultOverlayShown:!1,trigger:["hover","focus"]}},getInitialState:function(){return{isOverlayShown:this.props.defaultOverlayShown}},show:function(){this.setState({isOverlayShown:!0})},hide:function(){this.setState({isOverlayShown:!1})},toggle:function(){this.state.isOverlayShown?this.hide():this.show()},componentWillMount:function(){this.handleMouseOver=this.handleMouseOverOut.bind(null,this.handleDelayedShow),this.handleMouseOut=this.handleMouseOverOut.bind(null,this.handleDelayedHide)},componentDidMount:function(){this._mountNode=document.createElement("div"),this.renderOverlay()},renderOverlay:function(){a.default.unstable_renderSubtreeIntoContainer(this,this._overlay,this._mountNode)},componentWillUnmount:function(){a.default.unmountComponentAtNode(this._mountNode),this._mountNode=null,clearTimeout(this._hoverShowDelay),clearTimeout(this._hoverHideDelay)},componentDidUpdate:function(){this._mountNode&&this.renderOverlay(); +},getOverlayTarget:function(){return a.default.findDOMNode(this)},getOverlay:function(){var A=g({},N.default(this.props,e(B.default.propTypes)),{show:this.state.isOverlayShown,onHide:this.hide,target:this.getOverlayTarget,onExit:this.props.onExit,onExiting:this.props.onExiting,onExited:this.props.onExited,onEnter:this.props.onEnter,onEntering:this.props.onEntering,onEntered:this.props.onEntered}),M=o.cloneElement(this.props.overlay,{placement:A.placement,container:A.container});return c.default.createElement(B.default,A,M)},render:function(){var A=c.default.Children.only(this.props.children),M=A.props,t={"aria-describedby":this.props.overlay.props.id};return this._overlay=this.getOverlay(),t.onClick=s.default(M.onClick,this.props.onClick),I("click",this.props.trigger)&&(t.onClick=s.default(this.toggle,t.onClick)),I("hover",this.props.trigger)&&(t.onMouseOver=s.default(this.handleMouseOver,this.props.onMouseOver,M.onMouseOver),t.onMouseOut=s.default(this.handleMouseOut,this.props.onMouseOut,M.onMouseOut)),I("focus",this.props.trigger)&&(t.onFocus=s.default(this.handleDelayedShow,this.props.onFocus,M.onFocus),t.onBlur=s.default(this.handleDelayedHide,this.props.onBlur,M.onBlur)),o.cloneElement(A,t)},handleDelayedShow:function(){var A=this;if(null!=this._hoverHideDelay)return clearTimeout(this._hoverHideDelay),void(this._hoverHideDelay=null);if(!this.state.isOverlayShown&&null==this._hoverShowDelay){var M=null!=this.props.delayShow?this.props.delayShow:this.props.delay;return M?void(this._hoverShowDelay=setTimeout(function(){A._hoverShowDelay=null,A.show()},M)):void this.show()}},handleDelayedHide:function(){var A=this;if(null!=this._hoverShowDelay)return 
clearTimeout(this._hoverShowDelay),void(this._hoverShowDelay=null);if(this.state.isOverlayShown&&null==this._hoverHideDelay){var M=null!=this.props.delayHide?this.props.delayHide:this.props.delay;return M?void(this._hoverHideDelay=setTimeout(function(){A._hoverHideDelay=null,A.hide()},M)):void this.hide()}},handleMouseOverOut:function(A,M){var t=M.currentTarget,I=M.relatedTarget||M.nativeEvent.toElement;I&&(I===t||E.default(t,I))||A(M)}});M.default=u,A.exports=M.default},function(A,M,t){"use strict";var I=t(15).default,g=t(12).default;M.__esModule=!0;var e=t(2),i=g(e),T=t(10),E=g(T),n=t(22),N=g(n),o=t(293),c=g(o),C=i.default.createClass({displayName:"Tooltip",propTypes:{id:c.default(i.default.PropTypes.oneOfType([i.default.PropTypes.string,i.default.PropTypes.number])),placement:i.default.PropTypes.oneOf(["top","right","bottom","left"]),positionLeft:i.default.PropTypes.number,positionTop:i.default.PropTypes.number,arrowOffsetLeft:i.default.PropTypes.oneOfType([i.default.PropTypes.number,i.default.PropTypes.string]),arrowOffsetTop:i.default.PropTypes.oneOfType([i.default.PropTypes.number,i.default.PropTypes.string]),title:i.default.PropTypes.node},getDefaultProps:function(){return{bsClass:"tooltip",placement:"right"}},render:function(){var A,M=(A={},A[N.default.prefix(this.props)]=!0,A[this.props.placement]=!0,A),t=I({left:this.props.positionLeft,top:this.props.positionTop},this.props.style),g={left:this.props.arrowOffsetLeft,top:this.props.arrowOffsetTop};return i.default.createElement("div",I({role:"tooltip"},this.props,{className:E.default(this.props.className,M),style:t}),i.default.createElement("div",{className:N.default.prefix(this.props,"arrow"),style:g}),i.default.createElement("div",{className:N.default.prefix(this.props,"inner")},this.props.children))}});M.default=C,A.exports=M.default},function(A,M,t){A.exports={default:t(651),__esModule:!0}},function(A,M,t){var I=t(657),g=t(98),e=t(277),i="prototype",T=function(A,M,t){var E,n,N,o=A&T.F,c=A&T.G,C=A&T.S,a=A&T.P,D=A&T.B,r=A&T.W,B=c?g:g[M]||(g[M]={}),Q=c?I:C?I[M]:(I[M]||{})[i];c&&(t=M);for(E in t)n=!o&&Q&&E in Q,n&&E in B||(N=n?Q[E]:t[E],B[E]=c&&"function"!=typeof Q[E]?t[E]:D&&n?e(N,I):r&&Q[E]==N?function(A){var M=function(M){return this instanceof A?new A(M):A(M)};return M[i]=A[i],M}(N):a&&"function"==typeof N?e(Function.call,N):N,a&&((B[i]||(B[i]={}))[E]=N))};T.F=1,T.G=2,T.S=4,T.P=8,T.B=16,T.W=32,A.exports=T},function(A,M){var t=Object;A.exports={create:t.create,getProto:t.getPrototypeOf,isEnum:{}.propertyIsEnumerable,getDesc:t.getOwnPropertyDescriptor,setDesc:t.defineProperty,setDescs:t.defineProperties,getKeys:t.keys,getNames:t.getOwnPropertyNames,getSymbols:t.getOwnPropertySymbols,each:[].forEach}},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){return A="function"==typeof A?A():A,i.default.findDOMNode(A)||M}Object.defineProperty(M,"__esModule",{value:!0}),M.default=g;var e=t(34),i=I(e);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){if(M)do if(M===A)return!0;while(M=M.parentNode);return!1}Object.defineProperty(M,"__esModule",{value:!0});var e=t(87),i=I(e);M.default=function(){return i.default?function(A,M){return A.contains?A.contains(M):A.compareDocumentPosition?A===M||!!(16&A.compareDocumentPosition(M)):g(A,M)}:g}(),A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M,t){var I="",g="",e=M;if("string"==typeof M){if(void 0===t)return 
A.style[(0,i.default)(M)]||(0,N.default)(A).getPropertyValue((0,E.default)(M));(e={})[M]=t}Object.keys(e).forEach(function(M){var t=e[M];t||0===t?(0,D.default)(M)?g+=M+"("+t+") ":I+=(0,E.default)(M)+": "+t+";":(0,c.default)(A,(0,E.default)(M))}),g&&(I+=C.transform+": "+g+";"),A.style.cssText+=";"+I}Object.defineProperty(M,"__esModule",{value:!0}),M.default=g;var e=t(290),i=I(e),T=t(697),E=I(T),n=t(692),N=I(n),o=t(693),c=I(o),C=t(289),a=t(694),D=I(a);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M,t,I,g){var i=A[M],E="undefined"==typeof i?"undefined":e(i);return T.default.isValidElement(i)?new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of type ReactElement "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected a ReactComponent or a ")+"DOMElement. You can usually obtain a ReactComponent or DOMElement from a ReactElement by attaching a ref to it."):"object"===E&&"function"==typeof i.render||1===i.nodeType?null:new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of value ` + "`" + `"+i+"` + "`" + ` "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected a ReactComponent or a ")+"DOMElement.")}M.__esModule=!0;var e="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(A){return typeof A}:function(A){return A&&"function"==typeof Symbol&&A.constructor===Symbol?"symbol":typeof A},i=t(2),T=I(i),E=t(292),n=I(E);M.default=(0,n.default)(g)},function(A,M,t){"use strict";function I(){function A(A,M,I){for(var g=0;g>",null!=t[I]?A(t,I,g):M?new Error("Required prop '"+I+"' was not specified in '"+g+"'."):void 0}var t=M.bind(null,!1);return t.isRequired=M.bind(null,!0),t}M.__esModule=!0,M.errMsg=t,M.createChainableTypeChecker=I},function(A,M){"use strict";function t(A,M,t){function I(){return i=!0,T?void(n=[].concat(Array.prototype.slice.call(arguments))):void t.apply(this,arguments)}function g(){if(!i&&(E=!0,!T)){for(T=!0;!i&&e=A&&E&&(i=!0,t()))}}var e=0,i=!1,T=!1,E=!1,n=void 0;g()}function I(A,M,t){function I(A,M,I){i||(M?(i=!0,t(M)):(e[A]=I,i=++T===g,i&&t(null,e)))}var g=A.length,e=[];if(0===g)return t(null,e);var i=!1,T=0;A.forEach(function(A,t){M(A,t,function(A,M){I(t,A,M)})})}M.__esModule=!0,M.loopAsync=t,M.mapAsync=I},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}M.__esModule=!0,M.router=M.routes=M.route=M.components=M.component=M.location=M.history=M.falsy=M.locationShape=M.routerShape=void 0;var e=t(2),i=t(124),T=(g(i),t(73)),E=I(T),n=t(18),N=(g(n),e.PropTypes.func),o=e.PropTypes.object,c=e.PropTypes.shape,C=e.PropTypes.string,a=M.routerShape=c({push:N.isRequired,replace:N.isRequired,go:N.isRequired,goBack:N.isRequired,goForward:N.isRequired,setRouteLeaveHook:N.isRequired,isActive:N.isRequired}),D=M.locationShape=c({pathname:C.isRequired,search:C.isRequired,state:o,action:C.isRequired,key:C}),r=M.falsy=E.falsy,B=M.history=E.history,Q=M.location=D,s=M.component=E.component,u=M.components=E.components,x=M.route=E.route,y=(M.routes=E.routes,M.router=a),j={falsy:r,history:B,location:Q,component:s,components:u,route:x,router:y};M.default=j},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){for(var M in A)if(Object.prototype.hasOwnProperty.call(A,M))return!0;return!1}function e(A,M){function t(M){var t=!(arguments.length<=1||void 0===arguments[1])&&arguments[1],I=arguments.length<=2||void 
0===arguments[2]?null:arguments[2],g=void 0;return t&&t!==!0||null!==I?(M={pathname:M,query:t},g=I||!1):(M=A.createLocation(M),g=t),(0,c.default)(M,g,s.location,s.routes,s.params)}function I(A,t){u&&u.location===A?e(u,t):(0,r.default)(M,A,function(M,I){M?t(M):I?e(i({},I,{location:A}),t):t()})}function e(A,M){function t(t,g){return t||g?I(t,g):void(0,a.default)(A,function(t,I){t?M(t):M(null,null,s=i({},A,{components:I}))})}function I(A,t){A?M(A):M(null,t)}var g=(0,n.default)(s,A),e=g.leaveRoutes,T=g.changeRoutes,E=g.enterRoutes;(0,N.runLeaveHooks)(e,s),e.filter(function(A){return E.indexOf(A)===-1}).forEach(D),(0,N.runChangeHooks)(T,s,A,function(M,g){return M||g?I(M,g):void(0,N.runEnterHooks)(E,A,t)})}function T(A){var M=arguments.length<=1||void 0===arguments[1]||arguments[1];return A.__id__||M&&(A.__id__=x++)}function E(A){return A.reduce(function(A,M){return A.push.apply(A,y[T(M)]),A},[])}function o(A,t){(0,r.default)(M,A,function(M,I){if(null==I)return void t();u=i({},I,{location:A});for(var g=E((0,n.default)(s,u).leaveRoutes),e=void 0,T=0,N=g.length;null==e&&T=32||13===M?M:0}A.exports=t},function(A,M){"use strict";function t(A){var M=this,t=M.nativeEvent;if(t.getModifierState)return t.getModifierState(A);var I=g[A];return!!I&&!!t[I]}function I(A){return t}var g={Alt:"altKey",Control:"ctrlKey",Meta:"metaKey",Shift:"shiftKey"};A.exports=I},function(A,M){"use strict";function t(A){var M=A.target||A.srcElement||window;return 3===M.nodeType?M.parentNode:M}A.exports=t},function(A,M){"use strict";function t(A){var M=A&&(I&&A[I]||A[g]);if("function"==typeof M)return M}var I="function"==typeof Symbol&&Symbol.iterator,g="@@iterator";A.exports=t},function(A,M,t){"use strict";function I(A){return"function"==typeof A&&"undefined"!=typeof A.prototype&&"function"==typeof A.prototype.mountComponent&&"function"==typeof A.prototype.receiveComponent}function g(A){var M;if(null===A||A===!1)M=new i(g);else if("object"==typeof A){var t=A;!t||"function"!=typeof t.type&&"string"!=typeof t.type?n(!1):void 0,M="string"==typeof t.type?T.createInternalComponent(t):I(t.type)?new t.type(t):new N}else"string"==typeof A||"number"==typeof A?M=T.createInstanceForText(A):n(!1);return M.construct(A),M._mountIndex=0,M._mountImage=null,M}var e=t(741),i=t(324),T=t(330),E=t(7),n=t(3),N=(t(8),function(){});E(N.prototype,e.Mixin,{_instantiateReactComponent:g}),A.exports=g},function(A,M,t){"use strict";/** * Checks if an event is supported in the current execution environment. 
* * NOTE: This will not work correctly for non-generic events such as ` + "`" + `change` + "`" + `, @@ -201,62 +201,68 @@ this._mountNode=null,clearTimeout(this._hoverShowDelay),clearTimeout(this._hover * @internal * @license Modernizr 3.0.0pre (Custom Build) | MIT */ -function I(A,M){if(!e.canUseDOM||M&&!("addEventListener"in document))return!1;var t="on"+A,I=t in document;if(!I){var i=document.createElement("div");i.setAttribute(t,"return;"),I="function"==typeof i[t]}return!I&&g&&"wheel"===A&&(I=document.implementation.hasFeature("Events.wheel","3.0")),I}var g,e=t(20);e.canUseDOM&&(g=document.implementation&&document.implementation.hasFeature&&document.implementation.hasFeature("","")!==!0),A.exports=I},function(A,M,t){"use strict";var I=t(20),g=t(134),e=t(135),i=function(A,M){A.textContent=M};I.canUseDOM&&("textContent"in document.documentElement||(i=function(A,M){e(A,g(M))})),A.exports=i},function(A,M){"use strict";function t(A,M){var t=null===A||A===!1,I=null===M||M===!1;if(t||I)return t===I;var g=typeof A,e=typeof M;return"string"===g||"number"===g?"string"===e||"number"===e:"object"===e&&A.type===M.type&&A.key===M.key}A.exports=t},function(A,M,t){"use strict";function I(A){return D[A]}function g(A,M){return A&&null!=A.key?i(A.key):M.toString(36)}function e(A){return(""+A).replace(r,I)}function i(A){return"$"+e(A)}function T(A,M,t,I){var e=typeof A;if("undefined"!==e&&"boolean"!==e||(A=null),null===A||"string"===e||"number"===e||N.isValidElement(A))return t(I,A,""===M?C+g(A,0):M),1;var E,n,D=0,r=""===M?C:M+a;if(Array.isArray(A))for(var B=0;B2?arguments[2]:void 0,n=Math.min((void 0===N?i:g(N,i))-E,i-T),o=1;for(E0;)E in t?t[T]=t[E]:delete t[T],T+=o,E+=o;return t}},function(A,M,t){var I=t(79);A.exports=function(A,M){var t=[];return I(A,!1,t.push,t,M),t}},function(A,M,t){var I=t(24),g=t(19),e=t(95),i=t(17);A.exports=function(A,M,t,T,E){I(M);var N=g(A),n=e(N),o=i(N.length),c=E?o-1:0,C=E?-1:1;if(t<2)for(;;){if(c in n){T=n[c],c+=C;break}if(c+=C,E?c<0:o<=c)throw TypeError("Reduce of empty array with no initial value")}for(;E?c>=0:o>c;c+=C)c in n&&(T=M(T,n[c],c,N));return T}},function(A,M,t){"use strict";var I=t(24),g=t(9),e=t(109),i=[].slice,T={},E=function(A,M,t){if(!(M in T)){for(var I=[],g=0;g1?arguments[1]:void 0,3);M=M?M.n:this._f;)for(t(M.v,M.k,this);M&&M.r;)M=M.p},has:function(A){return!!r(this,A)}}),C&&I(o.prototype,"size",{get:function(){return E(this[D])}}),o},def:function(A,M,t){var I,g,e=r(A,M);return e?e.v=t:(A._l=e={i:g=a(M,!0),k:M,v:t,p:I=A._l,n:void 0,r:!1},A._f||(A._f=e),I&&(I.n=e),A[D]++,"F"!==g&&(A._i[g]=e)),A},getEntry:r,setStrong:function(A,M,t){n(A,M,function(A,M){this._t=A,this._k=M,this._l=void 0},function(){for(var A=this,M=A._k,t=A._l;t&&t.r;)t=t.p;return A._t&&(A._l=t=t?t.n:A._t._f)?"keys"==M?o(0,t.k):"values"==M?o(0,t.v):o(0,[t.k,t.v]):(A._t=void 0,o(1))},t?"entries":"values",!t,!0),c(M)}}},function(A,M,t){var I=t(94),g=t(219);A.exports=function(A){return function(){if(I(this)!=A)throw TypeError(A+"#toJSON isn't generic");return g(this)}}},function(A,M,t){"use strict";var I=t(68),g=t(55).getWeak,e=t(4),i=t(9),T=t(63),E=t(79),N=t(41),n=t(21),o=N(5),c=N(6),C=0,a=function(A){return A._l||(A._l=new D)},D=function(){this.a=[]},r=function(A,M){return o(A.a,function(A){return A[0]===M})};D.prototype={get:function(A){var M=r(this,A);if(M)return M[1]},has:function(A){return!!r(this,A)},set:function(A,M){var t=r(this,A);t?t[1]=M:this.a.push([A,M])},delete:function(A){var M=c(this.a,function(M){return 
M[0]===A});return~M&&this.a.splice(M,1),!!~M}},A.exports={getConstructor:function(A,M,t,e){var N=A(function(A,I){T(A,N,M,"_i"),A._i=C++,A._l=void 0,void 0!=I&&E(I,t,A[e],A)});return I(N.prototype,{delete:function(A){if(!i(A))return!1;var M=g(A);return M===!0?a(this).delete(A):M&&n(M,this._i)&&delete M[this._i]},has:function(A){if(!i(A))return!1;var M=g(A);return M===!0?a(this).has(A):M&&n(M,this._i)}}),N},def:function(A,M,t){var I=g(e(M),!0);return I===!0?a(A).set(M,t):I[A._i]=t,A},ufstore:a}},function(A,M,t){A.exports=!t(13)&&!t(6)(function(){return 7!=Object.defineProperty(t(139)("div"),"a",{get:function(){return 7}}).a})},function(A,M,t){var I=t(9),g=Math.floor;A.exports=function(A){return!I(A)&&isFinite(A)&&g(A)===A}},function(A,M,t){var I=t(4);A.exports=function(A,M,t,g){try{return g?M(I(t)[0],t[1]):M(t)}catch(M){var e=A.return;throw void 0!==e&&I(e.call(A)),M}}},function(A,M){A.exports=function(A,M){return{value:M,done:!!A}}},function(A,M){A.exports=Math.log1p||function(A){return(A=+A)>-1e-8&&A<1e-8?A-A*A/2:Math.log(1+A)}},function(A,M,t){"use strict";var I=t(67),g=t(113),e=t(96),i=t(19),T=t(95),E=Object.assign;A.exports=!E||t(6)(function(){var A={},M={},t=Symbol(),I="abcdefghijklmnopqrst";return A[t]=7,I.split("").forEach(function(A){M[A]=A}),7!=E({},A)[t]||Object.keys(E({},M)).join("")!=I})?function(A,M){for(var t=i(A),E=arguments.length,N=1,n=g.f,o=e.f;E>N;)for(var c,C=T(arguments[N++]),a=n?I(C).concat(n(C)):I(C),D=a.length,r=0;D>r;)o.call(C,c=a[r++])&&(t[c]=C[c]);return t}:E},function(A,M,t){var I=t(14),g=t(4),e=t(67);A.exports=t(13)?Object.defineProperties:function(A,M){g(A);for(var t,i=e(M),T=i.length,E=0;T>E;)I.f(A,t=i[E++],M[t]);return A}},function(A,M,t){var I=t(28),g=t(66).f,e={}.toString,i="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[],T=function(A){try{return g(A)}catch(A){return i.slice()}};A.exports.f=function(A){return i&&"[object Window]"==e.call(A)?T(A):g(I(A))}},function(A,M,t){var I=t(21),g=t(28),e=t(105)(!1),i=t(152)("IE_PROTO");A.exports=function(A,M){var t,T=g(A),E=0,N=[];for(t in T)t!=i&&I(T,t)&&N.push(t);for(;M.length>E;)I(T,t=M[E++])&&(~e(N,t)||N.push(t));return N}},function(A,M,t){var I=t(67),g=t(28),e=t(96).f;A.exports=function(A){return function(M){for(var t,i=g(M),T=I(i),E=T.length,N=0,n=[];E>N;)e.call(i,t=T[N++])&&n.push(A?[t,i[t]]:i[t]);return n}}},function(A,M,t){var I=t(66),g=t(113),e=t(4),i=t(5).Reflect;A.exports=i&&i.ownKeys||function(A){var M=I.f(e(A)),t=g.f;return t?M.concat(t(A)):M}},function(A,M,t){var I=t(5).parseFloat,g=t(82).trim;A.exports=1/I(t(157)+"-0")!==-(1/0)?function(A){var M=g(String(A),3),t=I(M);return 0===t&&"-"==M.charAt(0)?-0:t}:I},function(A,M,t){var I=t(5).parseInt,g=t(82).trim,e=t(157),i=/^[\-+]?0[xX]/;A.exports=8!==I(e+"08")||22!==I(e+"0x16")?function(A,M){var t=g(String(A),3);return I(t,M>>>0||(i.test(t)?16:10))}:I},function(A,M){A.exports=Object.is||function(A,M){return A===M?0!==A||1/A===1/M:A!=A&&M!=M}},function(A,M,t){var I=t(17),g=t(156),e=t(36);A.exports=function(A,M,t,i){var T=String(e(A)),E=T.length,N=void 0===t?" 
":String(t),n=I(M);if(n<=E||""==N)return T;var o=n-E,c=g.call(N,Math.ceil(o/N.length));return c.length>o&&(c=c.slice(0,o)),i?c+T:T+c}},function(A,M,t){M.f=t(11)},function(A,M,t){"use strict";var I=t(222);A.exports=t(106)("Map",function(A){return function(){return A(this,arguments.length>0?arguments[0]:void 0)}},{get:function(A){var M=I.getEntry(this,A);return M&&M.v},set:function(A,M){return I.def(this,0===A?0:A,M)}},I,!0)},function(A,M,t){t(13)&&"g"!=/./g.flags&&t(14).f(RegExp.prototype,"flags",{configurable:!0,get:t(108)})},function(A,M,t){"use strict";var I=t(222);A.exports=t(106)("Set",function(A){return function(){return A(this,arguments.length>0?arguments[0]:void 0)}},{add:function(A){return I.def(this,A=0===A?0:A,A)}},I)},function(A,M,t){"use strict";var I,g=t(41)(0),e=t(26),i=t(55),T=t(230),E=t(224),N=t(9),n=i.getWeak,o=Object.isExtensible,c=E.ufstore,C={},a=function(A){return function(){return A(this,arguments.length>0?arguments[0]:void 0)}},D={get:function(A){if(N(A)){var M=n(A);return M===!0?c(this).get(A):M?M[this._i]:void 0}},set:function(A,M){return E.def(this,A,M)}},r=A.exports=t(106)("WeakMap",a,D,E,!0,!0);7!=(new r).set((Object.freeze||Object)(C),7).get(C)&&(I=E.getConstructor(a),T(I.prototype,D),i.NEED=!0,g(["delete","has","get","set"],function(A){var M=r.prototype,t=M[A];e(M,A,function(M,g){if(N(M)&&!o(M)){this._f||(this._f=new I);var e=this._f[A](M,g);return"set"==A?this:e}return t.call(this,M,g)})}))},function(A,M){A.exports=function(){var A=[];return A.toString=function(){for(var A=[],M=0;M":i.innerHTML="<"+A+">",T[A]=!i.firstChild),T[A]?c[A]:null}var g=t(20),e=t(3),i=g.canUseDOM?document.createElement("div"):null,T={},E=[1,'"],N=[1,"","
"],n=[3,"","
"],o=[1,'',""],c={"*":[1,"?
","
"],area:[1,"",""],col:[2,"","
"],legend:[1,"
","
"],param:[1,"",""],tr:[2,"","
"],optgroup:E,option:E,caption:N,colgroup:N,tbody:N,tfoot:N,thead:N,td:n,th:n},C=["circle","clipPath","defs","ellipse","g","image","line","linearGradient","mask","path","pattern","polygon","polyline","radialGradient","rect","stop","text","tspan"];C.forEach(function(A){c[A]=o,T[A]=!0}),A.exports=I},function(A,M){"use strict";function t(A,M){if(A===M)return!0;if("object"!=typeof A||null===A||"object"!=typeof M||null===M)return!1;var t=Object.keys(A),g=Object.keys(M);if(t.length!==g.length)return!1;for(var e=I.bind(M),i=0;i2?arguments[2]:void 0,N=Math.min((void 0===n?i:g(n,i))-E,i-T),o=1;for(E0;)E in t?t[T]=t[E]:delete t[T],T+=o,E+=o;return t}},function(A,M,t){var I=t(78);A.exports=function(A,M){var t=[];return I(A,!1,t.push,t,M),t}},function(A,M,t){var I=t(24),g=t(19),e=t(93),i=t(17);A.exports=function(A,M,t,T,E){I(M);var n=g(A),N=e(n),o=i(n.length),c=E?o-1:0,C=E?-1:1;if(t<2)for(;;){if(c in N){T=N[c],c+=C;break}if(c+=C,E?c<0:o<=c)throw TypeError("Reduce of empty array with no initial value")}for(;E?c>=0:o>c;c+=C)c in N&&(T=M(T,N[c],c,n));return T}},function(A,M,t){"use strict";var I=t(24),g=t(9),e=t(110),i=[].slice,T={},E=function(A,M,t){if(!(M in T)){for(var I=[],g=0;g1?arguments[1]:void 0,3);M=M?M.n:this._f;)for(t(M.v,M.k,this);M&&M.r;)M=M.p},has:function(A){return!!r(this,A)}}),C&&I(o.prototype,"size",{get:function(){return E(this[D])}}),o},def:function(A,M,t){var I,g,e=r(A,M);return e?e.v=t:(A._l=e={i:g=a(M,!0),k:M,v:t,p:I=A._l,n:void 0,r:!1},A._f||(A._f=e),I&&(I.n=e),A[D]++,"F"!==g&&(A._i[g]=e)),A},getEntry:r,setStrong:function(A,M,t){N(A,M,function(A,M){this._t=A,this._k=M,this._l=void 0},function(){for(var A=this,M=A._k,t=A._l;t&&t.r;)t=t.p;return A._t&&(A._l=t=t?t.n:A._t._f)?"keys"==M?o(0,t.k):"values"==M?o(0,t.v):o(0,[t.k,t.v]):(A._t=void 0,o(1))},t?"entries":"values",!t,!0),c(M)}}},function(A,M,t){var I=t(92),g=t(220);A.exports=function(A){return function(){if(I(this)!=A)throw TypeError(A+"#toJSON isn't generic");return g(this)}}},function(A,M,t){"use strict";var I=t(68),g=t(55).getWeak,e=t(4),i=t(9),T=t(63),E=t(78),n=t(41),N=t(21),o=n(5),c=n(6),C=0,a=function(A){return A._l||(A._l=new D)},D=function(){this.a=[]},r=function(A,M){return o(A.a,function(A){return A[0]===M})};D.prototype={get:function(A){var M=r(this,A);if(M)return M[1]},has:function(A){return!!r(this,A)},set:function(A,M){var t=r(this,A);t?t[1]=M:this.a.push([A,M])},delete:function(A){var M=c(this.a,function(M){return M[0]===A});return~M&&this.a.splice(M,1),!!~M}},A.exports={getConstructor:function(A,M,t,e){var n=A(function(A,I){T(A,n,M,"_i"),A._i=C++,A._l=void 0,void 0!=I&&E(I,t,A[e],A)});return I(n.prototype,{delete:function(A){if(!i(A))return!1;var M=g(A);return M===!0?a(this).delete(A):M&&N(M,this._i)&&delete M[this._i]},has:function(A){if(!i(A))return!1;var M=g(A);return M===!0?a(this).has(A):M&&N(M,this._i)}}),n},def:function(A,M,t){var I=g(e(M),!0);return I===!0?a(A).set(M,t):I[A._i]=t,A},ufstore:a}},function(A,M,t){A.exports=!t(13)&&!t(6)(function(){return 7!=Object.defineProperty(t(139)("div"),"a",{get:function(){return 7}}).a})},function(A,M,t){var I=t(9),g=Math.floor;A.exports=function(A){return!I(A)&&isFinite(A)&&g(A)===A}},function(A,M,t){var I=t(4);A.exports=function(A,M,t,g){try{return g?M(I(t)[0],t[1]):M(t)}catch(M){var e=A.return;throw void 0!==e&&I(e.call(A)),M}}},function(A,M){A.exports=function(A,M){return{value:M,done:!!A}}},function(A,M){A.exports=Math.log1p||function(A){return(A=+A)>-1e-8&&A<1e-8?A-A*A/2:Math.log(1+A)}},function(A,M,t){"use strict";var 
I=t(67),g=t(114),e=t(94),i=t(19),T=t(93),E=Object.assign;A.exports=!E||t(6)(function(){var A={},M={},t=Symbol(),I="abcdefghijklmnopqrst";return A[t]=7,I.split("").forEach(function(A){M[A]=A}),7!=E({},A)[t]||Object.keys(E({},M)).join("")!=I})?function(A,M){for(var t=i(A),E=arguments.length,n=1,N=g.f,o=e.f;E>n;)for(var c,C=T(arguments[n++]),a=N?I(C).concat(N(C)):I(C),D=a.length,r=0;D>r;)o.call(C,c=a[r++])&&(t[c]=C[c]);return t}:E},function(A,M,t){var I=t(14),g=t(4),e=t(67);A.exports=t(13)?Object.defineProperties:function(A,M){g(A);for(var t,i=e(M),T=i.length,E=0;T>E;)I.f(A,t=i[E++],M[t]);return A}},function(A,M,t){var I=t(28),g=t(66).f,e={}.toString,i="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[],T=function(A){try{return g(A)}catch(A){return i.slice()}};A.exports.f=function(A){return i&&"[object Window]"==e.call(A)?T(A):g(I(A))}},function(A,M,t){var I=t(21),g=t(28),e=t(106)(!1),i=t(152)("IE_PROTO");A.exports=function(A,M){var t,T=g(A),E=0,n=[];for(t in T)t!=i&&I(T,t)&&n.push(t);for(;M.length>E;)I(T,t=M[E++])&&(~e(n,t)||n.push(t));return n}},function(A,M,t){var I=t(67),g=t(28),e=t(94).f;A.exports=function(A){return function(M){for(var t,i=g(M),T=I(i),E=T.length,n=0,N=[];E>n;)e.call(i,t=T[n++])&&N.push(A?[t,i[t]]:i[t]);return N}}},function(A,M,t){var I=t(66),g=t(114),e=t(4),i=t(5).Reflect;A.exports=i&&i.ownKeys||function(A){var M=I.f(e(A)),t=g.f;return t?M.concat(t(A)):M}},function(A,M,t){var I=t(5).parseFloat,g=t(81).trim;A.exports=1/I(t(157)+"-0")!==-(1/0)?function(A){var M=g(String(A),3),t=I(M);return 0===t&&"-"==M.charAt(0)?-0:t}:I},function(A,M,t){var I=t(5).parseInt,g=t(81).trim,e=t(157),i=/^[\-+]?0[xX]/;A.exports=8!==I(e+"08")||22!==I(e+"0x16")?function(A,M){var t=g(String(A),3);return I(t,M>>>0||(i.test(t)?16:10))}:I},function(A,M){A.exports=Object.is||function(A,M){return A===M?0!==A||1/A===1/M:A!=A&&M!=M}},function(A,M,t){var I=t(17),g=t(156),e=t(36);A.exports=function(A,M,t,i){var T=String(e(A)),E=T.length,n=void 0===t?" 
":String(t),N=I(M);if(N<=E||""==n)return T;var o=N-E,c=g.call(n,Math.ceil(o/n.length));return c.length>o&&(c=c.slice(0,o)),i?c+T:T+c}},function(A,M,t){M.f=t(11)},function(A,M,t){"use strict";var I=t(223);A.exports=t(107)("Map",function(A){return function(){return A(this,arguments.length>0?arguments[0]:void 0)}},{get:function(A){var M=I.getEntry(this,A);return M&&M.v},set:function(A,M){return I.def(this,0===A?0:A,M)}},I,!0)},function(A,M,t){t(13)&&"g"!=/./g.flags&&t(14).f(RegExp.prototype,"flags",{configurable:!0,get:t(109)})},function(A,M,t){"use strict";var I=t(223);A.exports=t(107)("Set",function(A){return function(){return A(this,arguments.length>0?arguments[0]:void 0)}},{add:function(A){return I.def(this,A=0===A?0:A,A)}},I)},function(A,M,t){"use strict";var I,g=t(41)(0),e=t(26),i=t(55),T=t(231),E=t(225),n=t(9),N=i.getWeak,o=Object.isExtensible,c=E.ufstore,C={},a=function(A){return function(){return A(this,arguments.length>0?arguments[0]:void 0)}},D={get:function(A){if(n(A)){var M=N(A);return M===!0?c(this).get(A):M?M[this._i]:void 0}},set:function(A,M){return E.def(this,A,M)}},r=A.exports=t(107)("WeakMap",a,D,E,!0,!0);7!=(new r).set((Object.freeze||Object)(C),7).get(C)&&(I=E.getConstructor(a),T(I.prototype,D),i.NEED=!0,g(["delete","has","get","set"],function(A){var M=r.prototype,t=M[A];e(M,A,function(M,g){if(n(M)&&!o(M)){this._f||(this._f=new I);var e=this._f[A](M,g);return"set"==A?this:e}return t.call(this,M,g)})}))},function(A,M){A.exports=function(){var A=[];return A.toString=function(){for(var A=[],M=0;M-1&&A%1==0&&AA.clientHeight}Object.defineProperty(M,"__esModule",{value:!0}),M.default=i;var T=t(116),E=I(T),N=t(83),n=I(N);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M,t,I,g){var i=A[M],E="undefined"==typeof i?"undefined":e(i);return T.default.isValidElement(i)?new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of type ReactElement "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected an element type (a string ")+"or a ReactClass)."):"function"!==E&&"string"!==E?new Error("Invalid "+I+" ` + "`" + `"+g+"` + "`" + ` of value ` + "`" + `"+i+"` + "`" + ` "+("supplied to ` + "`" + `"+t+"` + "`" + `, expected an element type (a string ")+"or a ReactClass)."):null}M.__esModule=!0;var e="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(A){return typeof A}:function(A){return A&&"function"==typeof Symbol&&A.constructor===Symbol?"symbol":typeof A},i=t(2),T=I(i),E=t(295),N=I(E);M.default=(0,N.default)(g)},function(A,M){"use strict";function t(A){function M(M,t,I,g,e,i){var T=g||"<>",E=i||I;if(null==t[I])return M?new Error("Required "+e+" ` + "`" + `"+E+"` + "`" + ` was not specified "+("in ` + "`" + `"+T+"` + "`" + `.")):null;for(var N=arguments.length,n=Array(N>6?N-6:0),o=6;o=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return 0===A.button}function i(A){return!!(A.metaKey||A.altKey||A.ctrlKey||A.shiftKey)}function T(A){for(var M in A)if(Object.prototype.hasOwnProperty.call(A,M))return!1;return!0}function E(A,M){var t=M.query,I=M.hash,g=M.state;return t||I||g?{pathname:A,query:t,hash:I,state:g}:A}M.__esModule=!0;var N=Object.assign||function(A){for(var M=1;M=0;I--){var g=A[I],e=g.path||"";if(t=e.replace(/\/*$/,"/")+t,0===e.indexOf("/"))break}return"/"+t}},propTypes:{path:c,from:c,to:c.isRequired,query:C,state:C,onEnter:n.falsy,children:n.falsy},render:function(){(0,T.default)(!1)}});M.default=a,A.exports=M.default},function(A,M,t){"use strict";function I(A){return 
A&&A.__esModule?A:{default:A}}M.__esModule=!0;var g=t(2),e=I(g),i=t(16),T=I(i),E=t(61),N=t(74),n=e.default.PropTypes,o=n.string,c=n.func,C=e.default.createClass({displayName:"Route",statics:{createRouteFromReactElement:E.createRouteFromReactElement},propTypes:{path:o,component:N.component,components:N.components,getComponent:c,getComponents:c},render:function(){(0,T.default)(!1)}});M.default=C,A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){var t={};for(var I in A)M.indexOf(I)>=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return!A||!A.__v2_compatible__}function i(A){return A&&A.getCurrentLocation}M.__esModule=!0;var T=Object.assign||function(A){for(var M=1;M=0&&0===window.sessionStorage.length)return;throw A}}function i(A){var M=void 0;try{M=window.sessionStorage.getItem(g(A))}catch(A){if(A.name===n)return null}if(M)try{return JSON.parse(M)}catch(A){}return null}M.__esModule=!0,M.saveState=e,M.readState=i;var T=t(47),E=(I(T),"@@History/"),N=["QuotaExceededError","QUOTA_EXCEEDED_ERR"],n="SecurityError"},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){function M(A){return E.canUseDOM?void 0:T.default(!1),t.listen(A)}var t=o.default(e({getUserConfirmation:N.getUserConfirmation},A,{go:N.go}));return e({},t,{listen:M})}M.__esModule=!0;var e=Object.assign||function(A){for(var M=1;M1?M-1:0),e=1;e=A.childNodes.length?null:A.childNodes.item(t);A.insertBefore(M,I)}var g=t(726),e=t(332),i=t(38),T=t(135),E=t(208),N=t(3),n={dangerouslyReplaceNodeWithMarkup:g.dangerouslyReplaceNodeWithMarkup,updateTextContent:E,processUpdates:function(A,M){for(var t,i=null,n=null,o=0;o-1?void 0:i(!1),!N.plugins[t]){M.extractEvents?void 0:i(!1),N.plugins[t]=M;var I=M.eventTypes;for(var e in I)g(I[e],M,e)?void 0:i(!1)}}}function g(A,M,t){N.eventNameDispatchConfigs.hasOwnProperty(t)?i(!1):void 0,N.eventNameDispatchConfigs[t]=A;var I=A.phasedRegistrationNames;if(I){for(var g in I)if(I.hasOwnProperty(g)){var T=I[g];e(T,M,t)}return!0}return!!A.registrationName&&(e(A.registrationName,M,t),!0)}function e(A,M,t){N.registrationNameModules[A]?i(!1):void 0,N.registrationNameModules[A]=M,N.registrationNameDependencies[A]=M.eventTypes[t].dependencies}var i=t(3),T=null,E={},N={plugins:[],eventNameDispatchConfigs:{},registrationNameModules:{},registrationNameDependencies:{},injectEventPluginOrder:function(A){T?i(!1):void 0,T=Array.prototype.slice.call(A),I()},injectEventPluginsByName:function(A){var M=!1;for(var t in A)if(A.hasOwnProperty(t)){var g=A[t];E.hasOwnProperty(t)&&E[t]===g||(E[t]?i(!1):void 0,E[t]=g,M=!0)}M&&I()},getPluginModuleForEvent:function(A){var M=A.dispatchConfig;if(M.registrationName)return N.registrationNameModules[M.registrationName]||null;for(var t in M.phasedRegistrationNames)if(M.phasedRegistrationNames.hasOwnProperty(t)){var I=N.registrationNameModules[M.phasedRegistrationNames[t]];if(I)return I}return null},_resetEventPlugins:function(){T=null;for(var A in E)E.hasOwnProperty(A)&&delete E[A];N.plugins.length=0;var M=N.eventNameDispatchConfigs;for(var t in M)M.hasOwnProperty(t)&&delete M[t];var I=N.registrationNameModules;for(var g in I)I.hasOwnProperty(g)&&delete I[g]}};A.exports=N},function(A,M,t){"use strict";function I(A){return(""+A).replace(u,"//")}function g(A,M){this.func=A,this.context=M,this.count=0}function e(A,M,t){var I=A.func,g=A.context;I.call(g,M,A.count++)}function i(A,M,t){if(null==A)return A;var I=g.getPooled(M,t);B(A,e,I),g.release(I)}function 
T(A,M,t,I){this.result=A,this.keyPrefix=M,this.func=t,this.context=I,this.count=0}function E(A,M,t){var g=A.result,e=A.keyPrefix,i=A.func,T=A.context,E=i.call(T,M,A.count++);Array.isArray(E)?N(E,g,t,r.thatReturnsArgument):null!=E&&(D.isValidElement(E)&&(E=D.cloneAndReplaceKey(E,e+(E!==M?I(E.key||"")+"/":"")+t)),g.push(E))}function N(A,M,t,g,e){var i="";null!=t&&(i=I(t)+"/");var N=T.getPooled(M,i,g,e);B(A,E,N),T.release(N)}function n(A,M,t){if(null==A)return A;var I=[];return N(A,I,null,M,t),I}function o(A,M,t){return null}function c(A,M){return B(A,o,null)}function C(A){var M=[];return N(A,M,null,r.thatReturnsArgument),M}var a=t(62),D=t(29),r=t(44),B=t(210),Q=a.twoArgumentPooler,s=a.fourArgumentPooler,u=/\/(?!\/)/g;g.prototype.destructor=function(){this.func=null,this.context=null,this.count=0},a.addPoolingTo(g,Q),T.prototype.destructor=function(){this.result=null,this.keyPrefix=null,this.func=null,this.context=null,this.count=0},a.addPoolingTo(T,s);var x={forEach:i,map:n,mapIntoWithKeyPrefixInternal:N,count:c,toArray:C};A.exports=x},function(A,M,t){"use strict";function I(A,M){var t=y.hasOwnProperty(M)?y[M]:null;l.hasOwnProperty(M)&&(t!==u.OVERRIDE_BASE?r(!1):void 0),A.hasOwnProperty(M)&&(t!==u.DEFINE_MANY&&t!==u.DEFINE_MANY_MERGED?r(!1):void 0)}function g(A,M){if(M){"function"==typeof M?r(!1):void 0,c.isValidElement(M)?r(!1):void 0;var t=A.prototype;M.hasOwnProperty(s)&&j.mixins(A,M.mixins);for(var g in M)if(M.hasOwnProperty(g)&&g!==s){var e=M[g];if(I(t,g),j.hasOwnProperty(g))j[g](A,e);else{var i=y.hasOwnProperty(g),N=t.hasOwnProperty(g),n="function"==typeof e,o=n&&!i&&!N&&M.autobind!==!1;if(o)t.__reactAutoBindMap||(t.__reactAutoBindMap={}),t.__reactAutoBindMap[g]=e,t[g]=e;else if(N){var C=y[g];!i||C!==u.DEFINE_MANY_MERGED&&C!==u.DEFINE_MANY?r(!1):void 0,C===u.DEFINE_MANY_MERGED?t[g]=T(t[g],e):C===u.DEFINE_MANY&&(t[g]=E(t[g],e))}else t[g]=e}}}}function e(A,M){if(M)for(var t in M){var I=M[t];if(M.hasOwnProperty(t)){var g=t in j;g?r(!1):void 0;var e=t in A;e?r(!1):void 0,A[t]=I}}}function i(A,M){A&&M&&"object"==typeof A&&"object"==typeof M?void 0:r(!1);for(var t in M)M.hasOwnProperty(t)&&(void 0!==A[t]?r(!1):void 0,A[t]=M[t]);return A}function T(A,M){return function(){var t=A.apply(this,arguments),I=M.apply(this,arguments);if(null==t)return I;if(null==I)return t;var g={};return i(g,t),i(g,I),g}}function E(A,M){return function(){A.apply(this,arguments),M.apply(this,arguments)}}function N(A,M){var t=M.bind(A);return t}function n(A){for(var M in A.__reactAutoBindMap)if(A.__reactAutoBindMap.hasOwnProperty(M)){var t=A.__reactAutoBindMap[M];A[M]=N(A,t)}}var o=t(319),c=t(29),C=(t(130),t(129),t(334)),a=t(8),D=t(97),r=t(3),B=t(118),Q=t(58),s=(t(7),Q({mixins:null})),u=B({DEFINE_ONCE:null,DEFINE_MANY:null,OVERRIDE_BASE:null,DEFINE_MANY_MERGED:null}),x=[],y={mixins:u.DEFINE_MANY,statics:u.DEFINE_MANY,propTypes:u.DEFINE_MANY,contextTypes:u.DEFINE_MANY,childContextTypes:u.DEFINE_MANY,getDefaultProps:u.DEFINE_MANY_MERGED,getInitialState:u.DEFINE_MANY_MERGED,getChildContext:u.DEFINE_MANY_MERGED,render:u.DEFINE_ONCE,componentWillMount:u.DEFINE_MANY,componentDidMount:u.DEFINE_MANY,componentWillReceiveProps:u.DEFINE_MANY,shouldComponentUpdate:u.DEFINE_ONCE,componentWillUpdate:u.DEFINE_MANY,componentDidUpdate:u.DEFINE_MANY,componentWillUnmount:u.DEFINE_MANY,updateComponent:u.OVERRIDE_BASE},j={displayName:function(A,M){A.displayName=M},mixins:function(A,M){if(M)for(var t=0;t"+T+""},receiveComponent:function(A,M){if(A!==this._currentElement){this._currentElement=A;var 
t=""+A;if(t!==this._stringText){this._stringText=t;var g=i.getNode(this._rootNodeID);I.updateTextContent(g,t)}}},unmountComponent:function(){e.unmountIDFromEnvironment(this._rootNodeID)}}),A.exports=n},function(A,M,t){"use strict";function I(){this.reinitializeTransaction()}var g=t(39),e=t(132),i=t(8),T=t(44),E={initialize:T,close:function(){c.isBatchingUpdates=!1}},N={initialize:T,close:g.flushBatchedUpdates.bind(g)},n=[N,E];i(I.prototype,e.Mixin,{getTransactionWrappers:function(){return n}});var o=new I,c={isBatchingUpdates:!1,batchedUpdates:function(A,M,t,I,g,e){var i=c.isBatchingUpdates;c.isBatchingUpdates=!0,i?A(M,t,I,g,e):o.perform(A,null,M,t,I,g,e)}};A.exports=c},function(A,M,t){"use strict";function I(){if(!w){w=!0,B.EventEmitter.injectReactEventListener(r),B.EventPluginHub.injectEventPluginOrder(T),B.EventPluginHub.injectInstanceHandle(Q),B.EventPluginHub.injectMount(s),B.EventPluginHub.injectEventPluginsByName({SimpleEventPlugin:j,EnterLeaveEventPlugin:E,ChangeEventPlugin:e,SelectEventPlugin:x,BeforeInputEventPlugin:g}),B.NativeComponent.injectGenericComponentClass(a),B.NativeComponent.injectTextComponentClass(D),B.Class.injectMixin(o),B.DOMProperty.injectDOMPropertyConfig(n),B.DOMProperty.injectDOMPropertyConfig(l),B.EmptyComponent.injectEmptyComponent("noscript"),B.Updates.injectReconcileTransaction(u),B.Updates.injectBatchingStrategy(C),B.RootIndex.injectCreateReactRootIndex(N.canUseDOM?i.createReactRootIndex:y.createReactRootIndex),B.Component.injectEnvironment(c)}}var g=t(722),e=t(724),i=t(725),T=t(727),E=t(728),N=t(20),n=t(731),o=t(733),c=t(196),C=t(324),a=t(737),D=t(323),r=t(745),B=t(746),Q=t(93),s=t(23),u=t(750),x=t(756),y=t(757),j=t(758),l=t(755),w=!1;A.exports={inject:I}},function(A,M,t){"use strict";function I(){if(o.current){var A=o.current.getName();if(A)return" Check the render method of ` + "`" + `"+A+"` + "`" + `."}return""}function g(A,M){if(A._store&&!A._store.validated&&null==A.key){A._store.validated=!0;e("uniqueKey",A,M)}}function e(A,M,t){var g=I();if(!g){var e="string"==typeof t?t:t.displayName||t.name;e&&(g=" Check the top-level render call using <"+e+">.")}var i=a[A]||(a[A]={});if(i[g])return null;i[g]=!0;var T={parentOrOwner:g,url:" See https://fb.me/react-warning-keys for more information.",childOwner:null};return M&&M._owner&&M._owner!==o.current&&(T.childOwner=" It was passed a child from "+M._owner.getName()+"."),T}function i(A,M){if("object"==typeof A)if(Array.isArray(A))for(var t=0;t/,e={CHECKSUM_ATTR_NAME:"data-react-checksum",addChecksumToMarkup:function(A){var M=I(A);return A.replace(g," "+e.CHECKSUM_ATTR_NAME+'="'+M+'"$&')},canReuseMarkup:function(A,M){var t=M.getAttribute(e.CHECKSUM_ATTR_NAME);t=t&&parseInt(t,10);var g=I(A);return g===t}};A.exports=e},function(A,M,t){"use strict";var I=t(118),g=I({INSERT_MARKUP:null,MOVE_EXISTING:null,REMOVE_NODE:null,SET_MARKUP:null,TEXT_CONTENT:null});A.exports=g},function(A,M,t){"use strict";function I(A){if("function"==typeof A.type)return A.type;var M=A.type,t=o[M];return null==t&&(o[M]=t=N(M)),t}function g(A){return n?void 0:E(!1),new n(A.type,A.props)}function e(A){return new c(A)}function i(A){return A instanceof c}var T=t(8),E=t(3),N=null,n=null,o={},c=null,C={injectGenericComponentClass:function(A){n=A},injectTextComponentClass:function(A){c=A},injectComponentClasses:function(A){T(o,A)}},a={getComponentClassForElement:I,createInternalComponent:g,createInstanceForText:e,isTextComponent:i,injection:C};A.exports=a},function(A,M,t){"use strict";function I(A,M){}var 
g=(t(7),{isMounted:function(A){return!1},enqueueCallback:function(A,M){},enqueueForceUpdate:function(A){I(A,"forceUpdate")},enqueueReplaceState:function(A,M){I(A,"replaceState")},enqueueSetState:function(A,M){I(A,"setState")},enqueueSetProps:function(A,M){I(A,"setProps")},enqueueReplaceProps:function(A,M){I(A,"replaceProps")}});A.exports=g},function(A,M,t){"use strict";function I(A){function M(M,t,I,g,e,i){if(g=g||y,i=i||I,null==t[I]){var T=s[e];return M?new Error("Required "+T+" ` + "`" + `"+i+"` + "`" + ` was not specified in "+("` + "`" + `"+g+"` + "`" + `.")):null}return A(t,I,g,e,i)}var t=M.bind(null,!1);return t.isRequired=M.bind(null,!0),t}function g(A){function M(M,t,I,g,e){var i=M[t],T=D(i);if(T!==A){var E=s[g],N=r(i);return new Error("Invalid "+E+" ` + "`" + `"+e+"` + "`" + ` of type "+("` + "`" + `"+N+"` + "`" + ` supplied to ` + "`" + `"+I+"` + "`" + `, expected ")+("` + "`" + `"+A+"` + "`" + `."))}return null}return I(M)}function e(){return I(u.thatReturns(null))}function i(A){function M(M,t,I,g,e){var i=M[t];if(!Array.isArray(i)){var T=s[g],E=D(i);return new Error("Invalid "+T+" ` + "`" + `"+e+"` + "`" + ` of type "+("` + "`" + `"+E+"` + "`" + ` supplied to ` + "`" + `"+I+"` + "`" + `, expected an array."))}for(var N=0;N>"}var Q=t(29),s=t(129),u=t(44),x=t(205),y="<>",j={array:g("array"),bool:g("boolean"),func:g("function"),number:g("number"),object:g("object"),string:g("string"),any:e(),arrayOf:i,element:T(),instanceOf:E,node:c(),objectOf:n,oneOf:N,oneOfType:o,shape:C};A.exports=j},function(A,M){"use strict";var t={injectCreateReactRootIndex:function(A){I.createReactRootIndex=A}},I={createReactRootIndex:null,injection:t};A.exports=I},function(A,M){"use strict";var t={currentScrollLeft:0,currentScrollTop:0,refreshScrollValues:function(A){t.currentScrollLeft=A.x,t.currentScrollTop=A.y}};A.exports=t},function(A,M,t){"use strict";function I(A,M){if(null==M?g(!1):void 0,null==A)return M;var t=Array.isArray(A),I=Array.isArray(M);return t&&I?(A.push.apply(A,M),A):t?(A.push(M),A):I?[A].concat(M):[A,M]}var g=t(3);A.exports=I},function(A,M){"use strict";var t=function(A,M,t){Array.isArray(A)?A.forEach(M,t):A&&M.call(t,A)};A.exports=t},function(A,M,t){"use strict";function I(){return!e&&g.canUseDOM&&(e="textContent"in document.documentElement?"textContent":"innerText"),e}var g=t(20),e=null;A.exports=I},function(A,M){"use strict";function t(A){var M=A&&A.nodeName&&A.nodeName.toLowerCase();return M&&("input"===M&&I[A.type]||"textarea"===M)}var I={color:!0,date:!0,datetime:!0,"datetime-local":!0,email:!0,month:!0,number:!0,password:!0,range:!0,search:!0,tel:!0,text:!0,time:!0,url:!0,week:!0};A.exports=t},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(){for(var A=arguments.length,M=Array(A),t=0;t=0&&s.splice(M,1)}function T(A){var M=document.createElement("style");return M.type="text/css",e(A,M),M}function E(A){var M=document.createElement("link");return M.rel="stylesheet",e(A,M),M}function N(A,M){var t,I,g;if(M.singleton){var e=Q++;t=B||(B=T(M)),I=n.bind(null,t,e,!1),g=n.bind(null,t,e,!0)}else A.sourceMap&&"function"==typeof URL&&"function"==typeof URL.createObjectURL&&"function"==typeof URL.revokeObjectURL&&"function"==typeof Blob&&"function"==typeof btoa?(t=E(M),I=c.bind(null,t),g=function(){i(t),t.href&&URL.revokeObjectURL(t.href)}):(t=T(M),I=o.bind(null,t),g=function(){i(t)});return I(A),function(M){if(M){if(M.css===A.css&&M.media===A.media&&M.sourceMap===A.sourceMap)return;I(A=M)}else g()}}function n(A,M,t,I){var 
g=t?"":I.css;if(A.styleSheet)A.styleSheet.cssText=u(M,g);else{var e=document.createTextNode(g),i=A.childNodes;i[M]&&A.removeChild(i[M]),i.length?A.insertBefore(e,i[M]):A.appendChild(e)}}function o(A,M){var t=M.css,I=M.media;if(I&&A.setAttribute("media",I),A.styleSheet)A.styleSheet.cssText=t;else{for(;A.firstChild;)A.removeChild(A.firstChild);A.appendChild(document.createTextNode(t))}}function c(A,M){var t=M.css,I=M.sourceMap;I&&(t+="\n/*# sourceMappingURL=data:application/json;base64,"+btoa(unescape(encodeURIComponent(JSON.stringify(I))))+" */");var g=new Blob([t],{type:"text/css"}),e=A.href;A.href=URL.createObjectURL(g),e&&URL.revokeObjectURL(e)}var C={},a=function(A){var M;return function(){return"undefined"==typeof M&&(M=A.apply(this,arguments)),M}},D=a(function(){return/msie [6-9]\b/.test(window.navigator.userAgent.toLowerCase())}),r=a(function(){return document.head||document.getElementsByTagName("head")[0]}),B=null,Q=0,s=[];A.exports=function(A,M){M=M||{},"undefined"==typeof M.singleton&&(M.singleton=D()),"undefined"==typeof M.insertAt&&(M.insertAt="bottom");var t=g(A);return I(t,M),function(A){for(var e=[],i=0;i-1})))}},{key:"listObjects",value:function(){var A=this.props.dispatch;A(eA.listObjects())}},{key:"selectPrefix",value:function(A,M){A.preventDefault();var t=this.props,I=(t.dispatch,t.currentPath),g=(t.web,t.currentBucket),e=encodeURI(M);if(M.endsWith("/")||""===M){if(M===I)return;a.default.push(TA.pathJoin(g,e))}else window.location=window.location.origin+"/minio/download/"+g+"/"+e+"?token="+aA.default.getItem("token")}},{key:"makeBucket",value:function(A){A.preventDefault();var M=this.refs.makeBucketRef.value;this.refs.makeBucketRef.value="";var t=this.props,I=t.web,g=t.dispatch;this.hideMakeBucketModal(),I.MakeBucket({bucketName:M}).then(function(){g(eA.addBucket(M)),g(eA.selectBucket(M))}).catch(function(A){return g(eA.showAlert({type:"danger",message:A.message}))})}},{key:"hideMakeBucketModal",value:function(){var A=this.props.dispatch;A(eA.hideMakeBucketModal())}},{key:"showMakeBucketModal",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.showMakeBucketModal())}},{key:"showAbout",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.showAbout())}},{key:"hideAbout",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.hideAbout())}},{key:"showBucketPolicy",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.showBucketPolicy())}},{key:"hideBucketPolicy",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.hideBucketPolicy())}},{key:"uploadFile",value:function(A){A.preventDefault();var M=this.props,t=M.dispatch,I=M.buckets;if(0===I.length)return void t(eA.showAlert({type:"danger",message:"Bucket needs to be created before trying to upload files."}));var g=A.target.files[0];A.target.value=null,this.xhr=new XMLHttpRequest,t(eA.uploadFile(g,this.xhr))}},{key:"removeObject",value:function(){var A=this,M=this.props,t=M.web,I=M.dispatch,g=M.currentPath,e=M.currentBucket,i=M.deleteConfirmation,T=M.checkedObjects,E=[];E=T.length>0?T.map(function(A){return""+g+A}):[i.object],t.RemoveObject({bucketname:e,objects:E}).then(function(){if(A.hideDeleteConfirmation(),T.length>0){for(var M=0;M0})},n.default.createElement("span",{className:"la-label"},n.default.createElement("i",{className:"fa fa-check-circle"})," ",C.length," Objects selected"),n.default.createElement("span",{className:"la-actions pull-right"},n.default.createElement("button",{onClick:this.downloadSelected.bind(this)}," Download all as zip 
")),n.default.createElement("span",{className:"la-actions pull-right"},n.default.createElement("button",{onClick:this.showDeleteConfirmation.bind(this)}," Delete selected ")),n.default.createElement("i",{className:"la-close fa fa-times",onClick:this.clearSelected.bind(this)})),n.default.createElement(f.default,null,b,n.default.createElement("header",{className:"fe-header-mobile hidden-lg hidden-md"},n.default.createElement("div",{id:"feh-trigger",className:"feh-trigger "+(0,c.default)({"feht-toggled":y}),onClick:this.toggleSidebar.bind(this,!y)},n.default.createElement("div",{className:"feht-lines"},n.default.createElement("div",{className:"top"}),n.default.createElement("div",{className:"center"}),n.default.createElement("div",{className:"bottom"}))),n.default.createElement("img",{className:"mh-logo",src:IA.default,alt:""})),n.default.createElement("header",{className:"fe-header"},n.default.createElement(G.default,{selectPrefix:this.selectPrefix.bind(this)}),AA,n.default.createElement("ul",{className:"feh-actions"},n.default.createElement(v.default,null),K,_)),n.default.createElement("div",{className:"feb-container"},n.default.createElement("header",{className:"fesl-row","data-type":"folder"},n.default.createElement("div",{className:"fesl-item fesl-item-icon"}),n.default.createElement("div",{className:"fesl-item fesl-item-name",onClick:this.sortObjectsByName.bind(this),"data-sort":"name"},"Name",n.default.createElement("i",{className:(0,c.default)({"fesli-sort":!0,fa:!0,"fa-sort-alpha-desc":i,"fa-sort-alpha-asc":!i})})),n.default.createElement("div",{className:"fesl-item fesl-item-size",onClick:this.sortObjectsBySize.bind(this),"data-sort":"size"},"Size",n.default.createElement("i",{className:(0,c.default)({"fesli-sort":!0,fa:!0,"fa-sort-amount-desc":T,"fa-sort-amount-asc":!T})})),n.default.createElement("div",{className:"fesl-item fesl-item-modified",onClick:this.sortObjectsByDate.bind(this),"data-sort":"last-modified"},"Last Modified",n.default.createElement("i",{className:(0,c.default)({"fesli-sort":!0,fa:!0,"fa-sort-numeric-desc":E,"fa-sort-numeric-asc":!E})})),n.default.createElement("div",{className:"fesl-item fesl-item-actions"}))),n.default.createElement("div",{className:"feb-container"},n.default.createElement(rA.default,{loadMore:this.listObjects.bind(this),hasMore:J,useWindow:!0,initialLoad:!1},n.default.createElement(F.default,{dataType:this.dataType.bind(this),selectPrefix:this.selectPrefix.bind(this),showDeleteConfirmation:this.showDeleteConfirmation.bind(this),shareObject:this.shareObject.bind(this),checkObject:this.checkObject.bind(this),checkedObjectsArray:C})),n.default.createElement("div",{className:"text-center",style:{display:J&&S?"block":"none"}},n.default.createElement("span",null,"Loading..."))),n.default.createElement(X.default,null),eA,n.default.createElement(s.default,{className:"modal-create-bucket",bsSize:"small",animation:!1,show:g,onHide:this.hideMakeBucketModal.bind(this)},n.default.createElement("button",{className:"close close-alt",onClick:this.hideMakeBucketModal.bind(this)},n.default.createElement("span",null,"×")),n.default.createElement(x.default,null,n.default.createElement("form",{onSubmit:this.makeBucket.bind(this)},n.default.createElement("div",{className:"input-group"},n.default.createElement("input",{className:"ig-text",type:"text",ref:"makeBucketRef",placeholder:"Bucket Name",autoFocus:!0}),n.default.createElement("i",{className:"ig-helpers"}))))),n.default.createElement(s.default,{className:"modal-about 
modal-dark",animation:!1,show:N,onHide:this.hideAbout.bind(this)},n.default.createElement("button",{className:"close",onClick:this.hideAbout.bind(this)},n.default.createElement("span",null,"×")),n.default.createElement("div",{className:"ma-inner"},n.default.createElement("div",{className:"mai-item hidden-xs"},n.default.createElement("a",{href:"https://minio.io",target:"_blank"},n.default.createElement("img",{className:"maii-logo",src:IA.default,alt:""}))),n.default.createElement("div",{className:"mai-item"},n.default.createElement("ul",{className:"maii-list"},n.default.createElement("li",null,n.default.createElement("div",null,"Version"),n.default.createElement("small",null,D)),n.default.createElement("li",null,n.default.createElement("div",null,"Memory"),n.default.createElement("small",null,B)),n.default.createElement("li",null,n.default.createElement("div",null,"Platform"),n.default.createElement("small",null,Q)),n.default.createElement("li",null,n.default.createElement("div",null,"Runtime"),n.default.createElement("small",null,u)))))),n.default.createElement(s.default,{className:"modal-policy",animation:!1,show:o,onHide:this.hideBucketPolicy.bind(this)},n.default.createElement(j.default,null,"Bucket Policy (",S,")",n.default.createElement("button",{className:"close close-alt",onClick:this.hideBucketPolicy.bind(this)},n.default.createElement("span",null,"×"))),n.default.createElement("div",{className:"pm-body"},n.default.createElement(Z.default,{bucket:S}),d.map(function(A,M){return n.default.createElement(q.default,{key:M,prefix:A.prefix,policy:A.policy})}))),n.default.createElement(MA.default,{show:p.show,icon:"fa fa-exclamation-triangle mci-red",text:"Are you sure you want to delete?",sub:"This cannot be undone!",okText:"Delete",cancelText:"Cancel",okHandler:this.removeObject.bind(this),cancelHandler:this.hideDeleteConfirmation.bind(this)}),n.default.createElement(s.default,{show:U.show,animation:!1,onHide:this.hideShareObjectModal.bind(this),bsSize:"small"},n.default.createElement(j.default,null,"Share Object"),n.default.createElement(x.default,null,n.default.createElement("div",{className:"input-group copy-text"},n.default.createElement("label",null,"Shareable Link"),n.default.createElement("input",{type:"text",ref:"copyTextInput",readOnly:"readOnly",value:window.location.protocol+"//"+U.url,onClick:this.selectTexts.bind(this)})),n.default.createElement("div",{className:"input-group",style:{display:m.LoggedIn()?"block":"none"}},n.default.createElement("label",null,"Expires in (Max 7 
days)"),n.default.createElement("div",{className:"set-expire"},n.default.createElement("div",{className:"set-expire-item"},n.default.createElement("i",{className:"set-expire-increase",onClick:this.handleExpireValue.bind(this,"expireDays",1,U.object)}),n.default.createElement("div",{className:"set-expire-title"},"Days"),n.default.createElement("div",{className:"set-expire-value"},n.default.createElement("input",{ref:"expireDays",type:"number",min:0,max:7,defaultValue:5,readOnly:"readOnly"})),n.default.createElement("i",{className:"set-expire-decrease",onClick:this.handleExpireValue.bind(this,"expireDays",-1,U.object)})),n.default.createElement("div",{className:"set-expire-item"},n.default.createElement("i",{className:"set-expire-increase",onClick:this.handleExpireValue.bind(this,"expireHours",1,U.object)}),n.default.createElement("div",{className:"set-expire-title"},"Hours"),n.default.createElement("div",{className:"set-expire-value"},n.default.createElement("input",{ref:"expireHours",type:"number",min:0,max:23,defaultValue:0,readOnly:"readOnly"})),n.default.createElement("i",{className:"set-expire-decrease",onClick:this.handleExpireValue.bind(this,"expireHours",-1,U.object)})),n.default.createElement("div",{className:"set-expire-item"},n.default.createElement("i",{className:"set-expire-increase",onClick:this.handleExpireValue.bind(this,"expireMins",1,U.object)}),n.default.createElement("div",{className:"set-expire-title"},"Minutes"),n.default.createElement("div",{className:"set-expire-value"},n.default.createElement("input",{ref:"expireMins",type:"number",min:0,max:59,defaultValue:0,readOnly:"readOnly"})),n.default.createElement("i",{className:"set-expire-decrease",onClick:this.handleExpireValue.bind(this,"expireMins",-1,U.object)}))))),n.default.createElement("div",{className:"modal-footer"},n.default.createElement(cA.default,{text:window.location.protocol+"//"+U.url,onCopy:this.showMessage.bind(this)},n.default.createElement("button",{className:"btn btn-success"},"Copy Link")),n.default.createElement("button",{className:"btn btn-link",onClick:this.hideShareObjectModal.bind(this)},"Cancel"))),H)))}}]),M}(n.default.Component);M.default=BA},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(2),e=I(g),i=t(46),T=I(i),E=t(175),N=I(E),n=function(A){var M=A.fullScreenFunc,t=A.aboutFunc,I=A.settingsFunc,g=A.logoutFunc;return e.default.createElement("li",null,e.default.createElement(N.default,{pullRight:!0,id:"top-right-menu"},e.default.createElement(N.default.Toggle,{noCaret:!0},e.default.createElement("i",{className:"fa fa-reorder"})),e.default.createElement(N.default.Menu,{className:"dropdown-menu-right"},e.default.createElement("li",null,e.default.createElement("a",{target:"_blank",href:"https://github.com/minio/minio"},"Github ",e.default.createElement("i",{className:"fa fa-github"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:M},"Fullscreen ",e.default.createElement("i",{className:"fa fa-expand"}))),e.default.createElement("li",null,e.default.createElement("a",{target:"_blank",href:"https://docs.minio.io/"},"Documentation ",e.default.createElement("i",{className:"fa fa-book"}))),e.default.createElement("li",null,e.default.createElement("a",{target:"_blank",href:"https://slack.minio.io"},"Ask for help ",e.default.createElement("i",{className:"fa fa-question-circle"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:t},"About 
",e.default.createElement("i",{className:"fa fa-info-circle"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:I},"Settings ",e.default.createElement("i",{className:"fa fa-cog"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:g},"Sign Out ",e.default.createElement("i",{className:"fa fa-sign-out"}))))))};M.default=(0,T.default)(function(A){return A})(n)},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(2),e=I(g),i=t(46),T=I(i),E=t(179),N=I(E),n=t(178),o=I(n),c=function(A){var M=A.latestUiVersion;return M===currentUiVersion?e.default.createElement("noscript",null):e.default.createElement("li",{className:"hidden-xs hidden-sm"},e.default.createElement("a",{href:""},e.default.createElement(o.default,{placement:"left",overlay:e.default.createElement(N.default,{id:"tt-version-update"},"New update available. Click to refresh.")}," ",e.default.createElement("i",{className:"fa fa-refresh"})," ")))};M.default=(0,T.default)(function(A){return{latestUiVersion:A.latestUiVersion}})(c)},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=function(){function A(A,M){for(var t=0;t0&&(A.name.endsWith("/")||(B=e.default.createElement(C.default,{id:"fia-dropdown-"+A.name.replace(".","-")},e.default.createElement(C.default.Toggle,{noCaret:!0,className:"fia-toggle"}),e.default.createElement(C.default.Menu,null,e.default.createElement("a",{href:"",className:"fiad-action",onClick:function(M){return E(M,""+t+A.name)}},e.default.createElement("i",{className:"fa fa-copy"})),Q))));var s="",u="";return c.indexOf(A.name)>-1&&(s=" fesl-row-selected",u=!0),e.default.createElement("div",{key:M,className:"fesl-row "+r+s, -"data-type":g(A.name,A.contentType)},e.default.createElement("div",{className:"fesl-item fesl-item-icon"},e.default.createElement("div",{className:"fi-select"},e.default.createElement("input",{type:"checkbox",name:A.name,checked:u,onChange:function(M){return o(M,A.name)}}),e.default.createElement("i",{className:"fis-icon"}),e.default.createElement("i",{className:"fis-helper"}))),e.default.createElement("div",{className:"fesl-item fesl-item-name"},e.default.createElement("a",{href:"",onClick:function(M){return I(M,""+t+A.name)}},A.name)),e.default.createElement("div",{className:"fesl-item fesl-item-size"},a),e.default.createElement("div",{className:"fesl-item fesl-item-modified"},D),e.default.createElement("div",{className:"fesl-item fesl-item-actions"},B))});return e.default.createElement("div",null,a)};M.default=(0,o.default)(function(A){return{objects:A.objects,currentPath:A.currentPath,loadPath:A.loadPath}})(a)},function(A,M,t){"use strict";function 
I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(2),e=I(g),i=t(46),T=I(i),E=function(A){var M=A.currentBucket,t=A.currentPath,I=A.selectPrefix,g=[],i="";return t&&(i=t.split("/").map(function(A,M){g.push(A);var t=g.join("/")+"/";return e.default.createElement("span",{key:M},e.default.createElement("a",{href:"",onClick:function(A){return I(A,t)}},A))})),e.default.createElement("h2",null,e.default.createElement("span",{className:"main"},e.default.createElement("a",{onClick:function(A){return I(A,"")},href:""},M)),i)};M.default=(0,T.default)(function(A){return{currentBucket:A.currentBucket,currentPath:A.currentPath}})(E)},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=function(){function A(A,M){for(var t=0;t0&&void 0!==arguments[0]?arguments[0]:{buckets:[],visibleBuckets:[],objects:[],istruncated:!0,storageInfo:{},serverInfo:{},currentBucket:"",currentPath:"",showMakeBucketModal:!1,uploads:{},alert:{show:!1,type:"danger",message:""},loginError:!1,sortNameOrder:!1,sortSizeOrder:!1,sortDateOrder:!1,latestUiVersion:currentUiVersion,sideBarActive:!1,loginRedirectPath:E.minioBrowserPrefix,settings:{accessKey:"",secretKey:"",secretKeyVisible:!1},showSettings:!1,policies:[],deleteConfirmation:{object:"",show:!1},shareObject:{show:!1,url:"",object:""},prefixWritable:!1,checkedObjects:[]},M=arguments[1],t=Object.assign({},A);switch(M.type){case T.SET_WEB:t.web=M.web;break;case T.SET_BUCKETS:t.buckets=M.buckets;break;case T.ADD_BUCKET:t.buckets=[M.bucket].concat(e(t.buckets)),t.visibleBuckets=[M.bucket].concat(e(t.visibleBuckets));break;case T.SET_VISIBLE_BUCKETS:t.visibleBuckets=M.visibleBuckets;break;case T.SET_CURRENT_BUCKET:t.currentBucket=M.currentBucket;break;case T.APPEND_OBJECTS:t.objects=[].concat(e(t.objects),e(M.objects)),t.marker=M.marker,t.istruncated=M.istruncated;break;case T.SET_OBJECTS:t.objects=[].concat(e(M.objects));break;case T.RESET_OBJECTS:t.objects=[],t.marker="",t.istruncated=!1;break;case T.SET_CURRENT_PATH:t.currentPath=M.currentPath;break;case T.SET_STORAGE_INFO:t.storageInfo=M.storageInfo;break;case T.SET_SERVER_INFO:t.serverInfo=M.serverInfo;break;case T.SHOW_MAKEBUCKET_MODAL:t.showMakeBucketModal=M.showMakeBucketModal;break;case T.UPLOAD_PROGRESS:t.uploads=Object.assign({},t.uploads),t.uploads[M.slug].loaded=M.loaded;break;case T.ADD_UPLOAD:t.uploads=Object.assign({},t.uploads,g({},M.slug,{loaded:0,size:M.size,xhr:M.xhr,name:M.name}));break;case T.STOP_UPLOAD:t.uploads=Object.assign({},t.uploads),delete t.uploads[M.slug];break;case T.SET_ALERT:t.alert.alertTimeout&&clearTimeout(t.alert.alertTimeout),M.alert.show?t.alert=M.alert:t.alert=Object.assign({},t.alert,{show:!1});break;case T.SET_LOGIN_ERROR:t.loginError=!0;break;case 
T.SET_SHOW_ABORT_MODAL:t.showAbortModal=M.showAbortModal;break;case T.SHOW_ABOUT:t.showAbout=M.showAbout;break;case T.SET_SORT_NAME_ORDER:t.sortNameOrder=M.sortNameOrder;break;case T.SET_SORT_SIZE_ORDER:t.sortSizeOrder=M.sortSizeOrder;break;case T.SET_SORT_DATE_ORDER:t.sortDateOrder=M.sortDateOrder;break;case T.SET_LATEST_UI_VERSION:t.latestUiVersion=M.latestUiVersion;break;case T.SET_SIDEBAR_STATUS:t.sidebarStatus=M.sidebarStatus;break;case T.SET_LOGIN_REDIRECT_PATH:t.loginRedirectPath=M.path;case T.SET_LOAD_BUCKET:t.loadBucket=M.loadBucket;break;case T.SET_LOAD_PATH:t.loadPath=M.loadPath;break;case T.SHOW_SETTINGS:t.showSettings=M.showSettings;break;case T.SET_SETTINGS:t.settings=Object.assign({},t.settings,M.settings);break;case T.SHOW_BUCKET_POLICY:t.showBucketPolicy=M.showBucketPolicy;break;case T.SET_POLICIES:t.policies=M.policies;break;case T.DELETE_CONFIRMATION:t.deleteConfirmation=Object.assign({},M.payload);break;case T.SET_SHARE_OBJECT:t.shareObject=Object.assign({},M.shareObject);break;case T.SET_PREFIX_WRITABLE:t.prefixWritable=M.prefixWritable;break;case T.REMOVE_OBJECT:var I=t.objects.findIndex(function(A){return A.name===M.object});if(I==-1)break;t.objects=[].concat(e(t.objects.slice(0,I)),e(t.objects.slice(I+1)));break;case T.CHECKED_OBJECTS_ADD:t.checkedObjects=[].concat(e(t.checkedObjects),[M.objectName]);break;case T.CHECKED_OBJECTS_REMOVE:var i=t.checkedObjects.indexOf(M.objectName);if(i==-1)break;t.checkedObjects=[].concat(e(t.checkedObjects.slice(0,i)),e(t.checkedObjects.slice(i+1)));break;case T.CHECKED_OBJECTS_RESET:t.checkedObjects=[]}return t}},function(A,M,t){"use strict";function I(A){if(Array.isArray(A)){for(var M=0,t=Array(A.length);MM.name.toLowerCase()?1:0}),g=g.sort(function(A,M){return A.name.toLowerCase()M.name.toLowerCase()?1:0}),M&&(t=t.reverse(),g=g.reverse()),[].concat(I(t),I(g))},M.sortObjectsBySize=function(A,M){var t=A.filter(function(A){return A.name.endsWith("/")}),g=A.filter(function(A){return!A.name.endsWith("/")});return g=g.sort(function(A,M){return A.size-M.size}),M&&(g=g.reverse()),[].concat(I(t),I(g))},M.sortObjectsByDate=function(A,M){var t=A.filter(function(A){return A.name.endsWith("/")}),g=A.filter(function(A){return!A.name.endsWith("/")});return g=g.sort(function(A,M){return new Date(A.lastModified).getTime()-new Date(M.lastModified).getTime()}),M&&(g=g.reverse()),[].concat(I(t),I(g))},M.pathSlice=function(A){A=A.replace(g.minioBrowserPrefix,"");var M="",t="";if(!A)return{bucket:t,prefix:M};var I=A.indexOf("/",1);return I==-1?(t=A.slice(1),{bucket:t,prefix:M}):(t=A.slice(1,I),M=A.slice(I+1),{bucket:t,prefix:M})},M.pathJoin=function(A,M){return M||(M=""),g.minioBrowserPrefix+"/"+A+"/"+M}},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}Object.defineProperty(M,"__esModule",{value:!0});var i=function(){function A(A,M){for(var t=0;t-1&&A%1==0&&A1)for(var t=1;tA.clientHeight}Object.defineProperty(M,"__esModule",{value:!0}),M.default=i;var T=t(120),E=I(T),n=t(100),N=I(n);A.exports=M.default},function(A,M){"use strict";function t(A,M){return A.classList?!!M&&A.classList.contains(M):(" "+A.className+" ").indexOf(" "+M+" ")!==-1}Object.defineProperty(M,"__esModule",{value:!0}),M.default=t,A.exports=M.default},function(A,M,t){"use strict";function I(A){return 
A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(87),e=I(g),i=function(){};e.default&&(i=function(){return document.addEventListener?function(A,M,t,I){return A.addEventListener(M,t,I||!1)}:document.attachEvent?function(A,M,t){return A.attachEvent("on"+M,function(M){M=M||window.event,M.target=M.target||M.srcElement,M.currentTarget=A,t.call(A,M)})}:void 0}()),M.default=i,A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){var M=(0,N.default)(A),t=(0,E.default)(M),I=M&&M.documentElement,g={top:0,left:0,height:0,width:0};if(M)return(0,i.default)(I,A)?(void 0!==A.getBoundingClientRect&&(g=A.getBoundingClientRect()),g={top:g.top+(t.pageYOffset||I.scrollTop)-(I.clientTop||0),left:g.left+(t.pageXOffset||I.scrollLeft)-(I.clientLeft||0),width:(null==g.width?A.offsetWidth:g.width)||0,height:(null==g.height?A.offsetHeight:g.height)||0}):g}Object.defineProperty(M,"__esModule",{value:!0}),M.default=g;var e=t(182),i=I(e),T=t(120),E=I(T),n=t(100),N=I(n);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){var t=(0,i.default)(A);return void 0===M?t?"pageYOffset"in t?t.pageYOffset:t.document.documentElement.scrollTop:A.scrollTop:void(t?t.scrollTo("pageXOffset"in t?t.pageXOffset:t.document.documentElement.scrollLeft,M):A.scrollTop=M)}Object.defineProperty(M,"__esModule",{value:!0}),M.default=g;var e=t(120),i=I(e);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(){for(var A=document.createElement("div").style,M={O:function(A){return"o"+A.toLowerCase()},Moz:function(A){return A.toLowerCase()},Webkit:function(A){return"webkit"+A},ms:function(A){return"MS"+A}},t=Object.keys(M),I=void 0,g=void 0,e="",i=0;i>",E=i||I;if(null==t[I])return M?new Error("Required "+e+" ` + "`" + `"+E+"` + "`" + ` was not specified "+("in ` + "`" + `"+T+"` + "`" + `.")):null;for(var n=arguments.length,N=Array(n>6?n-6:0),o=6;o=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return 0===A.button}function i(A){return!!(A.metaKey||A.altKey||A.ctrlKey||A.shiftKey)}function T(A){for(var M in A)if(Object.prototype.hasOwnProperty.call(A,M))return!1;return!0}function E(A,M){var t=M.query,I=M.hash,g=M.state;return t||I||g?{pathname:A,query:t,hash:I,state:g}:A}M.__esModule=!0;var n=Object.assign||function(A){for(var M=1;M=0;I--){var g=A[I],e=g.path||"";if(t=e.replace(/\/*$/,"/")+t,0===e.indexOf("/"))break}return"/"+t}},propTypes:{path:c,from:c,to:c.isRequired,query:C,state:C,onEnter:N.falsy,children:N.falsy},render:function(){(0,T.default)(!1)}});M.default=a,A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}M.__esModule=!0;var g=t(2),e=I(g),i=t(16),T=I(i),E=t(60),n=t(73),N=e.default.PropTypes,o=N.string,c=N.func,C=e.default.createClass({displayName:"Route",statics:{createRouteFromReactElement:E.createRouteFromReactElement},propTypes:{path:o,component:n.component,components:n.components,getComponent:c,getComponents:c},render:function(){(0,T.default)(!1)}});M.default=C,A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){var t={};for(var I in A)M.indexOf(I)>=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return!A||!A.__v2_compatible__}function i(A){return A&&A.getCurrentLocation}M.__esModule=!0;var T=Object.assign||function(A){for(var 
M=1;M=0&&0===window.sessionStorage.length)return;throw A}}function i(A){var M=void 0;try{M=window.sessionStorage.getItem(g(A))}catch(A){if(A.name===N)return null}if(M)try{return JSON.parse(M)}catch(A){}return null}M.__esModule=!0,M.saveState=e,M.readState=i;var T=t(46),E=(I(T),"@@History/"),n=["QuotaExceededError","QUOTA_EXCEEDED_ERR"],N="SecurityError"},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){function M(A){return E.canUseDOM?void 0:T.default(!1),t.listen(A)}var t=o.default(e({getUserConfirmation:n.getUserConfirmation},A,{go:n.go}));return e({},t,{listen:M})}M.__esModule=!0;var e=Object.assign||function(A){for(var M=1;M1?M-1:0),e=1;e=A.childNodes.length?null:A.childNodes.item(t);A.insertBefore(M,I)}var g=t(732),e=t(329),i=t(38),T=t(134),E=t(208),n=t(3),N={dangerouslyReplaceNodeWithMarkup:g.dangerouslyReplaceNodeWithMarkup,updateTextContent:E,processUpdates:function(A,M){for(var t,i=null,N=null,o=0;o-1?void 0:i(!1),!n.plugins[t]){M.extractEvents?void 0:i(!1),n.plugins[t]=M;var I=M.eventTypes;for(var e in I)g(I[e],M,e)?void 0:i(!1)}}}function g(A,M,t){n.eventNameDispatchConfigs.hasOwnProperty(t)?i(!1):void 0,n.eventNameDispatchConfigs[t]=A;var I=A.phasedRegistrationNames;if(I){for(var g in I)if(I.hasOwnProperty(g)){var T=I[g];e(T,M,t)}return!0}return!!A.registrationName&&(e(A.registrationName,M,t),!0)}function e(A,M,t){n.registrationNameModules[A]?i(!1):void 0,n.registrationNameModules[A]=M,n.registrationNameDependencies[A]=M.eventTypes[t].dependencies}var i=t(3),T=null,E={},n={plugins:[],eventNameDispatchConfigs:{},registrationNameModules:{},registrationNameDependencies:{},injectEventPluginOrder:function(A){T?i(!1):void 0,T=Array.prototype.slice.call(A),I()},injectEventPluginsByName:function(A){var M=!1;for(var t in A)if(A.hasOwnProperty(t)){var g=A[t];E.hasOwnProperty(t)&&E[t]===g||(E[t]?i(!1):void 0,E[t]=g,M=!0)}M&&I()},getPluginModuleForEvent:function(A){var M=A.dispatchConfig;if(M.registrationName)return n.registrationNameModules[M.registrationName]||null;for(var t in M.phasedRegistrationNames)if(M.phasedRegistrationNames.hasOwnProperty(t)){var I=n.registrationNameModules[M.phasedRegistrationNames[t]];if(I)return I}return null},_resetEventPlugins:function(){T=null;for(var A in E)E.hasOwnProperty(A)&&delete E[A];n.plugins.length=0;var M=n.eventNameDispatchConfigs;for(var t in M)M.hasOwnProperty(t)&&delete M[t];var I=n.registrationNameModules;for(var g in I)I.hasOwnProperty(g)&&delete I[g]}};A.exports=n},function(A,M,t){"use strict";function I(A){return(""+A).replace(u,"//")}function g(A,M){this.func=A,this.context=M,this.count=0}function e(A,M,t){var I=A.func,g=A.context;I.call(g,M,A.count++)}function i(A,M,t){if(null==A)return A;var I=g.getPooled(M,t);B(A,e,I),g.release(I)}function T(A,M,t,I){this.result=A,this.keyPrefix=M,this.func=t,this.context=I,this.count=0}function E(A,M,t){var g=A.result,e=A.keyPrefix,i=A.func,T=A.context,E=i.call(T,M,A.count++);Array.isArray(E)?n(E,g,t,r.thatReturnsArgument):null!=E&&(D.isValidElement(E)&&(E=D.cloneAndReplaceKey(E,e+(E!==M?I(E.key||"")+"/":"")+t)),g.push(E))}function n(A,M,t,g,e){var i="";null!=t&&(i=I(t)+"/");var n=T.getPooled(M,i,g,e);B(A,E,n),T.release(n)}function N(A,M,t){if(null==A)return A;var I=[];return n(A,I,null,M,t),I}function o(A,M,t){return null}function c(A,M){return B(A,o,null)}function C(A){var M=[];return n(A,M,null,r.thatReturnsArgument),M}var 
a=t(61),D=t(29),r=t(47),B=t(210),Q=a.twoArgumentPooler,s=a.fourArgumentPooler,u=/\/(?!\/)/g;g.prototype.destructor=function(){this.func=null,this.context=null,this.count=0},a.addPoolingTo(g,Q),T.prototype.destructor=function(){this.result=null,this.keyPrefix=null,this.func=null,this.context=null,this.count=0},a.addPoolingTo(T,s);var x={forEach:i,map:N,mapIntoWithKeyPrefixInternal:n,count:c,toArray:C};A.exports=x},function(A,M,t){"use strict";function I(A,M){var t=y.hasOwnProperty(M)?y[M]:null;l.hasOwnProperty(M)&&(t!==u.OVERRIDE_BASE?r(!1):void 0),A.hasOwnProperty(M)&&(t!==u.DEFINE_MANY&&t!==u.DEFINE_MANY_MERGED?r(!1):void 0)}function g(A,M){if(M){"function"==typeof M?r(!1):void 0,c.isValidElement(M)?r(!1):void 0;var t=A.prototype;M.hasOwnProperty(s)&&j.mixins(A,M.mixins);for(var g in M)if(M.hasOwnProperty(g)&&g!==s){var e=M[g];if(I(t,g),j.hasOwnProperty(g))j[g](A,e);else{var i=y.hasOwnProperty(g),n=t.hasOwnProperty(g),N="function"==typeof e,o=N&&!i&&!n&&M.autobind!==!1;if(o)t.__reactAutoBindMap||(t.__reactAutoBindMap={}),t.__reactAutoBindMap[g]=e,t[g]=e;else if(n){var C=y[g];!i||C!==u.DEFINE_MANY_MERGED&&C!==u.DEFINE_MANY?r(!1):void 0,C===u.DEFINE_MANY_MERGED?t[g]=T(t[g],e):C===u.DEFINE_MANY&&(t[g]=E(t[g],e))}else t[g]=e}}}}function e(A,M){if(M)for(var t in M){var I=M[t];if(M.hasOwnProperty(t)){var g=t in j;g?r(!1):void 0;var e=t in A;e?r(!1):void 0,A[t]=I}}}function i(A,M){A&&M&&"object"==typeof A&&"object"==typeof M?void 0:r(!1);for(var t in M)M.hasOwnProperty(t)&&(void 0!==A[t]?r(!1):void 0,A[t]=M[t]);return A}function T(A,M){return function(){var t=A.apply(this,arguments),I=M.apply(this,arguments);if(null==t)return I;if(null==I)return t;var g={};return i(g,t),i(g,I),g}}function E(A,M){return function(){A.apply(this,arguments),M.apply(this,arguments)}}function n(A,M){var t=M.bind(A);return t}function N(A){for(var M in A.__reactAutoBindMap)if(A.__reactAutoBindMap.hasOwnProperty(M)){var t=A.__reactAutoBindMap[M];A[M]=n(A,t)}}var o=t(316),c=t(29),C=(t(129),t(128),t(331)),a=t(7),D=t(105),r=t(3),B=t(135),Q=t(62),s=(t(8),Q({mixins:null})),u=B({DEFINE_ONCE:null,DEFINE_MANY:null,OVERRIDE_BASE:null,DEFINE_MANY_MERGED:null}),x=[],y={mixins:u.DEFINE_MANY,statics:u.DEFINE_MANY,propTypes:u.DEFINE_MANY,contextTypes:u.DEFINE_MANY,childContextTypes:u.DEFINE_MANY,getDefaultProps:u.DEFINE_MANY_MERGED,getInitialState:u.DEFINE_MANY_MERGED,getChildContext:u.DEFINE_MANY_MERGED,render:u.DEFINE_ONCE,componentWillMount:u.DEFINE_MANY,componentDidMount:u.DEFINE_MANY,componentWillReceiveProps:u.DEFINE_MANY,shouldComponentUpdate:u.DEFINE_ONCE,componentWillUpdate:u.DEFINE_MANY,componentDidUpdate:u.DEFINE_MANY,componentWillUnmount:u.DEFINE_MANY,updateComponent:u.OVERRIDE_BASE},j={displayName:function(A,M){A.displayName=M},mixins:function(A,M){if(M)for(var t=0;t"+T+""},receiveComponent:function(A,M){if(A!==this._currentElement){this._currentElement=A;var t=""+A;if(t!==this._stringText){this._stringText=t;var g=i.getNode(this._rootNodeID);I.updateTextContent(g,t)}}},unmountComponent:function(){e.unmountIDFromEnvironment(this._rootNodeID)}}),A.exports=N},function(A,M,t){"use strict";function I(){this.reinitializeTransaction()}var g=t(39),e=t(131),i=t(7),T=t(47),E={initialize:T,close:function(){c.isBatchingUpdates=!1}},n={initialize:T,close:g.flushBatchedUpdates.bind(g)},N=[n,E];i(I.prototype,e.Mixin,{getTransactionWrappers:function(){return N}});var o=new I,c={isBatchingUpdates:!1,batchedUpdates:function(A,M,t,I,g,e){var 
i=c.isBatchingUpdates;c.isBatchingUpdates=!0,i?A(M,t,I,g,e):o.perform(A,null,M,t,I,g,e)}};A.exports=c},function(A,M,t){"use strict";function I(){if(!w){w=!0,B.EventEmitter.injectReactEventListener(r),B.EventPluginHub.injectEventPluginOrder(T),B.EventPluginHub.injectInstanceHandle(Q),B.EventPluginHub.injectMount(s),B.EventPluginHub.injectEventPluginsByName({SimpleEventPlugin:j,EnterLeaveEventPlugin:E,ChangeEventPlugin:e,SelectEventPlugin:x,BeforeInputEventPlugin:g}),B.NativeComponent.injectGenericComponentClass(a),B.NativeComponent.injectTextComponentClass(D),B.Class.injectMixin(o),B.DOMProperty.injectDOMPropertyConfig(N),B.DOMProperty.injectDOMPropertyConfig(l),B.EmptyComponent.injectEmptyComponent("noscript"),B.Updates.injectReconcileTransaction(u),B.Updates.injectBatchingStrategy(C),B.RootIndex.injectCreateReactRootIndex(n.canUseDOM?i.createReactRootIndex:y.createReactRootIndex),B.Component.injectEnvironment(c)}}var g=t(728),e=t(730),i=t(731),T=t(733),E=t(734),n=t(20),N=t(737),o=t(739),c=t(196),C=t(321),a=t(743),D=t(320),r=t(751),B=t(752),Q=t(91),s=t(23),u=t(756),x=t(762),y=t(763),j=t(764),l=t(761),w=!1;A.exports={inject:I}},function(A,M,t){"use strict";function I(){if(o.current){var A=o.current.getName();if(A)return" Check the render method of ` + "`" + `"+A+"` + "`" + `."}return""}function g(A,M){if(A._store&&!A._store.validated&&null==A.key){A._store.validated=!0;e("uniqueKey",A,M)}}function e(A,M,t){var g=I();if(!g){var e="string"==typeof t?t:t.displayName||t.name;e&&(g=" Check the top-level render call using <"+e+">.")}var i=a[A]||(a[A]={});if(i[g])return null;i[g]=!0;var T={parentOrOwner:g,url:" See https://fb.me/react-warning-keys for more information.",childOwner:null};return M&&M._owner&&M._owner!==o.current&&(T.childOwner=" It was passed a child from "+M._owner.getName()+"."),T}function i(A,M){if("object"==typeof A)if(Array.isArray(A))for(var t=0;t/,e={CHECKSUM_ATTR_NAME:"data-react-checksum",addChecksumToMarkup:function(A){var M=I(A);return A.replace(g," "+e.CHECKSUM_ATTR_NAME+'="'+M+'"$&')},canReuseMarkup:function(A,M){var t=M.getAttribute(e.CHECKSUM_ATTR_NAME);t=t&&parseInt(t,10);var g=I(A);return g===t}};A.exports=e},function(A,M,t){"use strict";var I=t(135),g=I({INSERT_MARKUP:null,MOVE_EXISTING:null,REMOVE_NODE:null,SET_MARKUP:null,TEXT_CONTENT:null});A.exports=g},function(A,M,t){"use strict";function I(A){if("function"==typeof A.type)return A.type;var M=A.type,t=o[M];return null==t&&(o[M]=t=n(M)),t}function g(A){return N?void 0:E(!1),new N(A.type,A.props)}function e(A){return new c(A)}function i(A){return A instanceof c}var T=t(7),E=t(3),n=null,N=null,o={},c=null,C={injectGenericComponentClass:function(A){N=A},injectTextComponentClass:function(A){c=A},injectComponentClasses:function(A){T(o,A)}},a={getComponentClassForElement:I,createInternalComponent:g,createInstanceForText:e,isTextComponent:i,injection:C};A.exports=a},function(A,M,t){"use strict";function I(A,M){}var g=(t(8),{isMounted:function(A){return!1},enqueueCallback:function(A,M){},enqueueForceUpdate:function(A){I(A,"forceUpdate")},enqueueReplaceState:function(A,M){I(A,"replaceState")},enqueueSetState:function(A,M){I(A,"setState")},enqueueSetProps:function(A,M){I(A,"setProps")},enqueueReplaceProps:function(A,M){I(A,"replaceProps")}});A.exports=g},function(A,M,t){"use strict";function I(A){function M(M,t,I,g,e,i){if(g=g||y,i=i||I,null==t[I]){var T=s[e];return M?new Error("Required "+T+" ` + "`" + `"+i+"` + "`" + ` was not specified in "+("` + "`" + `"+g+"` + "`" + `.")):null}return A(t,I,g,e,i)}var 
t=M.bind(null,!1);return t.isRequired=M.bind(null,!0),t}function g(A){function M(M,t,I,g,e){var i=M[t],T=D(i);if(T!==A){var E=s[g],n=r(i);return new Error("Invalid "+E+" ` + "`" + `"+e+"` + "`" + ` of type "+("` + "`" + `"+n+"` + "`" + ` supplied to ` + "`" + `"+I+"` + "`" + `, expected ")+("` + "`" + `"+A+"` + "`" + `."))}return null}return I(M)}function e(){return I(u.thatReturns(null))}function i(A){function M(M,t,I,g,e){var i=M[t];if(!Array.isArray(i)){var T=s[g],E=D(i);return new Error("Invalid "+T+" ` + "`" + `"+e+"` + "`" + ` of type "+("` + "`" + `"+E+"` + "`" + ` supplied to ` + "`" + `"+I+"` + "`" + `, expected an array."))}for(var n=0;n>"}var Q=t(29),s=t(128),u=t(47),x=t(205),y="<>",j={array:g("array"),bool:g("boolean"),func:g("function"),number:g("number"),object:g("object"),string:g("string"),any:e(),arrayOf:i,element:T(),instanceOf:E,node:c(),objectOf:N,oneOf:n,oneOfType:o,shape:C};A.exports=j},function(A,M){"use strict";var t={injectCreateReactRootIndex:function(A){I.createReactRootIndex=A}},I={createReactRootIndex:null,injection:t};A.exports=I},function(A,M){"use strict";var t={currentScrollLeft:0,currentScrollTop:0,refreshScrollValues:function(A){t.currentScrollLeft=A.x,t.currentScrollTop=A.y}};A.exports=t},function(A,M,t){"use strict";function I(A,M){if(null==M?g(!1):void 0,null==A)return M;var t=Array.isArray(A),I=Array.isArray(M);return t&&I?(A.push.apply(A,M),A):t?(A.push(M),A):I?[A].concat(M):[A,M]}var g=t(3);A.exports=I},function(A,M){"use strict";var t=function(A,M,t){Array.isArray(A)?A.forEach(M,t):A&&M.call(t,A)};A.exports=t},function(A,M,t){"use strict";function I(){return!e&&g.canUseDOM&&(e="textContent"in document.documentElement?"textContent":"innerText"),e}var g=t(20),e=null;A.exports=I},function(A,M){"use strict";function t(A){var M=A&&A.nodeName&&A.nodeName.toLowerCase();return M&&("input"===M&&I[A.type]||"textarea"===M)}var I={color:!0,date:!0,datetime:!0,"datetime-local":!0,email:!0,month:!0,number:!0,password:!0,range:!0,search:!0,tel:!0,text:!0,time:!0,url:!0,week:!0};A.exports=t},function(A,M,t){"use strict";var I=t(47),g={listen:function(A,M,t){return A.addEventListener?(A.addEventListener(M,t,!1),{remove:function(){A.removeEventListener(M,t,!1)}}):A.attachEvent?(A.attachEvent("on"+M,t),{remove:function(){A.detachEvent("on"+M,t)}}):void 0},capture:function(A,M,t){return A.addEventListener?(A.addEventListener(M,t,!0),{remove:function(){A.removeEventListener(M,t,!0)}}):{remove:I}},registerDefault:function(){}};A.exports=g},function(A,M,t){"use strict";function I(A,M){var t=!0;A:for(;t;){var I=A,e=M;if(t=!1,I&&e){if(I===e)return!0;if(g(I))return!1;if(g(e)){A=I,M=e.parentNode,t=!0;continue A}return I.contains?I.contains(e):!!I.compareDocumentPosition&&!!(16&I.compareDocumentPosition(e))}return!1}}var g=t(790);A.exports=I},function(A,M){"use strict";function t(A){ +try{A.focus()}catch(A){}}A.exports=t},function(A,M){"use strict";function t(){if("undefined"==typeof document)return null;try{return document.activeElement||document.body}catch(A){return document.body}}A.exports=t},function(A,M,t){"use strict";function I(A){return i?void 0:e(!1),c.hasOwnProperty(A)||(A="*"),T.hasOwnProperty(A)||("*"===A?i.innerHTML="":i.innerHTML="<"+A+">",T[A]=!i.firstChild),T[A]?c[A]:null}var g=t(20),e=t(3),i=g.canUseDOM?document.createElement("div"):null,T={},E=[1,'"],n=[1,"","
"],N=[3,"","
"],o=[1,'',""],c={"*":[1,"?
","
"],area:[1,"",""],col:[2,"","
"],legend:[1,"
","
"],param:[1,"",""],tr:[2,"","
"],optgroup:E,option:E,caption:n,colgroup:n,tbody:n,tfoot:n,thead:n,td:N,th:N},C=["circle","clipPath","defs","ellipse","g","image","line","linearGradient","mask","path","pattern","polygon","polyline","radialGradient","rect","stop","text","tspan"];C.forEach(function(A){c[A]=o,T[A]=!0}),A.exports=I},function(A,M){"use strict";function t(A,M){if(A===M)return!0;if("object"!=typeof A||null===A||"object"!=typeof M||null===M)return!1;var t=Object.keys(A),g=Object.keys(M);if(t.length!==g.length)return!1;for(var e=I.bind(M),i=0;i=0&&s.splice(M,1)}function T(A){var M=document.createElement("style");return M.type="text/css",e(A,M),M}function E(A){var M=document.createElement("link");return M.rel="stylesheet",e(A,M),M}function n(A,M){var t,I,g;if(M.singleton){var e=Q++;t=B||(B=T(M)),I=N.bind(null,t,e,!1),g=N.bind(null,t,e,!0)}else A.sourceMap&&"function"==typeof URL&&"function"==typeof URL.createObjectURL&&"function"==typeof URL.revokeObjectURL&&"function"==typeof Blob&&"function"==typeof btoa?(t=E(M),I=c.bind(null,t),g=function(){i(t),t.href&&URL.revokeObjectURL(t.href)}):(t=T(M),I=o.bind(null,t),g=function(){i(t)});return I(A),function(M){if(M){if(M.css===A.css&&M.media===A.media&&M.sourceMap===A.sourceMap)return;I(A=M)}else g()}}function N(A,M,t,I){var g=t?"":I.css;if(A.styleSheet)A.styleSheet.cssText=u(M,g);else{var e=document.createTextNode(g),i=A.childNodes;i[M]&&A.removeChild(i[M]),i.length?A.insertBefore(e,i[M]):A.appendChild(e)}}function o(A,M){var t=M.css,I=M.media;if(I&&A.setAttribute("media",I),A.styleSheet)A.styleSheet.cssText=t;else{for(;A.firstChild;)A.removeChild(A.firstChild);A.appendChild(document.createTextNode(t))}}function c(A,M){var t=M.css,I=M.sourceMap;I&&(t+="\n/*# sourceMappingURL=data:application/json;base64,"+btoa(unescape(encodeURIComponent(JSON.stringify(I))))+" */");var g=new Blob([t],{type:"text/css"}),e=A.href;A.href=URL.createObjectURL(g),e&&URL.revokeObjectURL(e)}var C={},a=function(A){var M;return function(){return"undefined"==typeof M&&(M=A.apply(this,arguments)),M}},D=a(function(){return/msie [6-9]\b/.test(self.navigator.userAgent.toLowerCase())}),r=a(function(){return document.head||document.getElementsByTagName("head")[0]}),B=null,Q=0,s=[];A.exports=function(A,M){M=M||{},"undefined"==typeof M.singleton&&(M.singleton=D()),"undefined"==typeof M.insertAt&&(M.insertAt="bottom");var t=g(A);return I(t,M),function(A){for(var e=[],i=0;i-1})))}},{key:"listObjects",value:function(){var A=this.props.dispatch;A(eA.listObjects())}},{key:"selectPrefix",value:function(A,M){A.preventDefault();var t=this.props,I=(t.dispatch,t.currentPath),g=(t.web,t.currentBucket),e=encodeURI(M);if(M.endsWith("/")||""===M){if(M===I)return;a.default.push(TA.pathJoin(g,e))}else window.location=window.location.origin+"/minio/download/"+g+"/"+e+"?token="+aA.default.getItem("token")}},{key:"makeBucket",value:function(A){A.preventDefault();var M=this.refs.makeBucketRef.value;this.refs.makeBucketRef.value="";var t=this.props,I=t.web,g=t.dispatch;this.hideMakeBucketModal(),I.MakeBucket({bucketName:M}).then(function(){g(eA.addBucket(M)),g(eA.selectBucket(M))}).catch(function(A){return g(eA.showAlert({type:"danger",message:A.message}))})}},{key:"hideMakeBucketModal",value:function(){var A=this.props.dispatch;A(eA.hideMakeBucketModal())}},{key:"showMakeBucketModal",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.showMakeBucketModal())}},{key:"showAbout",value:function(A){A.preventDefault();var 
M=this.props.dispatch;M(eA.showAbout())}},{key:"hideAbout",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.hideAbout())}},{key:"showBucketPolicy",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.showBucketPolicy())}},{key:"hideBucketPolicy",value:function(A){A.preventDefault();var M=this.props.dispatch;M(eA.hideBucketPolicy())}},{key:"uploadFile",value:function(A){A.preventDefault();var M=this.props,t=M.dispatch,I=M.buckets;if(0===I.length)return void t(eA.showAlert({type:"danger",message:"Bucket needs to be created before trying to upload files."}));var g=A.target.files[0];A.target.value=null,this.xhr=new XMLHttpRequest,t(eA.uploadFile(g,this.xhr))}},{key:"removeObject",value:function(){var A=this,M=this.props,t=M.web,I=M.dispatch,g=M.currentPath,e=M.currentBucket,i=M.deleteConfirmation,T=M.checkedObjects,E=[];E=T.length>0?T.map(function(A){return""+g+A}):[i.object],t.RemoveObject({bucketname:e,objects:E}).then(function(){if(A.hideDeleteConfirmation(),T.length>0){for(var M=0;M0})},N.default.createElement("span",{className:"la-label"},N.default.createElement("i",{className:"fa fa-check-circle"})," ",C.length," Objects selected"),N.default.createElement("span",{className:"la-actions pull-right"},N.default.createElement("button",{onClick:this.downloadSelected.bind(this)}," Download all as zip ")),N.default.createElement("span",{className:"la-actions pull-right"},N.default.createElement("button",{onClick:this.showDeleteConfirmation.bind(this)}," Delete selected ")),N.default.createElement("i",{className:"la-close fa fa-times",onClick:this.clearSelected.bind(this)})),N.default.createElement(f.default,null,b,N.default.createElement("header",{className:"fe-header-mobile hidden-lg hidden-md"},N.default.createElement("div",{id:"feh-trigger",className:"feh-trigger "+(0,c.default)({"feht-toggled":y}),onClick:this.toggleSidebar.bind(this,!y)},N.default.createElement("div",{className:"feht-lines"},N.default.createElement("div",{className:"top"}),N.default.createElement("div",{className:"center"}),N.default.createElement("div",{className:"bottom"}))),N.default.createElement("img",{className:"mh-logo",src:IA.default,alt:""})),N.default.createElement("header",{className:"fe-header"},N.default.createElement(G.default,{selectPrefix:this.selectPrefix.bind(this)}),AA,N.default.createElement("ul",{className:"feh-actions"},N.default.createElement(v.default,null),K,_)),N.default.createElement("div",{className:"feb-container"},N.default.createElement("header",{className:"fesl-row","data-type":"folder"},N.default.createElement("div",{className:"fesl-item fesl-item-icon"}),N.default.createElement("div",{className:"fesl-item fesl-item-name",onClick:this.sortObjectsByName.bind(this),"data-sort":"name"},"Name",N.default.createElement("i",{className:(0,c.default)({"fesli-sort":!0,fa:!0,"fa-sort-alpha-desc":i,"fa-sort-alpha-asc":!i})})),N.default.createElement("div",{className:"fesl-item fesl-item-size",onClick:this.sortObjectsBySize.bind(this),"data-sort":"size"},"Size",N.default.createElement("i",{className:(0,c.default)({"fesli-sort":!0,fa:!0,"fa-sort-amount-desc":T,"fa-sort-amount-asc":!T})})),N.default.createElement("div",{className:"fesl-item fesl-item-modified",onClick:this.sortObjectsByDate.bind(this),"data-sort":"last-modified"},"Last Modified",N.default.createElement("i",{className:(0,c.default)({"fesli-sort":!0,fa:!0,"fa-sort-numeric-desc":E,"fa-sort-numeric-asc":!E})})),N.default.createElement("div",{className:"fesl-item 
fesl-item-actions"}))),N.default.createElement("div",{className:"feb-container"},N.default.createElement(rA.default,{loadMore:this.listObjects.bind(this),hasMore:J,useWindow:!0,initialLoad:!1},N.default.createElement(F.default,{dataType:this.dataType.bind(this),selectPrefix:this.selectPrefix.bind(this),showDeleteConfirmation:this.showDeleteConfirmation.bind(this),shareObject:this.shareObject.bind(this),checkObject:this.checkObject.bind(this),checkedObjectsArray:C})),N.default.createElement("div",{className:"text-center",style:{display:J&&S?"block":"none"}},N.default.createElement("span",null,"Loading..."))),N.default.createElement(X.default,null),eA,N.default.createElement(s.default,{className:"modal-create-bucket",bsSize:"small",animation:!1,show:g,onHide:this.hideMakeBucketModal.bind(this)},N.default.createElement("button",{className:"close close-alt",onClick:this.hideMakeBucketModal.bind(this)},N.default.createElement("span",null,"×")),N.default.createElement(x.default,null,N.default.createElement("form",{onSubmit:this.makeBucket.bind(this)},N.default.createElement("div",{className:"input-group"},N.default.createElement("input",{className:"ig-text",type:"text",ref:"makeBucketRef",placeholder:"Bucket Name",autoFocus:!0}),N.default.createElement("i",{className:"ig-helpers"}))))),N.default.createElement(s.default,{className:"modal-about modal-dark",animation:!1,show:n,onHide:this.hideAbout.bind(this)},N.default.createElement("button",{className:"close",onClick:this.hideAbout.bind(this)},N.default.createElement("span",null,"×")),N.default.createElement("div",{className:"ma-inner"},N.default.createElement("div",{className:"mai-item hidden-xs"},N.default.createElement("a",{href:"https://minio.io",target:"_blank"},N.default.createElement("img",{className:"maii-logo",src:IA.default,alt:""}))),N.default.createElement("div",{className:"mai-item"},N.default.createElement("ul",{className:"maii-list"},N.default.createElement("li",null,N.default.createElement("div",null,"Version"),N.default.createElement("small",null,D)),N.default.createElement("li",null,N.default.createElement("div",null,"Memory"),N.default.createElement("small",null,B)),N.default.createElement("li",null,N.default.createElement("div",null,"Platform"),N.default.createElement("small",null,Q)),N.default.createElement("li",null,N.default.createElement("div",null,"Runtime"),N.default.createElement("small",null,u)))))),N.default.createElement(s.default,{className:"modal-policy",animation:!1,show:o,onHide:this.hideBucketPolicy.bind(this)},N.default.createElement(j.default,null,"Bucket Policy (",S,")",N.default.createElement("button",{className:"close close-alt",onClick:this.hideBucketPolicy.bind(this)},N.default.createElement("span",null,"×"))),N.default.createElement("div",{className:"pm-body"},N.default.createElement(Z.default,{bucket:S}),d.map(function(A,M){return N.default.createElement(q.default,{key:M,prefix:A.prefix,policy:A.policy})}))),N.default.createElement(MA.default,{show:p.show,icon:"fa fa-exclamation-triangle mci-red",text:"Are you sure you want to delete?",sub:"This cannot be undone!",okText:"Delete",cancelText:"Cancel",okHandler:this.removeObject.bind(this),cancelHandler:this.hideDeleteConfirmation.bind(this)}),N.default.createElement(s.default,{show:U.show,animation:!1,onHide:this.hideShareObjectModal.bind(this),bsSize:"small"},N.default.createElement(j.default,null,"Share Object"),N.default.createElement(x.default,null,N.default.createElement("div",{className:"input-group 
copy-text"},N.default.createElement("label",null,"Shareable Link"),N.default.createElement("input",{type:"text",ref:"copyTextInput",readOnly:"readOnly",value:window.location.protocol+"//"+U.url,onClick:this.selectTexts.bind(this)})),N.default.createElement("div",{className:"input-group",style:{display:m.LoggedIn()?"block":"none"}},N.default.createElement("label",null,"Expires in (Max 7 days)"),N.default.createElement("div",{className:"set-expire"},N.default.createElement("div",{className:"set-expire-item"},N.default.createElement("i",{className:"set-expire-increase",onClick:this.handleExpireValue.bind(this,"expireDays",1,U.object)}),N.default.createElement("div",{className:"set-expire-title"},"Days"),N.default.createElement("div",{className:"set-expire-value"},N.default.createElement("input",{ref:"expireDays",type:"number",min:0,max:7,defaultValue:5,readOnly:"readOnly"})),N.default.createElement("i",{className:"set-expire-decrease",onClick:this.handleExpireValue.bind(this,"expireDays",-1,U.object)})),N.default.createElement("div",{className:"set-expire-item"},N.default.createElement("i",{className:"set-expire-increase",onClick:this.handleExpireValue.bind(this,"expireHours",1,U.object)}),N.default.createElement("div",{className:"set-expire-title"},"Hours"),N.default.createElement("div",{className:"set-expire-value"},N.default.createElement("input",{ref:"expireHours",type:"number",min:0,max:23,defaultValue:0,readOnly:"readOnly"})),N.default.createElement("i",{className:"set-expire-decrease",onClick:this.handleExpireValue.bind(this,"expireHours",-1,U.object)})),N.default.createElement("div",{className:"set-expire-item"},N.default.createElement("i",{className:"set-expire-increase",onClick:this.handleExpireValue.bind(this,"expireMins",1,U.object)}),N.default.createElement("div",{className:"set-expire-title"},"Minutes"),N.default.createElement("div",{className:"set-expire-value"},N.default.createElement("input",{ref:"expireMins",type:"number",min:0,max:59,defaultValue:0,readOnly:"readOnly"})),N.default.createElement("i",{className:"set-expire-decrease",onClick:this.handleExpireValue.bind(this,"expireMins",-1,U.object)}))))),N.default.createElement("div",{className:"modal-footer"},N.default.createElement(cA.default,{text:window.location.protocol+"//"+U.url,onCopy:this.showMessage.bind(this)},N.default.createElement("button",{className:"btn btn-success"},"Copy Link")),N.default.createElement("button",{className:"btn btn-link",onClick:this.hideShareObjectModal.bind(this)},"Cancel"))),H)))}}]),M}(N.default.Component);M.default=BA},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(2),e=I(g),i=t(45),T=I(i),E=t(173),n=I(E),N=function(A){var M=A.fullScreenFunc,t=A.aboutFunc,I=A.settingsFunc,g=A.logoutFunc;return e.default.createElement("li",null,e.default.createElement(n.default,{pullRight:!0,id:"top-right-menu"},e.default.createElement(n.default.Toggle,{noCaret:!0},e.default.createElement("i",{className:"fa fa-reorder"})),e.default.createElement(n.default.Menu,{className:"dropdown-menu-right"},e.default.createElement("li",null,e.default.createElement("a",{target:"_blank",href:"https://github.com/minio/minio"},"Github ",e.default.createElement("i",{className:"fa fa-github"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:M},"Fullscreen ",e.default.createElement("i",{className:"fa 
fa-expand"}))),e.default.createElement("li",null,e.default.createElement("a",{target:"_blank",href:"https://docs.minio.io/"},"Documentation ",e.default.createElement("i",{className:"fa fa-book"}))),e.default.createElement("li",null,e.default.createElement("a",{target:"_blank",href:"https://slack.minio.io" +},"Ask for help ",e.default.createElement("i",{className:"fa fa-question-circle"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:t},"About ",e.default.createElement("i",{className:"fa fa-info-circle"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:I},"Settings ",e.default.createElement("i",{className:"fa fa-cog"}))),e.default.createElement("li",null,e.default.createElement("a",{href:"",onClick:g},"Sign Out ",e.default.createElement("i",{className:"fa fa-sign-out"}))))))};M.default=(0,T.default)(function(A){return A})(N)},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(2),e=I(g),i=t(45),T=I(i),E=t(177),n=I(E),N=t(176),o=I(N),c=function(A){var M=A.latestUiVersion;return M===currentUiVersion?e.default.createElement("noscript",null):e.default.createElement("li",{className:"hidden-xs hidden-sm"},e.default.createElement("a",{href:""},e.default.createElement(o.default,{placement:"left",overlay:e.default.createElement(n.default,{id:"tt-version-update"},"New update available. Click to refresh.")}," ",e.default.createElement("i",{className:"fa fa-refresh"})," ")))};M.default=(0,T.default)(function(A){return{latestUiVersion:A.latestUiVersion}})(c)},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=function(){function A(A,M){for(var t=0;t0&&(A.name.endsWith("/")||(B=e.default.createElement(C.default,{id:"fia-dropdown-"+A.name.replace(".","-")},e.default.createElement(C.default.Toggle,{noCaret:!0,className:"fia-toggle"}),e.default.createElement(C.default.Menu,null,e.default.createElement("a",{href:"",className:"fiad-action",onClick:function(M){return E(M,""+t+A.name)}},e.default.createElement("i",{className:"fa fa-copy"})),Q))));var s="",u="";return c.indexOf(A.name)>-1&&(s=" fesl-row-selected",u=!0),e.default.createElement("div",{key:M,className:"fesl-row "+r+s,"data-type":g(A.name,A.contentType)},e.default.createElement("div",{className:"fesl-item fesl-item-icon"},e.default.createElement("div",{className:"fi-select"},e.default.createElement("input",{type:"checkbox",name:A.name,checked:u,onChange:function(M){return o(M,A.name)}}),e.default.createElement("i",{className:"fis-icon"}),e.default.createElement("i",{className:"fis-helper"}))),e.default.createElement("div",{className:"fesl-item 
fesl-item-name"},e.default.createElement("a",{href:"",onClick:function(M){return I(M,""+t+A.name)}},A.name)),e.default.createElement("div",{className:"fesl-item fesl-item-size"},a),e.default.createElement("div",{className:"fesl-item fesl-item-modified"},D),e.default.createElement("div",{className:"fesl-item fesl-item-actions"},B))});return e.default.createElement("div",null,a)};M.default=(0,o.default)(function(A){return{objects:A.objects,currentPath:A.currentPath,loadPath:A.loadPath}})(a)},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(2),e=I(g),i=t(45),T=I(i),E=function(A){var M=A.currentBucket,t=A.currentPath,I=A.selectPrefix,g=[],i="";return t&&(i=t.split("/").map(function(A,M){g.push(A);var t=g.join("/")+"/";return e.default.createElement("span",{key:M},e.default.createElement("a",{href:"",onClick:function(A){return I(A,t)}},A))})),e.default.createElement("h2",null,e.default.createElement("span",{className:"main"},e.default.createElement("a",{onClick:function(A){return I(A,"")},href:""},M)),i)};M.default=(0,T.default)(function(A){return{currentBucket:A.currentBucket,currentPath:A.currentPath}})(E)},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=function(){function A(A,M){for(var t=0;t0&&void 0!==arguments[0]?arguments[0]:{buckets:[],visibleBuckets:[],objects:[],istruncated:!0,storageInfo:{},serverInfo:{},currentBucket:"",currentPath:"",showMakeBucketModal:!1,uploads:{},alert:{show:!1,type:"danger",message:""},loginError:!1,sortNameOrder:!1,sortSizeOrder:!1,sortDateOrder:!1,latestUiVersion:currentUiVersion,sideBarActive:!1,loginRedirectPath:E.minioBrowserPrefix,settings:{accessKey:"",secretKey:"",secretKeyVisible:!1},showSettings:!1,policies:[],deleteConfirmation:{object:"",show:!1},shareObject:{show:!1,url:"",object:""},prefixWritable:!1,checkedObjects:[]},M=arguments[1],t=Object.assign({},A);switch(M.type){case T.SET_WEB:t.web=M.web;break;case T.SET_BUCKETS:t.buckets=M.buckets;break;case T.ADD_BUCKET:t.buckets=[M.bucket].concat(e(t.buckets)),t.visibleBuckets=[M.bucket].concat(e(t.visibleBuckets));break;case T.SET_VISIBLE_BUCKETS:t.visibleBuckets=M.visibleBuckets;break;case T.SET_CURRENT_BUCKET:t.currentBucket=M.currentBucket;break;case T.APPEND_OBJECTS:t.objects=[].concat(e(t.objects),e(M.objects)),t.marker=M.marker,t.istruncated=M.istruncated;break;case T.SET_OBJECTS:t.objects=[].concat(e(M.objects));break;case T.RESET_OBJECTS:t.objects=[],t.marker="",t.istruncated=!1;break;case T.SET_CURRENT_PATH:t.currentPath=M.currentPath;break;case T.SET_STORAGE_INFO:t.storageInfo=M.storageInfo;break;case T.SET_SERVER_INFO:t.serverInfo=M.serverInfo;break;case 
T.SHOW_MAKEBUCKET_MODAL:t.showMakeBucketModal=M.showMakeBucketModal;break;case T.UPLOAD_PROGRESS:t.uploads=Object.assign({},t.uploads),t.uploads[M.slug].loaded=M.loaded;break;case T.ADD_UPLOAD:t.uploads=Object.assign({},t.uploads,g({},M.slug,{loaded:0,size:M.size,xhr:M.xhr,name:M.name}));break;case T.STOP_UPLOAD:t.uploads=Object.assign({},t.uploads),delete t.uploads[M.slug];break;case T.SET_ALERT:t.alert.alertTimeout&&clearTimeout(t.alert.alertTimeout),M.alert.show?t.alert=M.alert:t.alert=Object.assign({},t.alert,{show:!1});break;case T.SET_LOGIN_ERROR:t.loginError=!0;break;case T.SET_SHOW_ABORT_MODAL:t.showAbortModal=M.showAbortModal;break;case T.SHOW_ABOUT:t.showAbout=M.showAbout;break;case T.SET_SORT_NAME_ORDER:t.sortNameOrder=M.sortNameOrder;break;case T.SET_SORT_SIZE_ORDER:t.sortSizeOrder=M.sortSizeOrder;break;case T.SET_SORT_DATE_ORDER:t.sortDateOrder=M.sortDateOrder;break;case T.SET_LATEST_UI_VERSION:t.latestUiVersion=M.latestUiVersion;break;case T.SET_SIDEBAR_STATUS:t.sidebarStatus=M.sidebarStatus;break;case T.SET_LOGIN_REDIRECT_PATH:t.loginRedirectPath=M.path;case T.SET_LOAD_BUCKET:t.loadBucket=M.loadBucket;break;case T.SET_LOAD_PATH:t.loadPath=M.loadPath;break;case T.SHOW_SETTINGS:t.showSettings=M.showSettings;break;case T.SET_SETTINGS:t.settings=Object.assign({},t.settings,M.settings);break;case T.SHOW_BUCKET_POLICY:t.showBucketPolicy=M.showBucketPolicy;break;case T.SET_POLICIES:t.policies=M.policies;break;case T.DELETE_CONFIRMATION:t.deleteConfirmation=Object.assign({},M.payload);break;case T.SET_SHARE_OBJECT:t.shareObject=Object.assign({},M.shareObject);break;case T.SET_PREFIX_WRITABLE:t.prefixWritable=M.prefixWritable;break;case T.REMOVE_OBJECT:var I=t.objects.findIndex(function(A){return A.name===M.object; +});if(I==-1)break;t.objects=[].concat(e(t.objects.slice(0,I)),e(t.objects.slice(I+1)));break;case T.CHECKED_OBJECTS_ADD:t.checkedObjects=[].concat(e(t.checkedObjects),[M.objectName]);break;case T.CHECKED_OBJECTS_REMOVE:var i=t.checkedObjects.indexOf(M.objectName);if(i==-1)break;t.checkedObjects=[].concat(e(t.checkedObjects.slice(0,i)),e(t.checkedObjects.slice(i+1)));break;case T.CHECKED_OBJECTS_RESET:t.checkedObjects=[]}return t}},function(A,M,t){"use strict";function I(A){if(Array.isArray(A)){for(var M=0,t=Array(A.length);MM.name.toLowerCase()?1:0}),g=g.sort(function(A,M){return A.name.toLowerCase()M.name.toLowerCase()?1:0}),M&&(t=t.reverse(),g=g.reverse()),[].concat(I(t),I(g))},M.sortObjectsBySize=function(A,M){var t=A.filter(function(A){return A.name.endsWith("/")}),g=A.filter(function(A){return!A.name.endsWith("/")});return g=g.sort(function(A,M){return A.size-M.size}),M&&(g=g.reverse()),[].concat(I(t),I(g))},M.sortObjectsByDate=function(A,M){var t=A.filter(function(A){return A.name.endsWith("/")}),g=A.filter(function(A){return!A.name.endsWith("/")});return g=g.sort(function(A,M){return new Date(A.lastModified).getTime()-new Date(M.lastModified).getTime()}),M&&(g=g.reverse()),[].concat(I(t),I(g))},M.pathSlice=function(A){A=A.replace(g.minioBrowserPrefix,"");var M="",t="";if(!A)return{bucket:t,prefix:M};var I=A.indexOf("/",1);return I==-1?(t=A.slice(1),{bucket:t,prefix:M}):(t=A.slice(1,I),M=A.slice(I+1),{bucket:t,prefix:M})},M.pathJoin=function(A,M){return M||(M=""),g.minioBrowserPrefix+"/"+A+"/"+M}},function(A,M,t){"use strict";function I(A){if(A&&A.__esModule)return A;var M={};if(null!=A)for(var t in A)Object.prototype.hasOwnProperty.call(A,t)&&(M[t]=A[t]);return M.default=A,M}function g(A){return A&&A.__esModule?A:{default:A}}function e(A,M){if(!(A instanceof 
[core-js polyfill modules pulled in by babel-polyfill: ES2015+ shims for Array, Date, Function, Math, Number, Object, Promise, Reflect, RegExp, String, Symbol, typed arrays, Map/Set/WeakMap/WeakSet and Observable, plus assorted proposals (padStart/padEnd, trimLeft/trimRight, Object.entries/values, __defineGetter__/__lookupGetter__ and friends) and DOM collection iterators.]
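Each of those core-js modules registers a single shim through the bundle's export helper; stripped of that wiring, a representative one (the Math.log10 module) reduces to a few lines:

```
// Standalone equivalent of the Math.log10 shim bundled here: install it only
// when the engine lacks it, using log10(x) = ln(x) / ln(10). The real module
// routes through core-js's $export helper rather than assigning onto Math.
if (typeof Math.log10 !== 'function') {
  Math.log10 = function log10(x) {
    return Math.log(x) / Math.LN10
  }
}

console.log(Math.log10(1000)) // 3 (up to floating-point error)
```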
[setTimeout/setInterval/setImmediate shims, the bundle's ordered module manifest (the chained t(...) require list), and the embedded Font Awesome 4.7.0 stylesheet: the @font-face declarations and the .fa-* icon class definitions inlined via css-loader.]
fore{content:"\\F274"}.fa-industry:before{content:"\\F275"}.fa-map-pin:before{content:"\\F276"}.fa-map-signs:before{content:"\\F277"}.fa-map-o:before{content:"\\F278"}.fa-map:before{content:"\\F279"}.fa-commenting:before{content:"\\F27A"}.fa-commenting-o:before{content:"\\F27B"}.fa-houzz:before{content:"\\F27C"}.fa-vimeo:before{content:"\\F27D"}.fa-black-tie:before{content:"\\F27E"}.fa-fonticons:before{content:"\\F280"}.fa-reddit-alien:before{content:"\\F281"}.fa-edge:before{content:"\\F282"}.fa-credit-card-alt:before{content:"\\F283"}.fa-codiepie:before{content:"\\F284"}.fa-modx:before{content:"\\F285"}.fa-fort-awesome:before{content:"\\F286"}.fa-usb:before{content:"\\F287"}.fa-product-hunt:before{content:"\\F288"}.fa-mixcloud:before{content:"\\F289"}.fa-scribd:before{content:"\\F28A"}.fa-pause-circle:before{content:"\\F28B"}.fa-pause-circle-o:before{content:"\\F28C"}.fa-stop-circle:before{content:"\\F28D"}.fa-stop-circle-o:before{content:"\\F28E"}.fa-shopping-bag:before{content:"\\F290"}.fa-shopping-basket:before{content:"\\F291"}.fa-hashtag:before{content:"\\F292"}.fa-bluetooth:before{content:"\\F293"}.fa-bluetooth-b:before{content:"\\F294"}.fa-percent:before{content:"\\F295"}.fa-gitlab:before{content:"\\F296"}.fa-wpbeginner:before{content:"\\F297"}.fa-wpforms:before{content:"\\F298"}.fa-envira:before{content:"\\F299"}.fa-universal-access:before{content:"\\F29A"}.fa-wheelchair-alt:before{content:"\\F29B"}.fa-question-circle-o:before{content:"\\F29C"}.fa-blind:before{content:"\\F29D"}.fa-audio-description:before{content:"\\F29E"}.fa-volume-control-phone:before{content:"\\F2A0"}.fa-braille:before{content:"\\F2A1"}.fa-assistive-listening-systems:before{content:"\\F2A2"}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:"\\F2A3"}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:"\\F2A4"}.fa-glide:before{content:"\\F2A5"}.fa-glide-g:before{content:"\\F2A6"}.fa-sign-language:before,.fa-signing:before{content:"\\F2A7"}.fa-low-vision:before{content:"\\F2A8"}.fa-viadeo:before{content:"\\F2A9"}.fa-viadeo-square:before{content:"\\F2AA"}.fa-snapchat:before{content:"\\F2AB"}.fa-snapchat-ghost:before{content:"\\F2AC"}.fa-snapchat-square:before{content:"\\F2AD"}.fa-pied-piper:before{content:"\\F2AE"}.fa-first-order:before{content:"\\F2B0"}.fa-yoast:before{content:"\\F2B1"}.fa-themeisle:before{content:"\\F2B2"}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:"\\F2B3"}.fa-fa:before,.fa-font-awesome:before{content:"\\F2B4"}.fa-handshake-o:before{content:"\\F2B5"}.fa-envelope-open:before{content:"\\F2B6"}.fa-envelope-open-o:before{content:"\\F2B7"}.fa-linode:before{content:"\\F2B8"}.fa-address-book:before{content:"\\F2B9"}.fa-address-book-o:before{content:"\\F2BA"}.fa-address-card:before,.fa-vcard:before{content:"\\F2BB"}.fa-address-card-o:before,.fa-vcard-o:before{content:"\\F2BC"}.fa-user-circle:before{content:"\\F2BD"}.fa-user-circle-o:before{content:"\\F2BE"}.fa-user-o:before{content:"\\F2C0"}.fa-id-badge:before{content:"\\F2C1"}.fa-drivers-license:before,.fa-id-card:before{content:"\\F2C2"}.fa-drivers-license-o:before,.fa-id-card-o:before{content:"\\F2C3"}.fa-quora:before{content:"\\F2C4"}.fa-free-code-camp:before{content:"\\F2C5"}.fa-telegram:before{content:"\\F2C6"}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:"\\F2C7"}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\\F2C8"}.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\\F2C9"}.fa-thermometer-1:befor
e,.fa-thermometer-quarter:before{content:"\\F2CA"}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\\F2CB"}.fa-shower:before{content:"\\F2CC"}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:"\\F2CD"}.fa-podcast:before{content:"\\F2CE"}.fa-window-maximize:before{content:"\\F2D0"}.fa-window-minimize:before{content:"\\F2D1"}.fa-window-restore:before{content:"\\F2D2"}.fa-times-rectangle:before,.fa-window-close:before{content:"\\F2D3"}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:"\\F2D4"}.fa-bandcamp:before{content:"\\F2D5"}.fa-grav:before{content:"\\F2D6"}.fa-etsy:before{content:"\\F2D7"}.fa-imdb:before{content:"\\F2D8"}.fa-ravelry:before{content:"\\F2D9"}.fa-eercast:before{content:"\\F2DA"}.fa-microchip:before{content:"\\F2DB"}.fa-snowflake-o:before{content:"\\F2DC"}.fa-superpowers:before{content:"\\F2DD"}.fa-wpexplorer:before{content:"\\F2DE"}.fa-meetup:before{content:"\\F2E0"}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}',""]); -},function(A,M,t){M=A.exports=t(245)(),M.push([A.id,'*,:after,:before{box-sizing:border-box}body{font-family:Lato,sans-serif;font-size:15px;line-height:1.42857143;color:#8e8e8e;background-color:#edecec}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#46a5e0}a,a:focus,a:hover{text-decoration:none}a:focus,a:hover{color:#1f7fba}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.img-responsive{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{padding:4px;line-height:1.42857143;background-color:#edecec;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out;display:inline-block;max-width:100%;height:auto}.img-circle{border-radius:50%}hr{margin-top:21px;margin-bottom:21px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.container{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}.row{margin-left:-15px;margin-right:-15px}.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{position:relative;min-height:1px;padding-left:15px;padding-right:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
(min-width:768px){.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media 
(min-width:1200px){.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em 
.75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;font-size:15px;text-align:left;background-color:#fff;border:1px solid transparent;border-radius:4px;box-shadow:0 6px 12px rgba(0,0,0,.175);background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9.5px 0;overflow:hidden;background-color:rgba(0,0,0,.08)}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#8e8e8e;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{text-decoration:none;color:#333;background-color:rgba(0,0,0,.05)}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#333;text-decoration:none;outline:0;background-color:rgba(0,0,0,.075)}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#e4e4e4}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);cursor:not-allowed}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{left:auto;right:0}.dropdown-menu-left{left:0;right:auto}.dropdown-header{display:block;padding:3px 20px;font-size:13px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;left:0;right:0;bottom:0;top:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0;border-bottom:4px dashed;border-bottom:4px solid\\9;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{left:auto;right:0}.navbar-right .dropdown-menu>li>a{text-align:right}.navbar-right .dropdown-menu-left{left:0;right:auto}}.modal,.modal-open{overflow:hidden}.modal{display:none;position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transform:translateY(-25%);transform:translateY(-25%);-webkit-transition:-webkit-transform .3s ease-out;transition:transform .3s ease-out}.modal.in .modal-dialog{-webkit-transform:translate(0);transform:translate(0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;border:1px solid transparent;border-radius:6px;box-shadow:0 3px 9px rgba(0,0,0,.5);background-clip:padding-box;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:rgba(0,0,0,.1)}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:.5;filter:alpha(opacity=50)}.modal-header{padding:30px 35px 0;border-bottom:1px solid transparent}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:transparent}.modal-body{position:relative;padding:30px 35px}.modal-footer{padding:30px 
35px;text-align:right;border-top:1px solid transparent}.modal-footer .btn+.btn{margin-left:5px;margin-bottom:0}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:400px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:Lato,sans-serif;font-style:normal;font-weight:400;letter-spacing:normal;line-break:auto;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;white-space:normal;word-break:normal;word-spacing:normal;word-wrap:normal;font-size:13px;opacity:0;filter:alpha(opacity=0)}.tooltip.in{opacity:.9;filter:alpha(opacity=90)}.tooltip.top{margin-top:-3px;padding:5px 0}.tooltip.right{margin-left:3px;padding:0 5px}.tooltip.bottom{margin-top:3px;padding:5px 0}.tooltip.left{margin-left:-3px;padding:0 5px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px}.tooltip.top-left .tooltip-arrow,.tooltip.top-right .tooltip-arrow{bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{left:5px}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}@-ms-viewport{width:device-width}.visible-lg,.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and 
(max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}}*{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}:active,:focus{outline:0}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body,html{min-height:100%}a{-webkit-transition:color;transition:color;-webkit-transition-duration:.3s;transition-duration:.3s}button{border:0}.animated.infinite{-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite}@-webkit-keyframes fadeIn{0%{opacity:0}to{opacity:1}}@keyframes fadeIn{0%{opacity:0}to{opacity:1}}.fadeIn{-webkit-animation-name:fadeIn;animation-name:fadeIn}@-webkit-keyframes fadeInDown{0%{opacity:0;-webkit-transform:translateY(-20px)}to{opacity:1;-webkit-transform:translateY(0)}}@keyframes fadeInDown{0%{opacity:0;transform:translateY(-20px)}to{opacity:1;transform:translateY(0)}}.fadeInDown{-webkit-animation-name:fadeInDown;animation-name:fadeInDown}@-webkit-keyframes fadeInUp{0%{opacity:0;-webkit-transform:translateY(20px)}to{opacity:1;-webkit-transform:translateY(0)}}@keyframes fadeInUp{0%{opacity:0;transform:translateY(20px)}to{opacity:1;transform:translateY(0)}}.fadeInUp{-webkit-animation-name:fadeInUp;animation-name:fadeInUp}@-webkit-keyframes fadeOut{0%{opacity:1}to{opacity:0}}@keyframes fadeOut{0%{opacity:1}to{opacity:0}}.fadeOut{-webkit-animation-name:fadeOut;animation-name:fadeOut}@-webkit-keyframes fadeOutDown{0%{opacity:1;-webkit-transform:translateY(0)}to{opacity:0;-webkit-transform:translateY(20px)}}@keyframes fadeOutDown{0%{opacity:1;transform:translateY(0)}to{opacity:0;transform:translateY(20px)}}.fadeOutDown{-webkit-animation-name:fadeOutDown;animation-name:fadeOutDown}@-webkit-keyframes 
fadeOutUp{0%{opacity:1;-webkit-transform:translateY(0)}to{opacity:0;-webkit-transform:translateY(-20px)}}@keyframes fadeOutUp{0%{opacity:1;transform:translateY(0)}to{opacity:0;transform:translateY(-20px)}}.fadeOutUp{-webkit-animation-name:fadeOutUp;animation-name:fadeOutUp}@-webkit-keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}@keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}.text-center{text-align:center!important}.text-left{text-align:left!important}.text-right{text-align:right!important}.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.row:after,.row:before{content:" ";display:table}.clearfix:after,.container-fluid:after,.container:after,.modal-footer:after,.modal-header:after,.row:after{clear:both}.pull-right{float:right!important}.pull-left{float:left!important}.p-relative{position:relative}.m-0{margin:0!important}.m-t-0{margin-top:0!important}.m-b-0{margin-bottom:0!important}.m-l-0{margin-left:0!important}.m-r-0{margin-right:0!important}.m-5{margin:5px!important}.m-t-5{margin-top:5px!important}.m-b-5{margin-bottom:5px!important}.m-l-5{margin-left:5px!important}.m-r-5{margin-right:5px!important}.m-10{margin:10px!important}.m-t-10{margin-top:10px!important}.m-b-10{margin-bottom:10px!important}.m-l-10{margin-left:10px!important}.m-r-10{margin-right:10px!important}.m-15{margin:15px!important}.m-t-15{margin-top:15px!important}.m-b-15{margin-bottom:15px!important}.m-l-15{margin-left:15px!important}.m-r-15{margin-right:15px!important}.m-20{margin:20px!important}.m-t-20{margin-top:20px!important}.m-b-20{margin-bottom:20px!important}.m-l-20{margin-left:20px!important}.m-r-20{margin-right:20px!important}.m-25{margin:25px!important}.m-t-25{margin-top:25px!important}.m-b-25{margin-bottom:25px!important}.m-l-25{margin-left:25px!important}.m-r-25{margin-right:25px!important}.m-30{margin:30px!important}.m-t-30{margin-top:30px!important}.m-b-30{margin-bottom:30px!important}.m-l-30{margin-left:30px!important}.m-r-30{margin-right:30px!important}.p-0{padding:0!important}.p-t-0{padding-top:0!important}.p-b-0{padding-bottom:0!important}.p-l-0{padding-left:0!important}.p-r-0{padding-right:0!important}.p-5{padding:5px!important}.p-t-5{padding-top:5px!important}.p-b-5{padding-bottom:5px!important}.p-l-5{padding-left:5px!important}.p-r-5{padding-right:5px!important}.p-10{padding:10px!important}.p-t-10{padding-top:10px!important}.p-b-10{padding-bottom:10px!important}.p-l-10{padding-left:10px!important}.p-r-10{padding-right:10px!important}.p-15{padding:15px!important}.p-t-15{padding-top:15px!important}.p-b-15{padding-bottom:15px!important}.p-l-15{padding-left:15px!important}.p-r-15{padding-right:15px!important}.p-20{padding:20px!important}.p-t-20{padding-top:20px!important}.p-b-20{padding-bottom:20px!important}.p-l-20{padding-left:20px!important}.p-r-20{padding-right:20px!important}.p-25{padding:25px!important}.p-t-25{padding-top:25px!important}.p-b-25{padding-bottom:25px!important}.p-l-25{padding-left:25px!important}.p-r-25{padding-right:25px!important}.p-30{padding:30px!important}.p-t-30{padding-top:30px!important}.p-b-30{padding-bottom:30px!important}.p-l-30{padding-left:30px!important}.p-r-30{padding-right:30px!important}@font-face{font-family:Lato;src:url('+t(800)+') format("woff2"),url('+t(799)+') 
format("woff");font-weight:400;font-style:normal}.form-control{border:0;border-bottom:1px solid #eee;color:#32393f;padding:5px;width:100%;font-size:13px;background-color:transparent}select.form-control{-webkit-appearance:none;-moz-appearance:none;border-radius:0;background:url('+t(803)+') no-repeat bottom 7px right}.input-group{position:relative}.input-group:not(:last-child){margin-bottom:25px}.input-group label:not(.ig-label){font-size:13px;display:block;margin-bottom:10px}.ig-label{position:absolute;text-align:center;bottom:7px;left:0;width:100%;-webkit-transition:all;transition:all;-webkit-transition-duration:.25s;transition-duration:.25s;padding:2px 0 3px;border-radius:2px;font-weight:400}.ig-helpers{z-index:1;width:100%;left:0}.ig-helpers,.ig-helpers:after,.ig-helpers:before{position:absolute;height:2px;bottom:0}.ig-helpers:after,.ig-helpers:before{content:"";width:0;-webkit-transition:all;transition:all;-webkit-transition-duration:.25s;transition-duration:.25s;background-color:#03a9f4}.ig-helpers:before{left:50%}.ig-helpers:after{right:50%}.ig-text{width:100%;height:40px;border:0;background:transparent!important;text-align:center;position:relative;z-index:1;border-bottom:1px solid #eee;color:#32393f;font-size:13px}.ig-text:focus+.ig-helpers:after,.ig-text:focus+.ig-helpers:before{width:50%}.ig-text:disabled~.ig-label,.ig-text:focus~.ig-label,.ig-text:valid~.ig-label{bottom:35px;font-size:13px;z-index:1}.ig-text:disabled{opacity:.5;filter:alpha(opacity=50)}.ig-dark .ig-text{color:#fff!important;border-color:hsla(0,0%,100%,.1)!important}.ig-dark .ig-helpers:after,.ig-dark .ig-helpers:before{background-color:#dfdfdf;height:1px}.ig-left .ig-label,.ig-left .ig-text{text-align:left}.ig-error .ig-label{color:#e23f3f}.ig-error .ig-helpers i:first-child,.ig-error .ig-helpers i:first-child:after,.ig-error .ig-helpers i:first-child:before{background:rgba(226,63,63,.43)}.ig-error .ig-helpers i:last-child,.ig-error .ig-helpers i:last-child:after,.ig-error .ig-helpers i:last-child:before{background:#e23f3f!important}.ig-error:after{content:"\\F05A";font-family:FontAwesome;position:absolute;top:17px;right:9px;font-size:20px;color:#d33d3e}.ig-search:before{font-family:fontAwesome;content:"\\F002";font-size:15px;position:absolute;left:2px;top:8px}.ig-search .ig-text{padding-left:25px}.set-expire{border:1px solid #eee;margin:35px 0 30px;position:relative}.set-expire:before{content:"";position:absolute;left:0;top:0;width:100%;height:100%;z-index:1}.set-expire-item{padding:9px 5px 3px;position:relative;display:table-cell;width:1%;text-align:center;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.set-expire-item:not(:last-child){border-right:1px solid #eee}.set-expire-title{font-size:10px;text-transform:uppercase}.set-expire-value{display:inline-block;overflow:hidden;position:relative;left:-8px}.set-expire-value input{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;font-size:20px;text-align:center;position:relative;right:-15px;border:0;color:#333;padding:0;height:25px;width:100%;font-weight:400}.set-expire-decrease,.set-expire-increase{position:absolute;width:20px;height:20px;background:url('+t(801)+') no-repeat 
50%;background-size:85%;left:50%;margin-left:-10px;opacity:.2;filter:alpha(opacity=20);cursor:pointer}.set-expire-decrease:hover,.set-expire-increase:hover{opacity:.5;filter:alpha(opacity=50)}.set-expire-increase{top:-25px}.set-expire-decrease{bottom:-27px;-webkit-transform:rotate(-180deg);transform:rotate(-180deg)}.btn{border:0;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:2px;text-align:center;-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}.btn:focus,.btn:hover{opacity:.9;filter:alpha(opacity=90)}.btn-block{display:block;width:100%}.btn-white{color:#5b5b5b;background-color:#fff}.btn-white:focus,.btn-white:hover{color:#5b5b5b;background-color:#f0f0f0}.btn-link{color:#545454;background-color:#eee}.btn-link:focus,.btn-link:hover{color:#545454;background-color:#dfdfdf}.btn-danger{color:#fff;background-color:#ff726f}.btn-danger:focus,.btn-danger:hover{color:#fff;background-color:#ff5450}.btn-primary{color:#fff;background-color:#50b2ff}.btn-primary:focus,.btn-primary:hover{color:#fff;background-color:#31a5ff}.btn-success{color:#fff;background-color:#33d46f}.btn-success:focus,.btn-success:hover{color:#fff;background-color:#28c061}.close{right:15px;font-weight:400;opacity:1;font-size:18px;position:absolute;text-align:center;top:16px;z-index:1;padding:0;border:0;background-color:transparent}.close span{width:25px;height:25px;display:block;border-radius:50%;line-height:24px;text-shadow:none}.close:not(.close-alt) span{background-color:hsla(0,0%,100%,.1);color:hsla(0,0%,100%,.8)}.close:not(.close-alt):focus span,.close:not(.close-alt):hover span{background-color:hsla(0,0%,100%,.2);color:#fff}.close-alt span{background-color:#efefef;color:#989898}.close-alt:focus span,.close-alt:hover span{background-color:#e8e8e8;color:#7b7b7b}.hidden{display:none!important}.copy-text input{width:100%;border-radius:1px;border:1px solid #eee;padding:7px 12px;font-size:13px;line-height:100%;cursor:text;-webkit-transition:border-color;transition:border-color;-webkit-transition-duration:.3s;transition-duration:.3s}.copy-text input:hover{border-color:#e1e1e1}.share-availability{margin-bottom:40px}.share-availability:after,.share-availability:before{position:absolute;bottom:-30px;font-size:10px}.share-availability:before{content:"01 Sec";left:0}.share-availability:after{content:"7 days";right:0}.modal-aheader{height:100px}.modal-aheader:before{height:0!important}.modal-aheader .modal-dialog{margin:0;vertical-align:top}.login{height:100vh;min-height:500px;background:#32393f;text-align:center}.login:before{height:calc(100% - 110px);width:1px;content:""}.l-wrap,.login:before{display:inline-block;vertical-align:middle}.l-wrap{width:80%;max-width:500px;margin-top:-50px}.l-wrap.toggled{display:inline-block}.l-wrap .input-group:not(:last-child){margin-bottom:40px}.l-footer{height:110px;padding:0 50px}.lf-logo{float:right}.lf-logo img{width:40px}.lf-server{float:left;color:hsla(0,0%,100%,.4);font-size:20px;font-weight:400;padding-top:40px}@media (max-width:768px){.lf-logo,.lf-server{float:none;display:block;text-align:center;width:100%}.lf-logo{margin-bottom:5px}.lf-server{font-size:15px}}.lw-btn{width:50px;height:50px;border:1px solid #fff;display:inline-block;border-radius:50%;font-size:22px;color:#fff;-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s;opacity:.3;background-color:transparent;line-height:45px;padding:0}.lw-btn:hover{color:#fff;opacity:.8;border-color:#fff}.lw-btn 
i{display:block;width:100%;padding-left:3px}input:-webkit-autofill{-webkit-box-shadow:0 0 0 50px #32393f inset!important;-webkit-text-fill-color:#fff!important}@media (min-width:668px){.fe-header{position:relative;padding:40px 40px 20px 45px}}@media (max-width:667px){.fe-header{padding:20px}}.fe-header h2{font-size:16px;font-weight:400;margin:0}.fe-header h2>span{margin-bottom:7px;display:inline-block}.fe-header h2>span:not(:first-child):before{content:"/";margin:0 4px;color:#8e8e8e}.fe-header p{margin-top:7px}.feh-usage{margin-top:12px;max-width:285px}@media (max-width:667px){.feh-usage{max-width:100%;font-size:12px}}.feh-usage>ul{margin-top:7px;list-style:none;padding:0}.feh-usage>ul>li{padding-right:0;display:inline-block}.fehu-chart{height:5px;background:#eee;position:relative;border-radius:2px;overflow:hidden}.fehu-chart>div{position:absolute;left:0;height:100%;background:#46a5e0}.feh-actions{list-style:none;padding:0;margin:0;position:absolute;right:35px;top:30px;z-index:11}@media (max-width:991px){.feh-actions{top:7px;right:10px;position:fixed}}.feh-actions>li{display:inline-block;text-align:right;vertical-align:top;line-height:100%}.feh-actions>li>.btn-group>button,.feh-actions>li>a{display:block;height:45px;min-width:45px;text-align:center;border-radius:50%;padding:0;border:0;background:none}@media (min-width:992px){.feh-actions>li>.btn-group>button,.feh-actions>li>a{color:#7b7b7b;font-size:21px;line-height:45px;-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}.feh-actions>li>.btn-group>button:hover,.feh-actions>li>a:hover{background:rgba(0,0,0,.09)}}@media (max-width:991px){.feh-actions>li>.btn-group>button,.feh-actions>li>a{background:url('+t(347)+') no-repeat 50%}.feh-actions>li>.btn-group>button .fa-reorder,.feh-actions>li>a .fa-reorder{display:none}}@media (max-width:991px){.fe-header-mobile{background-color:#32393f;padding:10px 50px 9px 12px;text-align:center;position:fixed;z-index:10;box-shadow:0 0 10px rgba(0,0,0,.3);left:0;top:0;width:100%}.fe-header-mobile .mh-logo{height:35px;position:relative;top:4px}.feh-trigger{width:41px;height:41px;cursor:pointer;float:left;position:relative;text-align:center}.feh-trigger:after,.feh-trigger:before{content:"";position:absolute;top:0;left:0;width:100%;height:100%;border-radius:50%}.feh-trigger:after{z-index:1}.feh-trigger:before{background:hsla(0,0%,100%,.1);-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s;-webkit-transform:scale(0);transform:scale(0)}.feht-toggled:before{-webkit-transform:scale(1);transform:scale(1)}.feht-toggled .feht-lines{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.feht-toggled .feht-lines>div.top{width:12px;transform:translateX(8px) translateY(1px) rotate(45deg);-webkit-transform:translateX(8px) translateY(1px) rotate(45deg)}.feht-toggled .feht-lines>div.bottom{width:12px;transform:translateX(8px) translateY(-1px) rotate(-45deg);-webkit-transform:translateX(8px) translateY(-1px) rotate(-45deg)}.feht-lines,.feht-lines>div{-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}.feht-lines{width:18px;height:12px;display:inline-block;margin-top:14px}.feht-lines>div{background-color:#eaeaea;width:18px;height:2px}.feht-lines>div.center{margin:3px 0}}.fe-sidebar{width:320px;background-color:#32393f;position:fixed;height:100%;overflow:hidden;padding:25px}@media (min-width:992px){.fe-sidebar{-webkit-transform:translateZ(0);transform:translateZ(0)}}@media 
(max-width:991px){.fe-sidebar{padding-top:85px;z-index:9;box-shadow:0 0 10px rgba(0,0,0,.65);-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s;-webkit-transform:translate3d(-335px,0,0);transform:translate3d(-335px,0,0)}.fe-sidebar.toggled{-webkit-transform:translateZ(0);transform:translateZ(0)}}.fe-sidebar a{color:hsla(0,0%,100%,.58)}.fe-sidebar a:hover{color:#fff}.fes-header{margin-bottom:40px}.fes-header h2,.fes-header img{float:left}.fes-header h2{margin:13px 0 0 10px;font-weight:400}.fes-header img{width:32px}.fesl-inner{height:calc(100vh - 260px);overflow:auto;padding:0;margin:0 -25px}.fesl-inner li{position:relative}.fesl-inner li>a{display:block;padding:10px 45px 12px 55px;word-wrap:break-word}.fesl-inner li>a:before{font-family:FontAwesome;content:"\\F0A0";font-size:17px;position:absolute;top:10px;left:25px;opacity:.8;filter:alpha(opacity=80)}.fesl-inner li>a.fesli-loading:before{content:"";width:20px;height:20px;border-radius:50%;-webkit-animation-name:zoomIn;animation-name:zoomIn;-webkit-animation-duration:.5s;animation-duration:.5s;-webkit-animation-fill-mode:both;animation-fill-mode:both;border:2px solid hsla(0,0%,100%,.1);border-bottom-color:hsla(0,0%,100%,.5);position:absolute;z-index:1;-webkit-animation:zoomIn .25s,spin .7s .25s infinite linear;animation:zoomIn .25s,spin .7s .25s infinite linear;left:32px;top:0;bottom:0;margin:auto}.fesl-inner li.active{background-color:#282e32}.fesl-inner li.active>a{color:#fff}.fesl-inner li:not(.active):hover{background-color:rgba(0,0,0,.1)}.fesl-inner li:not(.active):hover>a{color:#fff}.fesl-inner li:hover .fesli-trigger{opacity:.6;filter:alpha(opacity=60)}.fesl-inner li:hover .fesli-trigger:hover{opacity:1;filter:alpha(opacity=100)}.fesl-inner ul{list-style:none;padding:0;margin:0}.fesl-inner:hover .scrollbar-vertical{opacity:1}.fesli-trigger{filter:alpha(opacity=0);-webkit-transition:all;transition:all;-webkit-transition-duration:.2s;transition-duration:.2s;top:0;right:0;width:35px;cursor:pointer;background:url('+t(347)+') no-repeat top 20px left}.fesli-trigger,.scrollbar-vertical{opacity:0;position:absolute;height:100%}.scrollbar-vertical{right:5px;width:4px;-webkit-transition:opacity;transition:opacity;-webkit-transition-duration:.3s;transition-duration:.3s}.scrollbar-vertical div{border-radius:1px!important;background-color:#6a6a6a!important}.fes-host{position:fixed;left:0;bottom:0;z-index:1;background:#32393f;color:hsla(0,0%,100%,.4);font-size:15px;font-weight:400;width:320px;padding:20px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.fes-host>i{margin-right:10px}.fesl-row{position:relative}@media (min-width:668px){.fesl-row{padding:5px 20px 5px 40px;display:flex;flex-flow:row nowrap;justify-content:space-between}}@media (max-width:667px){.fesl-row{padding:5px 20px}}.fesl-row:after,.fesl-row:before{content:" ";display:table}.fesl-row:after{clear:both}@media (min-width:668px){header.fesl-row{margin-bottom:20px;border-bottom:1px solid #f0f0f0;padding-left:30px}header.fesl-row .fesl-item,header.fesl-row .fesli-sort{-webkit-transition:all;transition:all;-webkit-transition-duration:.3s;transition-duration:.3s}header.fesl-row .fesl-item{cursor:pointer;color:#8e8e8e;font-weight:500;margin-bottom:-5px}header.fesl-row .fesl-item>.fesli-sort{float:right;margin:4px 0 0;opacity:0;filter:alpha(opacity=0);color:#32393f;font-size:14px}header.fesl-row .fesl-item:hover:not(.fesl-item-actions){background:#f5f5f5;color:#32393f}header.fesl-row 
.fesl-item:hover:not(.fesl-item-actions)>.fesli-sort{opacity:.5;filter:alpha(opacity=50)}}@media (max-width:667px){header.fesl-row{display:none}}div.fesl-row{border-bottom:1px solid transparent;cursor:default;-webkit-transition:background-color;transition:background-color;-webkit-transition-duration:.5s;transition-duration:.5s}@media (max-width:667px){div.fesl-row{padding:5px 20px}}div.fesl-row:not(.fesl-row-selected):nth-child(2n){background-color:#fafafa}div.fesl-row:hover .fis-icon:before{opacity:0;filter:alpha(opacity=0)}div.fesl-row:hover .fis-helper:before{opacity:1;filter:alpha(opacity=100)}div.fesl-row[data-type=folder] .fis-icon{background-color:#a1d6dd}div.fesl-row[data-type=folder] .fis-icon:before{content:"\\F114"}div.fesl-row[data-type=pdf] .fis-icon{background-color:#fa7775}div.fesl-row[data-type=pdf] .fis-icon:before{content:"\\F1C1"}div.fesl-row[data-type=zip] .fis-icon{background-color:#427089}div.fesl-row[data-type=zip] .fis-icon:before{content:"\\F1C6"}div.fesl-row[data-type=audio] .fis-icon{background-color:#009688}div.fesl-row[data-type=audio] .fis-icon:before{content:"\\F1C7"}div.fesl-row[data-type=code] .fis-icon{background-color:#997867}div.fesl-row[data-type=code] .fis-icon:before{content:"\\F1C9"}div.fesl-row[data-type=excel] .fis-icon{background-color:#f1c3 3}div.fesl-row[data-type=excel] .fis-icon:before{content:"\\F1C3"}div.fesl-row[data-type=image] .fis-icon{background-color:#f06292}div.fesl-row[data-type=image] .fis-icon:before{content:"\\F1C5"}div.fesl-row[data-type=video] .fis-icon{background-color:#f8c363}div.fesl-row[data-type=video] .fis-icon:before{content:"\\F1C8"}div.fesl-row[data-type=other] .fis-icon{background-color:#afafaf}div.fesl-row[data-type=other] .fis-icon:before{content:"\\F016"}div.fesl-row[data-type=text] .fis-icon{background-color:#8a8a8a}div.fesl-row[data-type=text] .fis-icon:before{content:"\\F0F6"}div.fesl-row[data-type=doc] .fis-icon{background-color:#2196f5}div.fesl-row[data-type=doc] .fis-icon:before{content:"\\F1C2"}div.fesl-row[data-type=presentation] .fis-icon{background-color:#896ea6}div.fesl-row[data-type=presentation] .fis-icon:before{content:"\\F1C4"}div.fesl-row.fesl-loading:before{content:""}div.fesl-row.fesl-loading:after{content:"";width:20px;height:20px;border-radius:50%;-webkit-animation-name:zoomIn;animation-name:zoomIn;-webkit-animation-duration:.5s;animation-duration:.5s;-webkit-animation-fill-mode:both;animation-fill-mode:both;border:2px solid hsla(0,0%,100%,.5);border-bottom-color:#fff;position:absolute;z-index:1;-webkit-animation:zoomIn .25s,spin .7s .25s infinite linear;animation:zoomIn .25s,spin .7s .25s infinite linear;left:57px;top:17px}@media (max-width:667px){div.fesl-row.fesl-loading:after{left:27px}}.fesl-row-selected{background-color:#fbf2bf}.fesl-row-selected,.fesl-row-selected .fesl-item a{color:#757575}.fi-select{float:left;position:relative;width:35px;height:35px;margin:3px 0}@media (max-width:667px){.fi-select{margin-right:15px}}.fi-select input{position:absolute;left:0;top:0;width:35px;height:35px;z-index:20;opacity:0;cursor:pointer}.fi-select input:checked~.fis-icon{background-color:#32393f}.fi-select input:checked~.fis-icon:before{opacity:0}.fi-select input:checked~.fis-helper:before{-webkit-transform:scale(0);transform:scale(0)}.fi-select 
[Remainder of diff: regenerated browser UI assets — the minified webpack CSS/JS bundle (UI styles, polyfills, FontAwesome icon definitions) embedded as a Go string in the generated assets file]
ore{content:"\\F0C5"}.fa-paperclip:before{content:"\\F0C6"}.fa-floppy-o:before,.fa-save:before{content:"\\F0C7"}.fa-square:before{content:"\\F0C8"}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:"\\F0C9"}.fa-list-ul:before{content:"\\F0CA"}.fa-list-ol:before{content:"\\F0CB"}.fa-strikethrough:before{content:"\\F0CC"}.fa-underline:before{content:"\\F0CD"}.fa-table:before{content:"\\F0CE"}.fa-magic:before{content:"\\F0D0"}.fa-truck:before{content:"\\F0D1"}.fa-pinterest:before{content:"\\F0D2"}.fa-pinterest-square:before{content:"\\F0D3"}.fa-google-plus-square:before{content:"\\F0D4"}.fa-google-plus:before{content:"\\F0D5"}.fa-money:before{content:"\\F0D6"}.fa-caret-down:before{content:"\\F0D7"}.fa-caret-up:before{content:"\\F0D8"}.fa-caret-left:before{content:"\\F0D9"}.fa-caret-right:before{content:"\\F0DA"}.fa-columns:before{content:"\\F0DB"}.fa-sort:before,.fa-unsorted:before{content:"\\F0DC"}.fa-sort-desc:before,.fa-sort-down:before{content:"\\F0DD"}.fa-sort-asc:before,.fa-sort-up:before{content:"\\F0DE"}.fa-envelope:before{content:"\\F0E0"}.fa-linkedin:before{content:"\\F0E1"}.fa-rotate-left:before,.fa-undo:before{content:"\\F0E2"}.fa-gavel:before,.fa-legal:before{content:"\\F0E3"}.fa-dashboard:before,.fa-tachometer:before{content:"\\F0E4"}.fa-comment-o:before{content:"\\F0E5"}.fa-comments-o:before{content:"\\F0E6"}.fa-bolt:before,.fa-flash:before{content:"\\F0E7"}.fa-sitemap:before{content:"\\F0E8"}.fa-umbrella:before{content:"\\F0E9"}.fa-clipboard:before,.fa-paste:before{content:"\\F0EA"}.fa-lightbulb-o:before{content:"\\F0EB"}.fa-exchange:before{content:"\\F0EC"}.fa-cloud-download:before{content:"\\F0ED"}.fa-cloud-upload:before{content:"\\F0EE"}.fa-user-md:before{content:"\\F0F0"}.fa-stethoscope:before{content:"\\F0F1"}.fa-suitcase:before{content:"\\F0F2"}.fa-bell-o:before{content:"\\F0A2"}.fa-coffee:before{content:"\\F0F4"}.fa-cutlery:before{content:"\\F0F5"}.fa-file-text-o:before{content:"\\F0F6"}.fa-building-o:before{content:"\\F0F7"}.fa-hospital-o:before{content:"\\F0F8"}.fa-ambulance:before{content:"\\F0F9"}.fa-medkit:before{content:"\\F0FA"}.fa-fighter-jet:before{content:"\\F0FB"}.fa-beer:before{content:"\\F0FC"}.fa-h-square:before{content:"\\F0FD"}.fa-plus-square:before{content:"\\F0FE"}.fa-angle-double-left:before{content:"\\F100"}.fa-angle-double-right:before{content:"\\F101"}.fa-angle-double-up:before{content:"\\F102"}.fa-angle-double-down:before{content:"\\F103"}.fa-angle-left:before{content:"\\F104"}.fa-angle-right:before{content:"\\F105"}.fa-angle-up:before{content:"\\F106"}.fa-angle-down:before{content:"\\F107"}.fa-desktop:before{content:"\\F108"}.fa-laptop:before{content:"\\F109"}.fa-tablet:before{content:"\\F10A"}.fa-mobile-phone:before,.fa-mobile:before{content:"\\F10B"}.fa-circle-o:before{content:"\\F10C"}.fa-quote-left:before{content:"\\F10D"}.fa-quote-right:before{content:"\\F10E"}.fa-spinner:before{content:"\\F110"}.fa-circle:before{content:"\\F111"}.fa-mail-reply:before,.fa-reply:before{content:"\\F112"}.fa-github-alt:before{content:"\\F113"}.fa-folder-o:before{content:"\\F114"}.fa-folder-open-o:before{content:"\\F115"}.fa-smile-o:before{content:"\\F118"}.fa-frown-o:before{content:"\\F119"}.fa-meh-o:before{content:"\\F11A"}.fa-gamepad:before{content:"\\F11B"}.fa-keyboard-o:before{content:"\\F11C"}.fa-flag-o:before{content:"\\F11D"}.fa-flag-checkered:before{content:"\\F11E"}.fa-terminal:before{content:"\\F120"}.fa-code:before{content:"\\F121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\\F122"}.fa-star-half-empty:before,.fa-star-half-full:
before,.fa-star-half-o:before{content:"\\F123"}.fa-location-arrow:before{content:"\\F124"}.fa-crop:before{content:"\\F125"}.fa-code-fork:before{content:"\\F126"}.fa-chain-broken:before,.fa-unlink:before{content:"\\F127"}.fa-question:before{content:"\\F128"}.fa-info:before{content:"\\F129"}.fa-exclamation:before{content:"\\F12A"}.fa-superscript:before{content:"\\F12B"}.fa-subscript:before{content:"\\F12C"}.fa-eraser:before{content:"\\F12D"}.fa-puzzle-piece:before{content:"\\F12E"}.fa-microphone:before{content:"\\F130"}.fa-microphone-slash:before{content:"\\F131"}.fa-shield:before{content:"\\F132"}.fa-calendar-o:before{content:"\\F133"}.fa-fire-extinguisher:before{content:"\\F134"}.fa-rocket:before{content:"\\F135"}.fa-maxcdn:before{content:"\\F136"}.fa-chevron-circle-left:before{content:"\\F137"}.fa-chevron-circle-right:before{content:"\\F138"}.fa-chevron-circle-up:before{content:"\\F139"}.fa-chevron-circle-down:before{content:"\\F13A"}.fa-html5:before{content:"\\F13B"}.fa-css3:before{content:"\\F13C"}.fa-anchor:before{content:"\\F13D"}.fa-unlock-alt:before{content:"\\F13E"}.fa-bullseye:before{content:"\\F140"}.fa-ellipsis-h:before{content:"\\F141"}.fa-ellipsis-v:before{content:"\\F142"}.fa-rss-square:before{content:"\\F143"}.fa-play-circle:before{content:"\\F144"}.fa-ticket:before{content:"\\F145"}.fa-minus-square:before{content:"\\F146"}.fa-minus-square-o:before{content:"\\F147"}.fa-level-up:before{content:"\\F148"}.fa-level-down:before{content:"\\F149"}.fa-check-square:before{content:"\\F14A"}.fa-pencil-square:before{content:"\\F14B"}.fa-external-link-square:before{content:"\\F14C"}.fa-share-square:before{content:"\\F14D"}.fa-compass:before{content:"\\F14E"}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:"\\F150"}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:"\\F151"}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:"\\F152"}.fa-eur:before,.fa-euro:before{content:"\\F153"}.fa-gbp:before{content:"\\F154"}.fa-dollar:before,.fa-usd:before{content:"\\F155"}.fa-inr:before,.fa-rupee:before{content:"\\F156"}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:"\\F157"}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:"\\F158"}.fa-krw:before,.fa-won:before{content:"\\F159"}.fa-bitcoin:before,.fa-btc:before{content:"\\F15A"}.fa-file:before{content:"\\F15B"}.fa-file-text:before{content:"\\F15C"}.fa-sort-alpha-asc:before{content:"\\F15D"}.fa-sort-alpha-desc:before{content:"\\F15E"}.fa-sort-amount-asc:before{content:"\\F160"}.fa-sort-amount-desc:before{content:"\\F161"}.fa-sort-numeric-asc:before{content:"\\F162"}.fa-sort-numeric-desc:before{content:"\\F163"}.fa-thumbs-up:before{content:"\\F164"}.fa-thumbs-down:before{content:"\\F165"}.fa-youtube-square:before{content:"\\F166"}.fa-youtube:before{content:"\\F167"}.fa-xing:before{content:"\\F168"}.fa-xing-square:before{content:"\\F169"}.fa-youtube-play:before{content:"\\F16A"}.fa-dropbox:before{content:"\\F16B"}.fa-stack-overflow:before{content:"\\F16C"}.fa-instagram:before{content:"\\F16D"}.fa-flickr:before{content:"\\F16E"}.fa-adn:before{content:"\\F170"}.fa-bitbucket:before{content:"\\F171"}.fa-bitbucket-square:before{content:"\\F172"}.fa-tumblr:before{content:"\\F173"}.fa-tumblr-square:before{content:"\\F174"}.fa-long-arrow-down:before{content:"\\F175"}.fa-long-arrow-up:before{content:"\\F176"}.fa-long-arrow-left:before{content:"\\F177"}.fa-long-arrow-right:before{content:"\\F178"}.fa-apple:before{content:"\\F179"}.fa-windows:before{content:"\\F17A"}.fa-android:before{content:"\\F17B"}.
fa-linux:before{content:"\\F17C"}.fa-dribbble:before{content:"\\F17D"}.fa-skype:before{content:"\\F17E"}.fa-foursquare:before{content:"\\F180"}.fa-trello:before{content:"\\F181"}.fa-female:before{content:"\\F182"}.fa-male:before{content:"\\F183"}.fa-gittip:before,.fa-gratipay:before{content:"\\F184"}.fa-sun-o:before{content:"\\F185"}.fa-moon-o:before{content:"\\F186"}.fa-archive:before{content:"\\F187"}.fa-bug:before{content:"\\F188"}.fa-vk:before{content:"\\F189"}.fa-weibo:before{content:"\\F18A"}.fa-renren:before{content:"\\F18B"}.fa-pagelines:before{content:"\\F18C"}.fa-stack-exchange:before{content:"\\F18D"}.fa-arrow-circle-o-right:before{content:"\\F18E"}.fa-arrow-circle-o-left:before{content:"\\F190"}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:"\\F191"}.fa-dot-circle-o:before{content:"\\F192"}.fa-wheelchair:before{content:"\\F193"}.fa-vimeo-square:before{content:"\\F194"}.fa-try:before,.fa-turkish-lira:before{content:"\\F195"}.fa-plus-square-o:before{content:"\\F196"}.fa-space-shuttle:before{content:"\\F197"}.fa-slack:before{content:"\\F198"}.fa-envelope-square:before{content:"\\F199"}.fa-wordpress:before{content:"\\F19A"}.fa-openid:before{content:"\\F19B"}.fa-bank:before,.fa-institution:before,.fa-university:before{content:"\\F19C"}.fa-graduation-cap:before,.fa-mortar-board:before{content:"\\F19D"}.fa-yahoo:before{content:"\\F19E"}.fa-google:before{content:"\\F1A0"}.fa-reddit:before{content:"\\F1A1"}.fa-reddit-square:before{content:"\\F1A2"}.fa-stumbleupon-circle:before{content:"\\F1A3"}.fa-stumbleupon:before{content:"\\F1A4"}.fa-delicious:before{content:"\\F1A5"}.fa-digg:before{content:"\\F1A6"}.fa-pied-piper-pp:before{content:"\\F1A7"}.fa-pied-piper-alt:before{content:"\\F1A8"}.fa-drupal:before{content:"\\F1A9"}.fa-joomla:before{content:"\\F1AA"}.fa-language:before{content:"\\F1AB"}.fa-fax:before{content:"\\F1AC"}.fa-building:before{content:"\\F1AD"}.fa-child:before{content:"\\F1AE"}.fa-paw:before{content:"\\F1B0"}.fa-spoon:before{content:"\\F1B1"}.fa-cube:before{content:"\\F1B2"}.fa-cubes:before{content:"\\F1B3"}.fa-behance:before{content:"\\F1B4"}.fa-behance-square:before{content:"\\F1B5"}.fa-steam:before{content:"\\F1B6"}.fa-steam-square:before{content:"\\F1B7"}.fa-recycle:before{content:"\\F1B8"}.fa-automobile:before,.fa-car:before{content:"\\F1B9"}.fa-cab:before,.fa-taxi:before{content:"\\F1BA"}.fa-tree:before{content:"\\F1BB"}.fa-spotify:before{content:"\\F1BC"}.fa-deviantart:before{content:"\\F1BD"}.fa-soundcloud:before{content:"\\F1BE"}.fa-database:before{content:"\\F1C0"}.fa-file-pdf-o:before{content:"\\F1C1"}.fa-file-word-o:before{content:"\\F1C2"}.fa-file-excel-o:before{content:"\\F1C3"}.fa-file-powerpoint-o:before{content:"\\F1C4"}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:"\\F1C5"}.fa-file-archive-o:before,.fa-file-zip-o:before{content:"\\F1C6"}.fa-file-audio-o:before,.fa-file-sound-o:before{content:"\\F1C7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\\F1C8"}.fa-file-code-o:before{content:"\\F1C9"}.fa-vine:before{content:"\\F1CA"}.fa-codepen:before{content:"\\F1CB"}.fa-jsfiddle:before{content:"\\F1CC"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:"\\F1CD"}.fa-circle-o-notch:before{content:"\\F1CE"}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:"\\F1D0"}.fa-empire:before,.fa-ge:before{content:"\\F1D1"}.fa-git-square:before{content:"\\F1D2"}.fa-git:before{content:"\\F1D3"}.fa-hacker-news:before,.fa-y-combinator-square:befo
re,.fa-yc-square:before{content:"\\F1D4"}.fa-tencent-weibo:before{content:"\\F1D5"}.fa-qq:before{content:"\\F1D6"}.fa-wechat:before,.fa-weixin:before{content:"\\F1D7"}.fa-paper-plane:before,.fa-send:before{content:"\\F1D8"}.fa-paper-plane-o:before,.fa-send-o:before{content:"\\F1D9"}.fa-history:before{content:"\\F1DA"}.fa-circle-thin:before{content:"\\F1DB"}.fa-header:before{content:"\\F1DC"}.fa-paragraph:before{content:"\\F1DD"}.fa-sliders:before{content:"\\F1DE"}.fa-share-alt:before{content:"\\F1E0"}.fa-share-alt-square:before{content:"\\F1E1"}.fa-bomb:before{content:"\\F1E2"}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:"\\F1E3"}.fa-tty:before{content:"\\F1E4"}.fa-binoculars:before{content:"\\F1E5"}.fa-plug:before{content:"\\F1E6"}.fa-slideshare:before{content:"\\F1E7"}.fa-twitch:before{content:"\\F1E8"}.fa-yelp:before{content:"\\F1E9"}.fa-newspaper-o:before{content:"\\F1EA"}.fa-wifi:before{content:"\\F1EB"}.fa-calculator:before{content:"\\F1EC"}.fa-paypal:before{content:"\\F1ED"}.fa-google-wallet:before{content:"\\F1EE"}.fa-cc-visa:before{content:"\\F1F0"}.fa-cc-mastercard:before{content:"\\F1F1"}.fa-cc-discover:before{content:"\\F1F2"}.fa-cc-amex:before{content:"\\F1F3"}.fa-cc-paypal:before{content:"\\F1F4"}.fa-cc-stripe:before{content:"\\F1F5"}.fa-bell-slash:before{content:"\\F1F6"}.fa-bell-slash-o:before{content:"\\F1F7"}.fa-trash:before{content:"\\F1F8"}.fa-copyright:before{content:"\\F1F9"}.fa-at:before{content:"\\F1FA"}.fa-eyedropper:before{content:"\\F1FB"}.fa-paint-brush:before{content:"\\F1FC"}.fa-birthday-cake:before{content:"\\F1FD"}.fa-area-chart:before{content:"\\F1FE"}.fa-pie-chart:before{content:"\\F200"}.fa-line-chart:before{content:"\\F201"}.fa-lastfm:before{content:"\\F202"}.fa-lastfm-square:before{content:"\\F203"}.fa-toggle-off:before{content:"\\F204"}.fa-toggle-on:before{content:"\\F205"}.fa-bicycle:before{content:"\\F206"}.fa-bus:before{content:"\\F207"}.fa-ioxhost:before{content:"\\F208"}.fa-angellist:before{content:"\\F209"}.fa-cc:before{content:"\\F20A"}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:"\\F20B"}.fa-meanpath:before{content:"\\F20C"}.fa-buysellads:before{content:"\\F20D"}.fa-connectdevelop:before{content:"\\F20E"}.fa-dashcube:before{content:"\\F210"}.fa-forumbee:before{content:"\\F211"}.fa-leanpub:before{content:"\\F212"}.fa-sellsy:before{content:"\\F213"}.fa-shirtsinbulk:before{content:"\\F214"}.fa-simplybuilt:before{content:"\\F215"}.fa-skyatlas:before{content:"\\F216"}.fa-cart-plus:before{content:"\\F217"}.fa-cart-arrow-down:before{content:"\\F218"}.fa-diamond:before{content:"\\F219"}.fa-ship:before{content:"\\F21A"}.fa-user-secret:before{content:"\\F21B"}.fa-motorcycle:before{content:"\\F21C"}.fa-street-view:before{content:"\\F21D"}.fa-heartbeat:before{content:"\\F21E"}.fa-venus:before{content:"\\F221"}.fa-mars:before{content:"\\F222"}.fa-mercury:before{content:"\\F223"}.fa-intersex:before,.fa-transgender:before{content:"\\F224"}.fa-transgender-alt:before{content:"\\F225"}.fa-venus-double:before{content:"\\F226"}.fa-mars-double:before{content:"\\F227"}.fa-venus-mars:before{content:"\\F228"}.fa-mars-stroke:before{content:"\\F229"}.fa-mars-stroke-v:before{content:"\\F22A"}.fa-mars-stroke-h:before{content:"\\F22B"}.fa-neuter:before{content:"\\F22C"}.fa-genderless:before{content:"\\F22D"}.fa-facebook-official:before{content:"\\F230"}.fa-pinterest-p:before{content:"\\F231"}.fa-whatsapp:before{content:"\\F232"}.fa-server:before{content:"\\F233"}.fa-user-plus:before{content:"\\F234"}.fa-user-times:before{content:"\\F235"}.fa-bed:before
,.fa-hotel:before{content:"\\F236"}.fa-viacoin:before{content:"\\F237"}.fa-train:before{content:"\\F238"}.fa-subway:before{content:"\\F239"}.fa-medium:before{content:"\\F23A"}.fa-y-combinator:before,.fa-yc:before{content:"\\F23B"}.fa-optin-monster:before{content:"\\F23C"}.fa-opencart:before{content:"\\F23D"}.fa-expeditedssl:before{content:"\\F23E"}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:"\\F240"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:"\\F241"}.fa-battery-2:before,.fa-battery-half:before{content:"\\F242"}.fa-battery-1:before,.fa-battery-quarter:before{content:"\\F243"}.fa-battery-0:before,.fa-battery-empty:before{content:"\\F244"}.fa-mouse-pointer:before{content:"\\F245"}.fa-i-cursor:before{content:"\\F246"}.fa-object-group:before{content:"\\F247"}.fa-object-ungroup:before{content:"\\F248"}.fa-sticky-note:before{content:"\\F249"}.fa-sticky-note-o:before{content:"\\F24A"}.fa-cc-jcb:before{content:"\\F24B"}.fa-cc-diners-club:before{content:"\\F24C"}.fa-clone:before{content:"\\F24D"}.fa-balance-scale:before{content:"\\F24E"}.fa-hourglass-o:before{content:"\\F250"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\\F251"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\\F252"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\\F253"}.fa-hourglass:before{content:"\\F254"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:"\\F255"}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:"\\F256"}.fa-hand-scissors-o:before{content:"\\F257"}.fa-hand-lizard-o:before{content:"\\F258"}.fa-hand-spock-o:before{content:"\\F259"}.fa-hand-pointer-o:before{content:"\\F25A"}.fa-hand-peace-o:before{content:"\\F25B"}.fa-trademark:before{content:"\\F25C"}.fa-registered:before{content:"\\F25D"}.fa-creative-commons:before{content:"\\F25E"}.fa-gg:before{content:"\\F260"}.fa-gg-circle:before{content:"\\F261"}.fa-tripadvisor:before{content:"\\F262"}.fa-odnoklassniki:before{content:"\\F263"}.fa-odnoklassniki-square:before{content:"\\F264"}.fa-get-pocket:before{content:"\\F265"}.fa-wikipedia-w:before{content:"\\F266"}.fa-safari:before{content:"\\F267"}.fa-chrome:before{content:"\\F268"}.fa-firefox:before{content:"\\F269"}.fa-opera:before{content:"\\F26A"}.fa-internet-explorer:before{content:"\\F26B"}.fa-television:before,.fa-tv:before{content:"\\F26C"}.fa-contao:before{content:"\\F26D"}.fa-500px:before{content:"\\F26E"}.fa-amazon:before{content:"\\F270"}.fa-calendar-plus-o:before{content:"\\F271"}.fa-calendar-minus-o:before{content:"\\F272"}.fa-calendar-times-o:before{content:"\\F273"}.fa-calendar-check-o:before{content:"\\F274"}.fa-industry:before{content:"\\F275"}.fa-map-pin:before{content:"\\F276"}.fa-map-signs:before{content:"\\F277"}.fa-map-o:before{content:"\\F278"}.fa-map:before{content:"\\F279"}.fa-commenting:before{content:"\\F27A"}.fa-commenting-o:before{content:"\\F27B"}.fa-houzz:before{content:"\\F27C"}.fa-vimeo:before{content:"\\F27D"}.fa-black-tie:before{content:"\\F27E"}.fa-fonticons:before{content:"\\F280"}.fa-reddit-alien:before{content:"\\F281"}.fa-edge:before{content:"\\F282"}.fa-credit-card-alt:before{content:"\\F283"}.fa-codiepie:before{content:"\\F284"}.fa-modx:before{content:"\\F285"}.fa-fort-awesome:before{content:"\\F286"}.fa-usb:before{content:"\\F287"}.fa-product-hunt:before{content:"\\F288"}.fa-mixcloud:before{content:"\\F289"}.fa-scribd:before{content:"\\F28A"}.fa-pause-circle:before{content:"\\F28B"}.fa-pause-circle-o:before{content:"\\F28C"}.fa-stop-circle:before{content:"\\F28D"}.fa-stop-circle-o:before{co
ntent:"\\F28E"}.fa-shopping-bag:before{content:"\\F290"}.fa-shopping-basket:before{content:"\\F291"}.fa-hashtag:before{content:"\\F292"}.fa-bluetooth:before{content:"\\F293"}.fa-bluetooth-b:before{content:"\\F294"}.fa-percent:before{content:"\\F295"}.fa-gitlab:before{content:"\\F296"}.fa-wpbeginner:before{content:"\\F297"}.fa-wpforms:before{content:"\\F298"}.fa-envira:before{content:"\\F299"}.fa-universal-access:before{content:"\\F29A"}.fa-wheelchair-alt:before{content:"\\F29B"}.fa-question-circle-o:before{content:"\\F29C"}.fa-blind:before{content:"\\F29D"}.fa-audio-description:before{content:"\\F29E"}.fa-volume-control-phone:before{content:"\\F2A0"}.fa-braille:before{content:"\\F2A1"}.fa-assistive-listening-systems:before{content:"\\F2A2"}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:"\\F2A3"}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:"\\F2A4"}.fa-glide:before{content:"\\F2A5"}.fa-glide-g:before{content:"\\F2A6"}.fa-sign-language:before,.fa-signing:before{content:"\\F2A7"}.fa-low-vision:before{content:"\\F2A8"}.fa-viadeo:before{content:"\\F2A9"}.fa-viadeo-square:before{content:"\\F2AA"}.fa-snapchat:before{content:"\\F2AB"}.fa-snapchat-ghost:before{content:"\\F2AC"}.fa-snapchat-square:before{content:"\\F2AD"}.fa-pied-piper:before{content:"\\F2AE"}.fa-first-order:before{content:"\\F2B0"}.fa-yoast:before{content:"\\F2B1"}.fa-themeisle:before{content:"\\F2B2"}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:"\\F2B3"}.fa-fa:before,.fa-font-awesome:before{content:"\\F2B4"}.fa-handshake-o:before{content:"\\F2B5"}.fa-envelope-open:before{content:"\\F2B6"}.fa-envelope-open-o:before{content:"\\F2B7"}.fa-linode:before{content:"\\F2B8"}.fa-address-book:before{content:"\\F2B9"}.fa-address-book-o:before{content:"\\F2BA"}.fa-address-card:before,.fa-vcard:before{content:"\\F2BB"}.fa-address-card-o:before,.fa-vcard-o:before{content:"\\F2BC"}.fa-user-circle:before{content:"\\F2BD"}.fa-user-circle-o:before{content:"\\F2BE"}.fa-user-o:before{content:"\\F2C0"}.fa-id-badge:before{content:"\\F2C1"}.fa-drivers-license:before,.fa-id-card:before{content:"\\F2C2"}.fa-drivers-license-o:before,.fa-id-card-o:before{content:"\\F2C3"}.fa-quora:before{content:"\\F2C4"}.fa-free-code-camp:before{content:"\\F2C5"}.fa-telegram:before{content:"\\F2C6"}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:"\\F2C7"}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\\F2C8"}.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\\F2C9"}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\\F2CA"}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\\F2CB"}.fa-shower:before{content:"\\F2CC"}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:"\\F2CD"}.fa-podcast:before{content:"\\F2CE"}.fa-window-maximize:before{content:"\\F2D0"}.fa-window-minimize:before{content:"\\F2D1"}.fa-window-restore:before{content:"\\F2D2"}.fa-times-rectangle:before,.fa-window-close:before{content:"\\F2D3"}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:"\\F2D4"}.fa-bandcamp:before{content:"\\F2D5"}.fa-grav:before{content:"\\F2D6"}.fa-etsy:before{content:"\\F2D7"}.fa-imdb:before{content:"\\F2D8"}.fa-ravelry:before{content:"\\F2D9"}.fa-eercast:before{content:"\\F2DA"}.fa-microchip:before{content:"\\F2DB"}.fa-snowflake-o:before{content:"\\F2DC"}.fa-superpowers:before{content:"\\F2DD"}.fa-wpexplorer:before{content:"\\F2DE"}.fa-meetup:before{content:"\\F2E0"}.sr-only{position:
absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}',""]); +},function(A,M,t){M=A.exports=t(246)(),M.push([A.id,'*,:after,:before{box-sizing:border-box}body{font-family:Lato,sans-serif;font-size:15px;line-height:1.42857143;color:#8e8e8e;background-color:#edecec}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#46a5e0}a,a:focus,a:hover{text-decoration:none}a:focus,a:hover{color:#1f7fba}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.img-responsive{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{padding:4px;line-height:1.42857143;background-color:#edecec;border:1px solid #ddd;border-radius:4px;transition:all .2s ease-in-out;display:inline-block;max-width:100%;height:auto}.img-circle{border-radius:50%}hr{margin-top:21px;margin-bottom:21px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.container{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media (min-width:1200px){.container{width:1170px}}.container-fluid{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}.row{margin-left:-15px;margin-right:-15px}.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{position:relative;min-height:1px;padding-left:15px;padding-right:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{mar
gin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media (min-width:768px){.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media 
(min-width:992px){.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media (min-width:1200px){.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;font-size:15px;text-align:left;background-color:#fff;border:1px solid transparent;border-radius:4px;box-shadow:0 6px 12px rgba(0,0,0,.175);background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9.5px 0;overflow:hidden;background-color:rgba(0,0,0,.08)}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#8e8e8e;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{text-decoration:none;color:#333;background-color:rgba(0,0,0,.05)}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#333;text-decoration:none;outline:0;background-color:rgba(0,0,0,.075)}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#e4e4e4}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);cursor:not-allowed}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{left:auto;right:0}.dropdown-menu-left{left:0;right:auto}.dropdown-header{display:block;padding:3px 
20px;font-size:13px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;left:0;right:0;bottom:0;top:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0;border-bottom:4px dashed;border-bottom:4px solid\\9;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{left:auto;right:0}.navbar-right .dropdown-menu>li>a{text-align:right}.navbar-right .dropdown-menu-left{left:0;right:auto}}.modal,.modal-open{overflow:hidden}.modal{display:none;position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transform:translateY(-25%);transform:translateY(-25%);transition:transform .3s ease-out}.modal.in .modal-dialog{-webkit-transform:translate(0);transform:translate(0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;border:1px solid transparent;border-radius:6px;box-shadow:0 3px 9px rgba(0,0,0,.5);background-clip:padding-box;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:rgba(0,0,0,.1)}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:.5;filter:alpha(opacity=50)}.modal-header{padding:30px 35px 0;border-bottom:1px solid transparent}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:transparent}.modal-body{position:relative;padding:30px 35px}.modal-footer{padding:30px 35px;text-align:right;border-top:1px solid transparent}.modal-footer .btn+.btn{margin-left:5px;margin-bottom:0}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:400px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:Lato,sans-serif;font-style:normal;font-weight:400;letter-spacing:normal;line-break:auto;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;white-space:normal;word-break:normal;word-spacing:normal;word-wrap:normal;font-size:13px;opacity:0;filter:alpha(opacity=0)}.tooltip.in{opacity:.9;filter:alpha(opacity=90)}.tooltip.top{margin-top:-3px;padding:5px 0}.tooltip.right{margin-left:3px;padding:0 5px}.tooltip.bottom{margin-top:3px;padding:5px 0}.tooltip.left{margin-left:-3px;padding:0 5px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px}.tooltip.top-left .tooltip-arrow,.tooltip.top-right .tooltip-arrow{bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{left:5px}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 
5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}@-ms-viewport{width:device-width}.visible-lg,.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media 
print{.hidden-print{display:none!important}}*{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}:active,:focus{outline:0}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body,html{min-height:100%}a{transition:color;transition-duration:.3s}button{border:0}.animated.infinite{-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite}@-webkit-keyframes fadeIn{0%{opacity:0}to{opacity:1}}@keyframes fadeIn{0%{opacity:0}to{opacity:1}}.fadeIn{-webkit-animation-name:fadeIn;animation-name:fadeIn}@-webkit-keyframes fadeInDown{0%{opacity:0;-webkit-transform:translateY(-20px)}to{opacity:1;-webkit-transform:translateY(0)}}@keyframes fadeInDown{0%{opacity:0;transform:translateY(-20px)}to{opacity:1;transform:translateY(0)}}.fadeInDown{-webkit-animation-name:fadeInDown;animation-name:fadeInDown}@-webkit-keyframes fadeInUp{0%{opacity:0;-webkit-transform:translateY(20px)}to{opacity:1;-webkit-transform:translateY(0)}}@keyframes fadeInUp{0%{opacity:0;transform:translateY(20px)}to{opacity:1;transform:translateY(0)}}.fadeInUp{-webkit-animation-name:fadeInUp;animation-name:fadeInUp}@-webkit-keyframes fadeOut{0%{opacity:1}to{opacity:0}}@keyframes fadeOut{0%{opacity:1}to{opacity:0}}.fadeOut{-webkit-animation-name:fadeOut;animation-name:fadeOut}@-webkit-keyframes fadeOutDown{0%{opacity:1;-webkit-transform:translateY(0)}to{opacity:0;-webkit-transform:translateY(20px)}}@keyframes fadeOutDown{0%{opacity:1;transform:translateY(0)}to{opacity:0;transform:translateY(20px)}}.fadeOutDown{-webkit-animation-name:fadeOutDown;animation-name:fadeOutDown}@-webkit-keyframes fadeOutUp{0%{opacity:1;-webkit-transform:translateY(0)}to{opacity:0;-webkit-transform:translateY(-20px)}}@keyframes fadeOutUp{0%{opacity:1;transform:translateY(0)}to{opacity:0;transform:translateY(-20px)}}.fadeOutUp{-webkit-animation-name:fadeOutUp;animation-name:fadeOutUp}@-webkit-keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}@keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}.text-center{text-align:center!important}.text-left{text-align:left!important}.text-right{text-align:right!important}.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.row:after,.row:before{content:" 
";display:table}.clearfix:after,.container-fluid:after,.container:after,.modal-footer:after,.modal-header:after,.row:after{clear:both}.pull-right{float:right!important}.pull-left{float:left!important}.p-relative{position:relative}.m-0{margin:0!important}.m-t-0{margin-top:0!important}.m-b-0{margin-bottom:0!important}.m-l-0{margin-left:0!important}.m-r-0{margin-right:0!important}.m-5{margin:5px!important}.m-t-5{margin-top:5px!important}.m-b-5{margin-bottom:5px!important}.m-l-5{margin-left:5px!important}.m-r-5{margin-right:5px!important}.m-10{margin:10px!important}.m-t-10{margin-top:10px!important}.m-b-10{margin-bottom:10px!important}.m-l-10{margin-left:10px!important}.m-r-10{margin-right:10px!important}.m-15{margin:15px!important}.m-t-15{margin-top:15px!important}.m-b-15{margin-bottom:15px!important}.m-l-15{margin-left:15px!important}.m-r-15{margin-right:15px!important}.m-20{margin:20px!important}.m-t-20{margin-top:20px!important}.m-b-20{margin-bottom:20px!important}.m-l-20{margin-left:20px!important}.m-r-20{margin-right:20px!important}.m-25{margin:25px!important}.m-t-25{margin-top:25px!important}.m-b-25{margin-bottom:25px!important}.m-l-25{margin-left:25px!important}.m-r-25{margin-right:25px!important}.m-30{margin:30px!important}.m-t-30{margin-top:30px!important}.m-b-30{margin-bottom:30px!important}.m-l-30{margin-left:30px!important}.m-r-30{margin-right:30px!important}.p-0{padding:0!important}.p-t-0{padding-top:0!important}.p-b-0{padding-bottom:0!important}.p-l-0{padding-left:0!important}.p-r-0{padding-right:0!important}.p-5{padding:5px!important}.p-t-5{padding-top:5px!important}.p-b-5{padding-bottom:5px!important}.p-l-5{padding-left:5px!important}.p-r-5{padding-right:5px!important}.p-10{padding:10px!important}.p-t-10{padding-top:10px!important}.p-b-10{padding-bottom:10px!important}.p-l-10{padding-left:10px!important}.p-r-10{padding-right:10px!important}.p-15{padding:15px!important}.p-t-15{padding-top:15px!important}.p-b-15{padding-bottom:15px!important}.p-l-15{padding-left:15px!important}.p-r-15{padding-right:15px!important}.p-20{padding:20px!important}.p-t-20{padding-top:20px!important}.p-b-20{padding-bottom:20px!important}.p-l-20{padding-left:20px!important}.p-r-20{padding-right:20px!important}.p-25{padding:25px!important}.p-t-25{padding-top:25px!important}.p-b-25{padding-bottom:25px!important}.p-l-25{padding-left:25px!important}.p-r-25{padding-right:25px!important}.p-30{padding:30px!important}.p-t-30{padding-top:30px!important}.p-b-30{padding-bottom:30px!important}.p-l-30{padding-left:30px!important}.p-r-30{padding-right:30px!important}@font-face{font-family:Lato;src:url('+t(818)+') format("woff2"),url('+t(817)+') format("woff");font-weight:400;font-style:normal}.form-control{border:0;border-bottom:1px solid #eee;color:#32393f;padding:5px;width:100%;font-size:13px;background-color:transparent}select.form-control{-webkit-appearance:none;-moz-appearance:none;border-radius:0;background:url('+t(821)+') no-repeat bottom 7px right}.input-group{position:relative}.input-group:not(:last-child){margin-bottom:25px}.input-group label:not(.ig-label){font-size:13px;display:block;margin-bottom:10px}.ig-label{position:absolute;text-align:center;bottom:7px;left:0;width:100%;transition:all;transition-duration:.25s;padding:2px 0 
3px;border-radius:2px;font-weight:400}.ig-helpers{z-index:1;width:100%;left:0}.ig-helpers,.ig-helpers:after,.ig-helpers:before{position:absolute;height:2px;bottom:0}.ig-helpers:after,.ig-helpers:before{content:"";width:0;transition:all;transition-duration:.25s;background-color:#03a9f4}.ig-helpers:before{left:50%}.ig-helpers:after{right:50%}.ig-text{width:100%;height:40px;border:0;background:transparent!important;text-align:center;position:relative;z-index:1;border-bottom:1px solid #eee;color:#32393f;font-size:13px}.ig-text:focus+.ig-helpers:after,.ig-text:focus+.ig-helpers:before{width:50%}.ig-text:disabled~.ig-label,.ig-text:focus~.ig-label,.ig-text:valid~.ig-label{bottom:35px;font-size:13px;z-index:1}.ig-text:disabled{opacity:.5;filter:alpha(opacity=50)}.ig-dark .ig-text{color:#fff!important;border-color:hsla(0,0%,100%,.1)!important}.ig-dark .ig-helpers:after,.ig-dark .ig-helpers:before{background-color:#dfdfdf;height:1px}.ig-left .ig-label,.ig-left .ig-text{text-align:left}.ig-error .ig-label{color:#e23f3f}.ig-error .ig-helpers i:first-child,.ig-error .ig-helpers i:first-child:after,.ig-error .ig-helpers i:first-child:before{background:rgba(226,63,63,.43)}.ig-error .ig-helpers i:last-child,.ig-error .ig-helpers i:last-child:after,.ig-error .ig-helpers i:last-child:before{background:#e23f3f!important}.ig-error:after{content:"\\F05A";font-family:FontAwesome;position:absolute;top:17px;right:9px;font-size:20px;color:#d33d3e}.ig-search:before{font-family:fontAwesome;content:"\\F002";font-size:15px;position:absolute;left:2px;top:8px}.ig-search .ig-text{padding-left:25px}.set-expire{border:1px solid #eee;margin:35px 0 30px;position:relative}.set-expire:before{content:"";position:absolute;left:0;top:0;width:100%;height:100%;z-index:1}.set-expire-item{padding:9px 5px 3px;position:relative;display:table-cell;width:1%;text-align:center;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.set-expire-item:not(:last-child){border-right:1px solid #eee}.set-expire-title{font-size:10px;text-transform:uppercase}.set-expire-value{display:inline-block;overflow:hidden;position:relative;left:-8px}.set-expire-value input{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;font-size:20px;text-align:center;position:relative;right:-15px;border:0;color:#333;padding:0;height:25px;width:100%;font-weight:400}.set-expire-decrease,.set-expire-increase{position:absolute;width:20px;height:20px;background:url('+t(819)+') no-repeat 50%;background-size:85%;left:50%;margin-left:-10px;opacity:.2;filter:alpha(opacity=20);cursor:pointer}.set-expire-decrease:hover,.set-expire-increase:hover{opacity:.5;filter:alpha(opacity=50)}.set-expire-increase{top:-25px}.set-expire-decrease{bottom:-27px;-webkit-transform:rotate(-180deg);transform:rotate(-180deg)}.btn{border:0;padding:5px 
[Minified Minio Browser UI asset bundle (generated): css-loader output styling the web UI (login screen, file-explorer sidebar and listing, modals, alerts, upload controls), followed by the bundled vendor and React JavaScript (react-dropzone, react-bootstrap helpers, query-string, history, React DOM internals). Content not reproduced here.]
T=this.mountChildren(i,A,t),E=0;EM.end?(t=M.end,I=M.start):(t=M.start,I=M.end),g.moveToElementText(A),g.moveStart("character",t),g.setEndPoint("EndToStart",g),g.moveEnd("character",I-t),g.select()}function T(A,M){if(window.getSelection){var t=window.getSelection(),I=A[n()].length,g=Math.min(M.start,I),e="undefined"==typeof M.end?g:Math.min(M.end,I);if(!t.extend&&g>e){var i=e;e=g,g=i}var T=N(A,g),E=N(A,e);if(T&&E){var o=document.createRange();o.setStart(T.node,T.offset),t.removeAllRanges(),g>e?(t.addRange(o),t.extend(E.node,E.offset)):(o.setEnd(E.node,E.offset),t.addRange(o))}}}var E=t(20),N=t(772),n=t(340),o=E.canUseDOM&&"selection"in document&&!("getSelection"in window),c={getOffsets:o?g:e,setOffsets:o?i:T};A.exports=c},function(A,M,t){"use strict";var I=t(325),g=t(753),e=t(200);I.inject();var i={renderToString:g.renderToString,renderToStaticMarkup:g.renderToStaticMarkup,version:e};A.exports=i},function(A,M,t){"use strict";function I(){this._rootNodeID&&n.updateWrapper(this)}function g(A){var M=this._currentElement.props,t=e.executeOnChange(M,A);return T.asap(I,this),t}var e=t(195),i=t(198),T=t(39),E=t(8),N=t(3),n=(t(7),{getNativeProps:function(A,M,t){null!=M.dangerouslySetInnerHTML?N(!1):void 0;var I=E({},M,{defaultValue:void 0,value:void 0,children:A._wrapperState.initialValue,onChange:A._wrapperState.onChange});return I},mountWrapper:function(A,M){var t=M.defaultValue,I=M.children;null!=I&&(null!=t?N(!1):void 0,Array.isArray(I)&&(I.length<=1?void 0:N(!1),I=I[0]),t=""+I),null==t&&(t="");var i=e.getValue(M);A._wrapperState={initialValue:""+(null!=i?i:t),onChange:g.bind(A)}},updateWrapper:function(A){var M=A._currentElement.props,t=e.getValue(M);null!=t&&i.updatePropertyByID(A._rootNodeID,"value",""+t)}});A.exports=n},function(A,M,t){"use strict";function I(A){g.enqueueEvents(A),g.processEventQueue(!1)}var g=t(101),e={handleTopLevel:function(A,M,t,e,i){var T=g.extractEvents(A,M,t,e,i);I(T)}};A.exports=e},function(A,M,t){"use strict";function I(A){var M=c.getID(A),t=o.getReactRootIDFromNodeID(M),I=c.findReactContainerForID(t),g=c.getFirstReactDOM(I);return g}function g(A,M){this.topLevelType=A,this.nativeEvent=M,this.ancestors=[]}function e(A){i(A)}function i(A){for(var M=c.getFirstReactDOM(D(A.nativeEvent))||window,t=M;t;)A.ancestors.push(t),t=I(t);for(var g=0;g=M)return{node:g,offset:M-e};e=i}g=t(I(g))}}A.exports=g},function(A,M,t){"use strict";function I(A){return g.isValidElement(A)?void 0:e(!1),A}var g=t(29),e=t(3);A.exports=I},function(A,M,t){"use strict";function I(A){return'"'+g(A)+'"'}var g=t(134);A.exports=I},function(A,M,t){"use strict";var I=t(23);A.exports=I.renderSubtreeIntoContainer},function(A,M){A.exports=function(A,M,t){for(var I=0,g=A.length,e=3==arguments.length?t:A[I++];I=0;--I){var g=this.tryEntries[I],e=g.completion;if("root"===g.tryLoc)return M("end");if(g.tryLoc<=this.prev){var i=Q.call(g,"catchLoc"),T=Q.call(g,"finallyLoc");if(i&&T){if(this.prev=0;--t){var I=this.tryEntries[t];if(I.tryLoc<=this.prev&&Q.call(I,"finallyLoc")&&this.prev=0;--M){var t=this.tryEntries[M];if(t.finallyLoc===A)return this.complete(t.completion,t.afterLoc),c(t),d}},catch:function(A){for(var M=this.tryEntries.length-1;M>=0;--M){var t=this.tryEntries[M];if(t.tryLoc===A){var I=t.completion;if("throw"===I.type){var g=I.arg;c(t)}return g}}throw new Error("illegal catch attempt")},delegateYield:function(A,M,t){return this.delegate={iterator:a(A),resultName:M,nextLoc:t},d}}}("object"==typeof M?M:"object"==typeof window?window:"object"==typeof self?self:this)}).call(M,function(){return 
this}(),t(174))},function(A,M){"use strict";A.exports=function(A){return encodeURIComponent(A).replace(/[!'()*]/g,function(A){return"%"+A.charCodeAt(0).toString(16).toUpperCase()})}},function(A,M,t){var I=t(552);"string"==typeof I&&(I=[[A.id,I,""]]);t(345)(I,{});I.locals&&(A.exports=I.locals)},function(A,M,t){var I=t(553);"string"==typeof I&&(I=[[A.id,I,""]]);t(345)(I,{});I.locals&&(A.exports=I.locals)},function(A,M,t){var I=A.exports=t(786),g=I.Request,e=I.SuperagentPromiseError=function(A){this.name="SuperagentPromiseError",this.message=A||"Bad request"};e.prototype=new Error,e.prototype.constructor=e,g.prototype.promise=function(){var A,M=this;return new Promise(function(t,I){M.end(function(g,i){if("undefined"!=typeof i&&i.status>=400){var T="cannot "+M.method+" "+M.url+" ("+i.status+")";A=new e(T),A.status=i.status,A.body=i.body,A.res=i,I(A)}else g?I(new e(g)):t(i)})})},g.prototype.then=function(){var A=this.promise();return A.then.apply(A,arguments)}},function(A,M,t){function I(){}function g(A){var M={}.toString.call(A);switch(M){case"[object File]":case"[object Blob]":case"[object FormData]":return!0;default:return!1}}function e(A){if(!s(A))return A;var M=[];for(var t in A)null!=A[t]&&i(M,t,A[t]);return M.join("&")}function i(A,M,t){return Array.isArray(t)?t.forEach(function(t){i(A,M,t)}):void A.push(encodeURIComponent(M)+"="+encodeURIComponent(t))}function T(A){for(var M,t,I={},g=A.split("&"),e=0,i=g.length;e=200&&M.status<300)return t.callback(A,M);var I=new Error(M.statusText||"Unsuccessful HTTP response");I.original=A,I.response=M,I.status=M.status,t.callback(I,M)})}function a(A,M){var t=u("DELETE",A);return M&&t.end(M),t}var D,r=t(368),B=t(776),Q=t(787),s=t(346);D="undefined"!=typeof window?window:"undefined"!=typeof self?self:this;var u=A.exports=t(788).bind(null,C);u.getXHR=function(){if(!(!D.XMLHttpRequest||D.location&&"file:"==D.location.protocol&&D.ActiveXObject))return new XMLHttpRequest;try{return new ActiveXObject("Microsoft.XMLHTTP")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP.6.0")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP.3.0")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP")}catch(A){}return!1};var x="".trim?function(A){return A.trim()}:function(A){return A.replace(/(^\s*|\s*$)/g,"")};u.serializeObject=e,u.parseString=T,u.types={html:"text/html",json:"application/json",xml:"application/xml",urlencoded:"application/x-www-form-urlencoded",form:"application/x-www-form-urlencoded","form-data":"application/x-www-form-urlencoded"},u.serialize={"application/x-www-form-urlencoded":e,"application/json":JSON.stringify},u.parse={"application/x-www-form-urlencoded":T,"application/json":JSON.parse},c.prototype.get=function(A){return this.header[A.toLowerCase()]},c.prototype.setHeaderProperties=function(A){var M=this.header["content-type"]||"";this.type=n(M);var t=o(M);for(var I in t)this[I]=t[I]},c.prototype.parseBody=function(A){var M=u.parse[this.type];return!M&&N(this.type)&&(M=u.parse["application/json"]),M&&A&&(A.length||A instanceof Object)?M(A):null},c.prototype.setStatusProperties=function(A){1223===A&&(A=204);var M=A/100|0;this.status=this.statusCode=A,this.statusType=M,this.info=1==M,this.ok=2==M,this.clientError=4==M,this.serverError=5==M,this.error=(4==M||5==M)&&this.toError(),this.accepted=202==A,this.noContent=204==A,this.badRequest=400==A,this.unauthorized=401==A,this.notAcceptable=406==A,this.notFound=404==A,this.forbidden=403==A},c.prototype.toError=function(){var A=this.req,M=A.method,t=A.url,I="cannot "+M+" "+t+" 
("+this.status+")",g=new Error(I);return g.status=this.status,g.method=M,g.url=t,g},u.Response=c,r(C.prototype);for(var y in Q)C.prototype[y]=Q[y];C.prototype.abort=function(){if(!this.aborted)return this.aborted=!0,this.xhr&&this.xhr.abort(),this.clearTimeout(),this.emit("abort"),this},C.prototype.type=function(A){return this.set("Content-Type",u.types[A]||A),this},C.prototype.responseType=function(A){return this._responseType=A,this},C.prototype.accept=function(A){return this.set("Accept",u.types[A]||A),this},C.prototype.auth=function(A,M,t){switch(t||(t={type:"basic"}),t.type){case"basic":var I=btoa(A+":"+M);this.set("Authorization","Basic "+I);break;case"auto":this.username=A,this.password=M}return this},C.prototype.query=function(A){return"string"!=typeof A&&(A=e(A)),A&&this._query.push(A),this},C.prototype.attach=function(A,M,t){return this._getFormData().append(A,M,t||M.name),this},C.prototype._getFormData=function(){return this._formData||(this._formData=new D.FormData),this._formData},C.prototype.send=function(A){var M=s(A),t=this._header["content-type"];if(M&&s(this._data))for(var I in A)this._data[I]=A[I];else"string"==typeof A?(t||this.type("form"),t=this._header["content-type"],"application/x-www-form-urlencoded"==t?this._data=this._data?this._data+"&"+A:A:this._data=(this._data||"")+A):this._data=A;return!M||g(A)?this:(t||this.type("json"),this)},c.prototype.parse=function(A){return D.console&&console.warn("Client-side parse() method has been renamed to serialize(). This method is not compatible with superagent v2.0"),this.serialize(A),this},c.prototype.serialize=function(A){return this._parser=A,this},C.prototype.callback=function(A,M){var t=this._callback;this.clearTimeout(),t(A,M)},C.prototype.crossDomainError=function(){var A=new Error("Request has been terminated\nPossible causes: the network is offline, Origin is not allowed by Access-Control-Allow-Origin, the page is being unloaded, etc.");A.crossDomain=!0,A.status=this.status,A.method=this.method,A.url=this.url,this.callback(A)},C.prototype.timeoutError=function(){var A=this._timeout,M=new Error("timeout of "+A+"ms exceeded");M.timeout=A,this.callback(M)},C.prototype.withCredentials=function(){return this._withCredentials=!0,this},C.prototype.end=function(A){var M=this,t=this.xhr=u.getXHR(),e=this._query.join("&"),i=this._timeout,T=this._formData||this._data;this._callback=A||I,t.onreadystatechange=function(){if(4==t.readyState){var A;try{A=t.status}catch(M){A=0}if(0==A){if(M.timedout)return M.timeoutError();if(M.aborted)return;return M.crossDomainError()}M.emit("end")}};var E=function(A){A.total>0&&(A.percent=A.loaded/A.total*100),A.direction="download",M.emit("progress",A)};this.hasListeners("progress")&&(t.onprogress=E);try{t.upload&&this.hasListeners("progress")&&(t.upload.onprogress=E)}catch(A){}if(i&&!this._timer&&(this._timer=setTimeout(function(){M.timedout=!0,M.abort()},i)),e&&(e=u.serializeObject(e),this.url+=~this.url.indexOf("?")?"&"+e:"?"+e),this.username&&this.password?t.open(this.method,this.url,!0,this.username,this.password):t.open(this.method,this.url,!0),this._withCredentials&&(t.withCredentials=!0),"GET"!=this.method&&"HEAD"!=this.method&&"string"!=typeof T&&!g(T)){var n=this._header["content-type"],o=this._parser||u.serialize[n?n.split(";")[0]:""];!o&&N(n)&&(o=u.serialize["application/json"]),o&&(T=o(T))}for(var c in this.header)null!=this.header[c]&&t.setRequestHeader(c,this.header[c]);return 
this._responseType&&(t.responseType=this._responseType),this.emit("request",this),t.send("undefined"!=typeof T?T:null),this},u.Request=C,u.get=function(A,M,t){var I=u("GET",A);return"function"==typeof M&&(t=M,M=null),M&&I.query(M),t&&I.end(t),I},u.head=function(A,M,t){var I=u("HEAD",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},u.del=a,u.delete=a,u.patch=function(A,M,t){var I=u("PATCH",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},u.post=function(A,M,t){var I=u("POST",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},u.put=function(A,M,t){var I=u("PUT",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I}},function(A,M,t){var I=t(346);M.clearTimeout=function(){return this._timeout=0,clearTimeout(this._timer),this},M.parse=function(A){return this._parser=A,this},M.timeout=function(A){return this._timeout=A,this},M.then=function(A,M){return this.end(function(t,I){t?M(t):A(I)})},M.use=function(A){return A(this),this},M.get=function(A){return this._header[A.toLowerCase()]},M.getHeader=M.get,M.set=function(A,M){if(I(A)){for(var t in A)this.set(t,A[t]);return this}return this._header[A.toLowerCase()]=M,this.header[A]=M,this},M.unset=function(A){return delete this._header[A.toLowerCase()],delete this.header[A],this},M.field=function(A,M){return this._getFormData().append(A,M),this}},function(A,M){function t(A,M,t){return"function"==typeof t?new A("GET",M).end(t):2==arguments.length?new A("GET",M):new A(M,t)}A.exports=t},function(A,M,t){A.exports=t(790)},function(A,M,t){(function(A,I){"use strict";function g(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var e,i=t(791),T=g(i);e="undefined"!=typeof self?self:"undefined"!=typeof window?window:"undefined"!=typeof A?A:I;var E=(0,T.default)(e);M.default=E}).call(M,function(){return this}(),t(215)(A))},function(A,M){"use strict";function t(A){var M,t=A.Symbol;return"function"==typeof t?t.observable?M=t.observable:(M=t("observable"),t.observable=M):M="@@observable",M}Object.defineProperty(M,"__esModule",{value:!0}),M.default=t},function(A,M,t){function I(A){return g(A).replace(/\s(\w)/g,function(A,M){return M.toUpperCase()})}var g=t(794);A.exports=I},function(A,M){function t(A){return e.test(A)?A.toLowerCase():i.test(A)?(I(A)||A).toLowerCase():T.test(A)?g(A).toLowerCase():A.toLowerCase()}function I(A){return A.replace(E,function(A,M){return M?" "+M:""})}function g(A){return A.replace(N,function(A,M,t){return M+" "+t.toLowerCase().split("").join(" ")})}A.exports=t;var e=/\s/,i=/(_|-|\.|:)/,T=/([a-z][A-Z]|[A-Z][a-z])/,E=/[\W_]+(.|$)/g,N=/(.)([A-Z]+)/g},function(A,M,t){function I(A){return g(A).replace(/[\W_]+(.|$)/g,function(A,M){return M?" 
"+M:""}).trim()}var g=t(793);A.exports=I},function(A,M){A.exports=function(){var A=document.getSelection();if(!A.rangeCount)return function(){};for(var M=document.activeElement,t=[],I=0;I=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function i(A,M){function t(I,g){function i(A,t){var I=C.getLinkName(A),e=this.props[g[A]];I&&E(this.props,I)&&!e&&(e=this.props[I].requestChange);for(var i=arguments.length,T=Array(i>2?i-2:0),N=2;N=15||0===s[0]&&s[1]>=13?A:A.type}function T(A,M){var t=N(M);return t&&!E(A,M)&&E(A,t)?A[t].value:A[M]}function E(A,M){return void 0!==A[M]}function N(A){return"value"===A?"valueLink":"checked"===A?"checkedLink":null}function n(A){return"default"+A.charAt(0).toUpperCase()+A.substr(1)}function o(A,M,t){return function(){for(var I=arguments.length,g=Array(I),e=0;eN||n===N&&"application/"===M[T].substr(0,12)))continue}M[T]=I}}})}var E=t(623),n=t(626).extname,N=/^\s*([^;\s]*)(?:;|\s|$)/,o=/^text\//i;M.charset=I,M.charsets={lookup:I},M.contentType=g,M.extension=e,M.extensions=Object.create(null),M.lookup=i,M.types=Object.create(null),T(M.extensions,M.types)},function(A,M,t){/*! + * mime-db + * Copyright(c) 2014 Jonathan Ong + * MIT Licensed + */ +A.exports=t(571)},function(A,M){function t(A){throw new Error("Cannot find module '"+A+"'.")}t.keys=function(){return[]},t.resolve=t,A.exports=t,t.id=624},function(A,M){/* + object-assign + (c) Sindre Sorhus + @license MIT + */ +"use strict";function t(A){if(null===A||void 0===A)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(A)}function I(){try{if(!Object.assign)return!1;var A=new String("abc");if(A[5]="de","5"===Object.getOwnPropertyNames(A)[0])return!1;for(var M={},t=0;t<10;t++)M["_"+String.fromCharCode(t)]=t;var I=Object.getOwnPropertyNames(M).map(function(A){return M[A]});if("0123456789"!==I.join(""))return!1;var g={};return"abcdefghijklmnopqrst".split("").forEach(function(A){g[A]=A}),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},g)).join("")}catch(A){return!1}}var g=Object.getOwnPropertySymbols,e=Object.prototype.hasOwnProperty,i=Object.prototype.propertyIsEnumerable;A.exports=I()?Object.assign:function(A,M){for(var I,T,E=t(A),n=1;n=0;I--){var g=A[I];"."===g?A.splice(I,1):".."===g?(A.splice(I,1),t++):t&&(A.splice(I,1),t--)}if(M)for(;t--;t)A.unshift("..");return A}function I(A,M){if(A.filter)return A.filter(M);for(var t=[],I=0;I=-1&&!g;e--){var i=e>=0?arguments[e]:A.cwd();if("string"!=typeof i)throw new TypeError("Arguments to path.resolve must be strings");i&&(M=i+"/"+M,g="/"===i.charAt(0))}return M=t(I(M.split("/"),function(A){return!!A}),!g).join("/"),(g?"/":"")+M||"."},M.normalize=function(A){var g=M.isAbsolute(A),e="/"===i(A,-1);return A=t(I(A.split("/"),function(A){return!!A}),!g).join("/"),A||g||(A="."),A&&e&&(A+="/"),(g?"/":"")+A},M.isAbsolute=function(A){return"/"===A.charAt(0)},M.join=function(){var A=Array.prototype.slice.call(arguments,0);return M.normalize(I(A,function(A,M){if("string"!=typeof A)throw new TypeError("Arguments to path.join must be strings");return A}).join("/"))},M.relative=function(A,t){function I(A){for(var M=0;M=0&&""===A[t];t--);return M>t?[]:A.slice(M,t-M+1)}A=M.resolve(A).substr(1),t=M.resolve(t).substr(1);for(var g=I(A.split("/")),e=I(t.split("/")),i=Math.min(g.length,e.length),T=i,E=0;E=0;e--){var i=I[e]+g;if(i in M)return i}return!1}},function(A,M,t){"use strict";var I=t(569),g=t(249),e=t(629);A.exports=function(){function A(A,M,t,I,i,T){T!==e&&g(!1,"Calling PropTypes validators directly is not supported by the ` + 
"`" + `prop-types` + "`" + ` package. Use PropTypes.checkPropTypes() to call them. Read more at http://fb.me/use-check-prop-types")}function M(){return A}A.isRequired=A;var t={array:A,bool:A,func:A,number:A,object:A,string:A,symbol:A,any:A,arrayOf:M,element:A,instanceOf:M,node:A,objectOf:M,oneOf:M,oneOfType:M,shape:M};return t.checkPropTypes=I,t.PropTypes=t,t}},function(A,M){"use strict";var t="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED";A.exports=t},function(A,M,t){"use strict";var I=t(800);M.extract=function(A){return A.split("?")[1]||""},M.parse=function(A){return"string"!=typeof A?{}:(A=A.trim().replace(/^(\?|#|&)/,""),A?A.split("&").reduce(function(A,M){var t=M.replace(/\+/g," ").split("="),I=t.shift(),g=t.length>0?t.join("="):void 0;return I=decodeURIComponent(I),g=void 0===g?null:decodeURIComponent(g),A.hasOwnProperty(I)?Array.isArray(A[I])?A[I].push(g):A[I]=[A[I],g]:A[I]=g,A},{}):{})},M.stringify=function(A){return A?Object.keys(A).sort().map(function(M){var t=A[M];return void 0===t?"":null===t?M:Array.isArray(t)?t.slice().sort().map(function(A){return I(M)+"="+I(A)}).join("&"):I(M)+"="+I(t)}).filter(function(A){return A.length>0}).join("&"):""}},function(A,M){"use strict";function t(A,M){return Object.prototype.hasOwnProperty.call(A,M)}A.exports=function(A,M,I,g){M=M||"&",I=I||"=";var e={};if("string"!=typeof A||0===A.length)return e;var i=/\+/g;A=A.split(M);var T=1e3;g&&"number"==typeof g.maxKeys&&(T=g.maxKeys);var E=A.length;T>0&&E>T&&(E=T);for(var n=0;n=0?(N=a.substr(0,D),o=a.substr(D+1)):(N=a,o=""),c=decodeURIComponent(N),C=decodeURIComponent(o),t(e,c)?Array.isArray(e[c])?e[c].push(C):e[c]=[e[c],C]:e[c]=C}return e}},function(A,M){"use strict";var t=function(A){switch(typeof A){case"string":return A;case"boolean":return A?"true":"false";case"number":return isFinite(A)?A:"";default:return""}};A.exports=function(A,M,I,g){return M=M||"&",I=I||"=",null===A&&(A=void 0),"object"==typeof A?Object.keys(A).map(function(g){var e=encodeURIComponent(t(g))+I;return Array.isArray(A[g])?A[g].map(function(A){return e+encodeURIComponent(t(A))}).join(M):e+encodeURIComponent(t(A[g]))}).join(M):g?encodeURIComponent(t(g))+I+encodeURIComponent(t(A)):""}},function(A,M,t){"use strict";M.decode=M.parse=t(631),M.encode=M.stringify=t(632)},function(A,M,t){(function(M){for(var I=t(635),g="undefined"==typeof window?M:window,e=["moz","webkit"],i="AnimationFrame",T=g["request"+i],E=g["cancel"+i]||g["cancelRequest"+i],n=0;!T&&n1)||(e=A,!1)}),e)return new Error("(children) "+I+" - Duplicate children detected of bsRole: "+e+". 
Only one child each allowed with the following bsRoles: "+M.join(", "))})}},A.exports=M.default},function(A,M,t){"use strict";function I(A){var M=[];return void 0===A?M:(i.default.forEach(A,function(A){M.push(A)}),M)}var g=t(12).default;M.__esModule=!0,M.default=I;var e=t(97),i=g(e);A.exports=M.default},function(A,M,t){A.exports={default:t(652),__esModule:!0}},function(A,M,t){t(662),A.exports=t(98).Object.assign},function(A,M,t){var I=t(180);A.exports=function(A,M){return I.create(A,M)}},function(A,M,t){t(663),A.exports=t(98).Object.keys},function(A,M,t){t(664),A.exports=t(98).Object.setPrototypeOf},function(A,M){A.exports=function(A){if("function"!=typeof A)throw TypeError(A+" is not a function!");return A}},function(A,M,t){var I=t(279);A.exports=function(A){if(!I(A))throw TypeError(A+" is not an object!");return A}},function(A,M){var t={}.toString;A.exports=function(A){return t.call(A).slice(8,-1)}},function(A,M){A.exports=function(A){if(void 0==A)throw TypeError("Can't call method on "+A);return A}},function(A,M){var t=A.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=t)},function(A,M,t){var I=t(655);A.exports=Object("z").propertyIsEnumerable(0)?Object:function(A){return"String"==I(A)?A.split(""):Object(A)}},function(A,M,t){var I=t(180),g=t(280),e=t(658);A.exports=t(278)(function(){var A=Object.assign,M={},t={},I=Symbol(),g="abcdefghijklmnopqrst";return M[I]=7,g.split("").forEach(function(A){t[A]=A}),7!=A({},M)[I]||Object.keys(A({},t)).join("")!=g})?function(A,M){for(var t=g(A),i=arguments,T=i.length,E=1,n=I.getKeys,N=I.getSymbols,o=I.isEnum;T>E;)for(var c,C=e(i[E++]),a=N?n(C).concat(N(C)):n(C),D=a.length,r=0;D>r;)o.call(C,c=a[r++])&&(t[c]=C[c]);return t}:Object.assign},function(A,M,t){var I=t(179),g=t(98),e=t(278);A.exports=function(A,M){var t=(g.Object||{})[A]||Object[A],i={};i[A]=M(t),I(I.S+I.F*e(function(){t(1)}),"Object",i)}},function(A,M,t){var I=t(180).getDesc,g=t(279),e=t(654),i=function(A,M){if(e(A),!g(M)&&null!==M)throw TypeError(M+": can't set as prototype!")};A.exports={set:Object.setPrototypeOf||("__proto__"in{}?function(A,M,g){try{g=t(277)(Function.call,I(Object.prototype,"__proto__").set,2),g(A,[]),M=!(A instanceof Array)}catch(A){M=!0}return function(A,t){return i(A,t),M?A.__proto__=t:g(A,t),A}}({},!1):void 0),check:i}},function(A,M,t){var I=t(179);I(I.S+I.F,"Object",{assign:t(659)})},function(A,M,t){var I=t(280);t(660)("keys",function(A){return function(M){return A(I(M))}})},function(A,M,t){var I=t(179);I(I.S,"Object",{setPrototypeOf:t(661).set})},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){var t={};for(var I in A)M.indexOf(I)>=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}Object.defineProperty(M,"__esModule",{value:!0}),M.CopyToClipboard=void 0;var e=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){var M=A.style,t=g(A,["style"]),I=N({},M,{height:6,right:2,bottom:2,left:2,borderRadius:3});return c.default.createElement("div",N({style:I},t))}function i(A){var M=A.style,t=g(A,["style"]),I=N({},M,{width:6,right:2,bottom:2,top:2,borderRadius:3});return c.default.createElement("div",N({style:I},t))}function T(A){var M=A.style,t=g(A,["style"]),I=N({},M,{cursor:"pointer",borderRadius:"inherit",backgroundColor:"rgba(0,0,0,.2)"});return c.default.createElement("div",N({style:I},t))}function E(A){var 
M=A.style,t=g(A,["style"]),I=N({},M,{cursor:"pointer",borderRadius:"inherit",backgroundColor:"rgba(0,0,0,.2)"});return c.default.createElement("div",N({style:I},t))}function n(A){return c.default.createElement("div",A)}var N=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}var e=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function i(A){if(Array.isArray(A)){for(var M=0,t=Array(A.length);M0||(this.setState({draggedFiles:[]}),this.props.onDragLeave&&this.props.onDragLeave.call(this,A))}},{key:"onDrop",value:function A(M){var t=this,g=this.props,A=g.onDrop,e=g.onDropAccepted,T=g.onDropRejected,E=g.multiple,n=g.disablePreview,o=g.accept,c=(0,u.default)(M),C=[],a=[];M.preventDefault(),this.dragTargets=[],this.isFileDialogActive=!1,c.forEach(function(A){if(!n)try{A.preview=window.URL.createObjectURL(A)}catch(M){"production"!==I.env.NODE_ENV&&console.error("Failed to generate preview for file",A,M)}N(A,o)&&t.fileMatchSize(A)?C.push(A):a.push(A)}),E||a.push.apply(a,i(C.splice(1))),A&&A.call(this,C,a,M),a.length>0&&T&&T.call(this,a,M),C.length>0&&e&&e.call(this,C,M),this.draggedFiles=null,this.setState({draggedFiles:[],acceptedFiles:C,rejectedFiles:a})}},{key:"onClick",value:function A(M){var t=this.props,A=t.onClick,I=t.disableClick;I||(M.stopPropagation(),this.open(),A&&A.call(this,M))}},{key:"onInputElementClick",value:function(A){A.stopPropagation(),this.props.inputProps&&this.props.inputProps.onClick&&this.props.inputProps.onClick()}},{key:"onFileDialogCancel",value:function A(){var A=this.props.onFileDialogCancel,M=this.fileInputEl,t=this.isFileDialogActive;A&&t&&setTimeout(function(){var I=M.files;I.length||(t=!1,A())},300)}},{key:"setRef",value:function(A){this.node=A}},{key:"setRefs",value:function(A){this.fileInputEl=A}},{key:"fileMatchSize",value:function(A){return A.size<=this.props.maxSize&&A.size>=this.props.minSize}},{key:"allFilesAccepted",value:function(A){var M=this;return A.every(function(A){return N(A,M.props.accept)})}},{key:"open",value:function(){this.isFileDialogActive=!0,this.fileInputEl.value=null,this.fileInputEl.click()}},{key:"render",value:function(){var A=this.props,M=A.accept,t=A.activeClassName,I=A.inputProps,g=A.multiple,i=A.name,T=A.rejectClassName,E=A.children,n=e(A,["accept","activeClassName","inputProps","multiple","name","rejectClassName","children"]),N=n.activeStyle,c=n.className,C=n.rejectStyle,D=n.style,r=e(n,["activeStyle","className","rejectStyle","style"]),B=this.state.draggedFiles,Q=B.length,s=g||Q<=1,u=Q>0&&this.allFilesAccepted(B),y=Q>0&&(!u||!s);c=c||"",u&&t&&(c+=" "+t),y&&T&&(c+=" "+T),c||D||N||C||(D={width:200,height:200,borderWidth:2,borderColor:"#666",borderStyle:"dashed",borderRadius:5},N={borderStyle:"solid",borderColor:"#6c6",backgroundColor:"#eee"},C={borderStyle:"solid",borderColor:"#c66",backgroundColor:"#eee"});var j=void 0;j=N&&u?o({},D,N):C&&y?o({},D,C):o({},D);var l={accept:M,type:"file",style:{display:"none"},multiple:x&&g,ref:this.setRefs,onChange:this.onDrop};i&&i.length&&(l.name=i);var w=["acceptedFiles","preventDropOnDocument","disablePreview","disableClick","onDropAccepted","onDropRejected","onFileDialogCancel","maxSize","minSize"],L=o({},r);return w.forEach(function(A){return delete 
L[A]}),a.default.createElement("div",o({className:c,style:j},L,{onClick:this.onClick,onDragStart:this.onDragStart,onDragEnter:this.onDragEnter,onDragOver:this.onDragOver,onDragLeave:this.onDragLeave,onDrop:this.onDrop,ref:this.setRef}),this.renderChildren(E,u,y),a.default.createElement("input",o({},I,l)))}}]),M}(a.default.Component);y.propTypes={accept:r.default.string,children:r.default.oneOfType([r.default.node,r.default.func]),disableClick:r.default.bool,disablePreview:r.default.bool,preventDropOnDocument:r.default.bool,inputProps:r.default.object,multiple:r.default.bool,name:r.default.string,maxSize:r.default.number,minSize:r.default.number,className:r.default.string,activeClassName:r.default.string,rejectClassName:r.default.string,style:r.default.object,activeStyle:r.default.object,rejectStyle:r.default.object,onClick:r.default.func,onDrop:r.default.func,onDropAccepted:r.default.func,onDropRejected:r.default.func,onDragStart:r.default.func,onDragEnter:r.default.func,onDragOver:r.default.func,onDragLeave:r.default.func,onFileDialogCancel:r.default.func},y.defaultProps={preventDropOnDocument:!0,disablePreview:!1,disableClick:!1,multiple:!0,maxSize:1/0,minSize:0},M.default=y,A.exports=M.default}).call(M,t(1))},function(A,M){function t(){throw new Error("setTimeout has not been defined")}function I(){throw new Error("clearTimeout has not been defined")}function g(A){if(N===setTimeout)return setTimeout(A,0);if((N===t||!N)&&setTimeout)return N=setTimeout,setTimeout(A,0);try{return N(A,0)}catch(M){try{return N.call(null,A,0)}catch(M){return N.call(this,A,0)}}}function e(A){if(o===clearTimeout)return clearTimeout(A);if((o===I||!o)&&clearTimeout)return o=clearTimeout,clearTimeout(A);try{return o(A)}catch(M){try{return o.call(null,A)}catch(M){return o.call(this,A)}}}function i(){D&&C&&(D=!1,C.length?a=C.concat(a):r=-1,a.length&&T())}function T(){if(!D){var A=g(i);D=!0;for(var M=a.length;M;){for(C=a,a=[];++r1)for(var t=1;t0?I:t)(A)}},function(A,M,t){var I=t(23),g=Math.min;A.exports=function(A){return A>0?g(I(A),9007199254740991):0}},function(A,M,t){"use strict";var I=t(3),g=t(24),e=t(21),i="endsWith",T=""[i];I(I.P+I.F*t(14)(i),"String",{endsWith:function(A){var M=e(this,A,i),t=arguments,I=t.length>1?t[1]:void 0,E=g(M.length),n=void 0===I?E:Math.min(g(I),E),N=String(A);return T?T.call(M,N,n):M.slice(n-N.length,n)===N}})},function(A,M,t){var I=t(5),g=t(3),e=t(1).Array||Array,i={},T=function(A,M){I.each.call(A.split(","),function(A){void 0==M&&A in e?i[A]=e[A]:A in[]&&(i[A]=t(12)(Function.call,[][A],M))})};T("pop,reverse,shift,keys,values,entries",1),T("indexOf,every,some,forEach,map,filter,find,findIndex,includes",3),T("join,slice,concat,push,splice,unshift,sort,lastIndexOf,reduce,reduceRight,copyWithin,fill"),g(g.S,"Array",i)}])},function(A,M){"use strict";function t(A){var M=[];if(A.dataTransfer){var t=A.dataTransfer;t.files&&t.files.length?M=t.files:t.items&&t.items.length&&(M=t.items)}else A.target&&A.target.files&&(M=A.target.files);return Array.prototype.slice.call(M)}Object.defineProperty(M,"__esModule",{value:!0}),M.default=t,A.exports=M.default}])})},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){var t={};for(var I in A)M.indexOf(I)>=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof 
M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=function(){function A(A,M){for(var t=0;t0&&void 0!==arguments[0]?arguments[0]:{},t=M.hideSiblingNodes,I=void 0===t||t,e=M.handleContainerOverflow,i=void 0===e||e;g(this,A),this.hideSiblingNodes=I,this.handleContainerOverflow=i,this.modals=[],this.containers=[],this.data=[]}return n(A,[{key:"add",value:function(A,M,t){var I=this.modals.indexOf(A),g=this.containers.indexOf(M);if(I!==-1)return I; +if(I=this.modals.length,this.modals.push(A),this.hideSiblingNodes&&(0,Q.hideSiblings)(M,A.mountNode),g!==-1)return this.data[g].modals.push(A),I;var e={modals:[A],classes:t?t.split(/\s+/):[],overflowing:(0,B.default)(M)};return this.handleContainerOverflow&&T(e,M),e.classes.forEach(C.default.addClass.bind(null,M)),this.containers.push(M),this.data.push(e),I}},{key:"remove",value:function(A){var M=this.modals.indexOf(A);if(M!==-1){var t=i(this.data,A),I=this.data[t],g=this.containers[t];I.modals.splice(I.modals.indexOf(A),1),this.modals.splice(M,1),0===I.modals.length?(I.classes.forEach(C.default.removeClass.bind(null,g)),this.handleContainerOverflow&&E(I,g),this.hideSiblingNodes&&(0,Q.showSiblings)(g,A.mountNode),this.containers.splice(t,1),this.data.splice(t,1)):this.hideSiblingNodes&&(0,Q.ariaHidden)(!1,I.modals[I.modals.length-1].mountNode)}}},{key:"isTopModal",value:function(A){return!!this.modals.length&&this.modals[this.modals.length-1]===A}}]),A}();M.default=s,A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){var t={};for(var I in A)M.indexOf(I)>=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=Object.assign||function(A){for(var M=1;M1?t-1:0),g=1;g=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}Object.defineProperty(M,"__esModule",{value:!0});var E=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){if(!(A instanceof M))throw new TypeError("Cannot call 
a class as a function")}function i(A,M){if(!A)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!M||"object"!=typeof M&&"function"!=typeof M?A:M}function T(A,M){if("function"!=typeof M&&null!==M)throw new TypeError("Super expression must either be null or a function, not "+typeof M);A.prototype=Object.create(M&&M.prototype,{constructor:{value:A,enumerable:!1,writable:!0,configurable:!0}}),M&&(Object.setPrototypeOf?Object.setPrototypeOf(A,M):A.__proto__=M)}function E(){}Object.defineProperty(M,"__esModule",{value:!0}),M.EXITING=M.ENTERED=M.ENTERING=M.EXITED=M.UNMOUNTED=void 0;var n=Object.assign||function(A){for(var M=1;MT?T-n:0}function i(A,M,t,I){var e=g(t),i=e.width,T=A-I,E=A+I+M;return T<0?-T:E>i?i-E:0}function T(A,M,t,I,g){var T="BODY"===I.tagName?(0,n.default)(t):(0,o.default)(t,I),E=(0,n.default)(M),N=E.height,c=E.width,C=void 0,a=void 0,D=void 0,r=void 0;if("left"===A||"right"===A){a=T.top+(T.height-N)/2,C="left"===A?T.left-c:T.left+T.width;var B=e(a,N,I,g);a+=B,r=50*(1-2*B/N)+"%",D=void 0}else{if("top"!==A&&"bottom"!==A)throw new Error('calcOverlayPosition(): No such placement of "'+A+'" found.');C=T.left+(T.width-c)/2,a="top"===A?T.top-N:T.top+T.height;var Q=i(C,c,I,g);C+=Q,D=50*(1-2*Q/c)+"%",r=void 0}return{positionLeft:C,positionTop:a,arrowOffsetLeft:D,arrowOffsetTop:r}}Object.defineProperty(M,"__esModule",{value:!0}),M.default=T;var E=t(287),n=I(E),N=t(690),o=I(N),c=t(288),C=I(c),a=t(99),D=I(a);A.exports=M.default},function(A,M){"use strict";function t(A,M){M&&(A?M.setAttribute("aria-hidden","true"):M.removeAttribute("aria-hidden"))}function I(A,M){T(A,M,function(A){return t(!0,A)})}function g(A,M){T(A,M,function(A){return t(!1,A)})}Object.defineProperty(M,"__esModule",{value:!0}),M.ariaHidden=t,M.hideSiblings=I,M.showSiblings=g;var e=["template","script","style"],i=function(A){var M=A.nodeType,t=A.tagName;return 1===M&&e.indexOf(t.toLowerCase())===-1},T=function(A,M,t){M=[].concat(M),[].forEach.call(A.children,function(A){M.indexOf(A)===-1&&i(A)&&t(A)})}},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(){var A=arguments.length>0&&void 0!==arguments[0]?arguments[0]:(0,i.default)();try{return A.activeElement}catch(A){}}Object.defineProperty(M,"__esModule",{value:!0}),M.default=g;var e=t(100),i=I(e);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A,M){A.classList?A.classList.add(M):(0,i.default)(A)||(A.className=A.className+" "+M)}Object.defineProperty(M,"__esModule",{value:!0}),M.default=g;var e=t(285),i=I(e);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0}),M.hasClass=M.removeClass=M.addClass=void 0;var g=t(685),e=I(g),i=t(687),T=I(i),E=t(285),n=I(E);M.addClass=e.default,M.removeClass=T.default,M.hasClass=n.default,M.default={addClass:e.default,removeClass:T.default,hasClass:n.default}},function(A,M){"use strict";A.exports=function(A,M){A.classList?A.classList.remove(M):A.className=A.className.replace(new RegExp("(^|\\s)"+M+"(?:\\s|$)","g"),"$1").replace(/\s+/g," ").replace(/^\s*|\s*$/g,"")}},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var g=t(87),e=I(g),i=function(){};e.default&&(i=function(){return document.addEventListener?function(A,M,t,I){return A.removeEventListener(M,t,I||!1)}:document.attachEvent?function(A,M,t){return 
A.detachEvent("on"+M,t)}:void 0}()),M.default=i,A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){return A.nodeName&&A.nodeName.toLowerCase()}function e(A){for(var M=(0,T.default)(A),t=A&&A.offsetParent;t&&"html"!==g(A)&&"static"===(0,n.default)(t,"position");)t=t.offsetParent;return t||M.documentElement}Object.defineProperty(M,"__esModule",{value:!0}),M.default=e;var i=t(100),T=I(i),E=t(183),n=I(E);A.exports=M.default},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(A){return A.nodeName&&A.nodeName.toLowerCase()}function e(A,M){var t,I={top:0,left:0};return"fixed"===(0,r.default)(A,"position")?t=A.getBoundingClientRect():(M=M||(0,N.default)(A),t=(0,E.default)(A),"html"!==g(M)&&(I=(0,E.default)(M)),I.top+=parseInt((0,r.default)(M,"borderTopWidth"),10)-(0,c.default)(M)||0,I.left+=parseInt((0,r.default)(M,"borderLeftWidth"),10)-(0,a.default)(M)||0),i({},t,{top:t.top-I.top-(parseInt((0,r.default)(A,"marginTop"),10)||0),left:t.left-I.left-(parseInt((0,r.default)(A,"marginLeft"),10)||0)})}Object.defineProperty(M,"__esModule",{value:!0});var i=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A,M){var t=A.history,I=A.routes,e=A.location,E=g(A,["history","routes","location"]);t||e?void 0:(0,n.default)(!1),t=t?t:(0,o.default)(E);var N=(0,C.default)(t,(0,a.createRoutes)(I)),c=void 0;e?e=t.createLocation(e):c=t.listen(function(A){e=A});var r=(0,D.createRouterObject)(t,N);t=(0,D.createRoutingHistory)(t,N),N.match(e,function(A,I,g){M(A,I&&r.createLocation(I,T.REPLACE),g&&i({},g,{history:t,router:r,matchContext:{history:t,transitionManager:N,router:r}})),c&&c()})}M.__esModule=!0;var i=Object.assign||function(A){for(var M=1;M=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function e(A){return function(){var M=arguments.length<=0||void 0===arguments[0]?{}:arguments[0],t=M.routes,I=g(M,["routes"]),e=(0,E.default)(A)(I),T=(0,N.default)(e,t);return i({},e,T)}}M.__esModule=!0;var i=Object.assign||function(A){for(var M=1;M=A&&n&&(T=!0,t()))}}var i=0,T=!1,E=!1,n=!1,N=void 0;e()}M.__esModule=!0;var I=Array.prototype.slice;M.loopAsync=t},function(A,M,t){"use strict";function I(A){return A&&A.__esModule?A:{default:A}}function g(){function A(A){try{A=A||window.history.state||{}}catch(M){A={}}var M=o.getWindowPath(),t=A,I=t.key,g=void 0;I?g=c.readState(I):(g=null,I=s.createKey(),B&&window.history.replaceState(e({},A,{key:I}),null));var i=n.parsePath(M);return s.createLocation(e({},i,{state:g}),void 0,I)}function M(M){function t(M){void 0!==M.state&&I(A(M.state))}var I=M.transitionTo;return o.addEventListener(window,"popstate",t),function(){o.removeEventListener(window,"popstate",t)}}function t(A){var M=A.basename,t=A.pathname,I=A.search,g=A.hash,e=A.state,i=A.action,T=A.key;if(i!==E.POP){c.saveState(T,e);var n=(M||"")+t+I+g,N={key:T};if(i===E.PUSH){if(Q)return window.location.href=n,!1;window.history.pushState(N,null,n)}else{if(Q)return window.location.replace(n),!1;window.history.replaceState(N,null,n)}}}function I(A){1===++u&&(x=M(s));var t=s.listenBefore(A);return function(){t(),0===--u&&x()}}function g(A){1===++u&&(x=M(s));var t=s.listen(A);return function(){t(),0===--u&&x()}}function i(A){1===++u&&(x=M(s)),s.registerTransitionHook(A)}function C(A){s.unregisterTransitionHook(A),0===--u&&x()}var D=arguments.length<=0||void 0===arguments[0]?{}:arguments[0];N.canUseDOM?void 0:T.default(!1);var 
r=D.forceRefresh,B=o.supportsHistory(),Q=!B||r,s=a.default(e({},D,{getCurrentLocation:A,finishTransition:t,saveState:c.saveState})),u=0,x=void 0;return e({},s,{listenBefore:I,listen:g,registerTransitionHook:i,unregisterTransitionHook:C})}M.__esModule=!0;var e=Object.assign||function(A){for(var M=1;M=0&&M=0&&r8&&j<=11),L=32,Y=String.fromCharCode(L),d=C.topLevelTypes,h={beforeInput:{phasedRegistrationNames:{bubbled:s({onBeforeInput:null}),captured:s({onBeforeInputCapture:null})},dependencies:[d.topCompositionEnd,d.topKeyPress,d.topTextInput,d.topPaste]},compositionEnd:{phasedRegistrationNames:{bubbled:s({onCompositionEnd:null}),captured:s({onCompositionEndCapture:null})},dependencies:[d.topBlur,d.topCompositionEnd,d.topKeyDown,d.topKeyPress,d.topKeyUp,d.topMouseDown]},compositionStart:{phasedRegistrationNames:{bubbled:s({onCompositionStart:null}),captured:s({onCompositionStartCapture:null})},dependencies:[d.topBlur,d.topCompositionStart,d.topKeyDown,d.topKeyPress,d.topKeyUp,d.topMouseDown]},compositionUpdate:{phasedRegistrationNames:{bubbled:s({onCompositionUpdate:null}),captured:s({onCompositionUpdateCapture:null})},dependencies:[d.topBlur,d.topCompositionUpdate,d.topKeyDown,d.topKeyPress,d.topKeyUp,d.topMouseDown]}},S=!1,z=null,p={eventTypes:h,extractEvents:function(A,M,t,I,g){return[n(A,M,t,I,g),c(A,M,t,I,g)]}};A.exports=p},function(A,M,t){"use strict";var I=t(311),g=t(20),e=t(38),i=(t(783),t(774)),T=t(788),E=t(792),n=(t(8),E(function(A){return T(A)})),N=!1,o="cssFloat";if(g.canUseDOM){var c=document.createElement("div").style;try{c.font=""}catch(A){N=!0}void 0===document.documentElement.style.cssFloat&&(o="styleFloat")}var C={createMarkupForStyles:function(A){var M="";for(var t in A)if(A.hasOwnProperty(t)){var I=A[t];null!=I&&(M+=n(t)+":",M+=i(t,I)+";")}return M||null},setValueForStyles:function(A,M){var t=A.style;for(var g in M)if(M.hasOwnProperty(g)){var e=i(g,M[g]);if("float"===g&&(g=o),e)t[g]=e;else{var T=N&&I.shorthandPropertyExpansions[g];if(T)for(var E in T)t[E]="";else t[g]=""}}}};e.measureMethods(C,"CSSPropertyOperations",{setValueForStyles:"setValueForStyles"}),A.exports=C},function(A,M,t){"use strict";function I(A){var M=A.nodeName&&A.nodeName.toLowerCase();return"select"===M||"input"===M&&"file"===A.type}function g(A){var M=j.getPooled(h.change,z,A,l(A));u.accumulateTwoPhaseDispatches(M),y.batchedUpdates(e,M)}function e(A){s.enqueueEvents(A),s.processEventQueue(!1)}function i(A,M){S=A,z=M,S.attachEvent("onchange",g)}function T(){S&&(S.detachEvent("onchange",g),S=null,z=null)}function E(A,M,t){if(A===d.topChange)return t}function n(A,M,t){A===d.topFocus?(T(),i(M,t)):A===d.topBlur&&T()}function N(A,M){S=A,z=M,p=A.value,U=Object.getOwnPropertyDescriptor(A.constructor.prototype,"value"),Object.defineProperty(S,"value",m),S.attachEvent("onpropertychange",c)}function o(){S&&(delete S.value,S.detachEvent("onpropertychange",c),S=null,z=null,p=null,U=null)}function c(A){if("value"===A.propertyName){var M=A.srcElement.value;M!==p&&(p=M,g(A))}}function C(A,M,t){if(A===d.topInput)return t}function a(A,M,t){A===d.topFocus?(o(),N(M,t)):A===d.topBlur&&o()}function D(A,M,t){if((A===d.topSelectionChange||A===d.topKeyUp||A===d.topKeyDown)&&S&&S.value!==p)return p=S.value,z}function r(A){return A.nodeName&&"input"===A.nodeName.toLowerCase()&&("checkbox"===A.type||"radio"===A.type)}function B(A,M,t){if(A===d.topClick)return t}var 
Q=t(52),s=t(101),u=t(102),x=t(20),y=t(39),j=t(76),l=t(204),w=t(207),L=t(338),Y=t(62),d=Q.topLevelTypes,h={change:{phasedRegistrationNames:{bubbled:Y({onChange:null}),captured:Y({onChangeCapture:null})},dependencies:[d.topBlur,d.topChange,d.topClick,d.topFocus,d.topInput,d.topKeyDown,d.topKeyUp,d.topSelectionChange]}},S=null,z=null,p=null,U=null,O=!1;x.canUseDOM&&(O=w("change")&&(!("documentMode"in document)||document.documentMode>8));var f=!1;x.canUseDOM&&(f=w("input")&&(!("documentMode"in document)||document.documentMode>9));var m={get:function(){return U.get.call(this)},set:function(A){p=""+A,U.set.call(this,A)}},F={eventTypes:h,extractEvents:function(A,M,t,g,e){var i,T;if(I(M)?O?i=E:T=n:L(M)?f?i=C:(i=D,T=a):r(M)&&(i=B),i){var N=i(A,M,t);if(N){var o=j.getPooled(h.change,N,g,e);return o.type="change",u.accumulateTwoPhaseDispatches(o),o}}T&&T(A,M,t)}};A.exports=F},function(A,M){"use strict";var t=0,I={createReactRootIndex:function(){return t++}};A.exports=I},function(A,M,t){"use strict";function I(A){return A.substring(1,A.indexOf(" "))}var g=t(20),e=t(785),i=t(47),T=t(343),E=t(3),n=/^(<[^ \/>]+)/,N="data-danger-index",o={dangerouslyRenderMarkup:function(A){g.canUseDOM?void 0:E(!1);for(var M,t={},o=0;o1?1-M:void 0;return this._fallbackText=g.slice(A,T),this._fallbackText}}),g.addPoolingTo(I),A.exports=I},function(A,M,t){"use strict";var I,g=t(90),e=t(20),i=g.injection.MUST_USE_ATTRIBUTE,T=g.injection.MUST_USE_PROPERTY,E=g.injection.HAS_BOOLEAN_VALUE,n=g.injection.HAS_SIDE_EFFECTS,N=g.injection.HAS_NUMERIC_VALUE,o=g.injection.HAS_POSITIVE_NUMERIC_VALUE,c=g.injection.HAS_OVERLOADED_BOOLEAN_VALUE;if(e.canUseDOM){var C=document.implementation;I=C&&C.hasFeature&&C.hasFeature("http://www.w3.org/TR/SVG11/feature#BasicStructure","1.1")}var a={isCustomAttribute:RegExp.prototype.test.bind(/^(data|aria)-[a-z_][a-z\d_.\-]*$/),Properties:{accept:null,acceptCharset:null,accessKey:null,action:null,allowFullScreen:i|E,allowTransparency:i,alt:null,async:E,autoComplete:null,autoPlay:E,capture:i|E,cellPadding:null,cellSpacing:null,charSet:i,challenge:i,checked:T|E,classID:i,className:I?i:T,cols:i|o,colSpan:null,content:null,contentEditable:null,contextMenu:i,controls:T|E,coords:null,crossOrigin:null,data:null,dateTime:i,default:E,defer:E,dir:null,disabled:i|E,download:c,draggable:null,encType:null,form:i,formAction:i,formEncType:i,formMethod:i,formNoValidate:E,formTarget:i,frameBorder:i,headers:null,height:i,hidden:i|E,high:null,href:null,hrefLang:null,htmlFor:null,httpEquiv:null,icon:null,id:T,inputMode:i,integrity:null,is:i,keyParams:i,keyType:i,kind:null,label:null,lang:null,list:i,loop:T|E,low:null,manifest:i,marginHeight:null,marginWidth:null,max:null,maxLength:i,media:i,mediaGroup:null,method:null,min:null,minLength:i,multiple:T|E,muted:T|E,name:null,nonce:i,noValidate:E,open:E,optimum:null,pattern:null,placeholder:null,poster:null,preload:null,radioGroup:null,readOnly:T|E,rel:null,required:E,reversed:E,role:i,rows:i|o,rowSpan:null,sandbox:null,scope:null,scoped:E,scrolling:null,seamless:i|E,selected:T|E,shape:null,size:i|o,sizes:i,span:o,spellCheck:null,src:null,srcDoc:T,srcLang:null,srcSet:i,start:N,step:null,style:null,summary:null,tabIndex:null,target:null,title:null,type:null,useMap:null,value:T|n,width:i,wmode:i,wrap:null,about:i,datatype:i,inlist:i,prefix:i,property:i,resource:i,typeof:i,vocab:i,autoCapitalize:i,autoCorrect:i,autoSave:null,color:null,itemProp:i,itemScope:i|E,itemType:i,itemID:i,itemRef:i,results:null,security:i,unselectable:i},DOMAttributeNames:{acceptCharset:"accept-charset",cla
ssName:"class",htmlFor:"for",httpEquiv:"http-equiv"},DOMPropertyNames:{autoComplete:"autocomplete",autoFocus:"autofocus",autoPlay:"autoplay",autoSave:"autosave",encType:"encoding",hrefLang:"hreflang",radioGroup:"radiogroup",spellCheck:"spellcheck",srcDoc:"srcdoc",srcSet:"srcset"}};A.exports=a},function(A,M,t){"use strict";var I=t(317),g=t(748),e=t(753),i=t(7),T=t(775),E={};i(E,e),i(E,{findDOMNode:T("findDOMNode","ReactDOM","react-dom",I,I.findDOMNode),render:T("render","ReactDOM","react-dom",I,I.render),unmountComponentAtNode:T("unmountComponentAtNode","ReactDOM","react-dom",I,I.unmountComponentAtNode),renderToString:T("renderToString","ReactDOMServer","react-dom/server",g,g.renderToString),renderToStaticMarkup:T("renderToStaticMarkup","ReactDOMServer","react-dom/server",g,g.renderToStaticMarkup)}),E.__SECRET_DOM_DO_NOT_USE_OR_YOU_WILL_BE_FIRED=I,E.__SECRET_DOM_SERVER_DO_NOT_USE_OR_YOU_WILL_BE_FIRED=g,A.exports=E},function(A,M,t){"use strict";var I=(t(103),t(201)),g=(t(8),"_getDOMNodeDidWarn"),e={getDOMNode:function(){return this.constructor[g]=!0,I(this)}};A.exports=e},function(A,M,t){"use strict";function I(A,M,t){var I=void 0===A[t];null!=M&&I&&(A[t]=e(M,null))}var g=t(75),e=t(206),i=t(209),T=t(210),E=(t(8),{instantiateChildren:function(A,M,t){if(null==A)return null;var g={};return T(A,I,g),g},updateChildren:function(A,M,t,I){if(!M&&!A)return null;var T;for(T in M)if(M.hasOwnProperty(T)){var E=A&&A[T],n=E&&E._currentElement,N=M[T];if(null!=E&&i(n,N))g.receiveComponent(E,N,t,I),M[T]=E;else{E&&g.unmountComponent(E,T);var o=e(N,null);M[T]=o}}for(T in A)!A.hasOwnProperty(T)||M&&M.hasOwnProperty(T)||g.unmountComponent(A[T]);return M},unmountChildren:function(A){for(var M in A)if(A.hasOwnProperty(M)){var t=A[M];g.unmountComponent(t)}}});A.exports=E},function(A,M,t){"use strict";function I(A){var M=A._currentElement._owner||null;if(M){var t=M.getName();if(t)return" Check the render method of ` + "`" + `"+t+"` + "`" + `."}return""}function g(A){}var e=t(197),i=t(53),T=t(29),E=t(103),n=t(38),N=t(129),o=(t(128),t(75)),c=t(199),C=t(7),a=t(105),D=t(3),r=t(209);t(8);g.prototype.render=function(){var A=E.get(this)._currentElement.type;return A(this.props,this.context,this.updater)};var B=1,Q={construct:function(A){this._currentElement=A,this._rootNodeID=null,this._instance=null,this._pendingElement=null,this._pendingStateQueue=null,this._pendingReplaceState=!1,this._pendingForceUpdate=!1,this._renderedComponent=null,this._context=null,this._mountOrder=0,this._topLevelWrapper=null,this._pendingCallbacks=null},mountComponent:function(A,M,t){this._context=t,this._mountOrder=B++,this._rootNodeID=A;var I,e,i=this._processProps(this._currentElement.props),n=this._processContext(t),N=this._currentElement.type,C="prototype"in N;C&&(I=new N(i,n,c)),C&&null!==I&&I!==!1&&!T.isValidElement(I)||(e=I,I=new g(N)),I.props=i,I.context=n,I.refs=a,I.updater=c,this._instance=I,E.set(I,this);var r=I.state;void 0===r&&(I.state=r=null),"object"!=typeof r||Array.isArray(r)?D(!1):void 0,this._pendingStateQueue=null,this._pendingReplaceState=!1,this._pendingForceUpdate=!1,I.componentWillMount&&(I.componentWillMount(),this._pendingStateQueue&&(I.state=this._processPendingState(I.props,I.context))),void 0===e&&(e=this._renderValidatedComponent()),this._renderedComponent=this._instantiateReactComponent(e);var Q=o.mountComponent(this._renderedComponent,A,M,this._processChildContext(t));return I.componentDidMount&&M.getReactMountReady().enqueue(I.componentDidMount,I),Q},unmountComponent:function(){var 
A=this._instance;A.componentWillUnmount&&A.componentWillUnmount(),o.unmountComponent(this._renderedComponent),this._renderedComponent=null,this._instance=null,this._pendingStateQueue=null,this._pendingReplaceState=!1,this._pendingForceUpdate=!1,this._pendingCallbacks=null,this._pendingElement=null,this._context=null,this._rootNodeID=null,this._topLevelWrapper=null,E.remove(A)},_maskContext:function(A){var M=null,t=this._currentElement.type,I=t.contextTypes;if(!I)return a;M={};for(var g in I)M[g]=A[g];return M},_processContext:function(A){var M=this._maskContext(A);return M},_processChildContext:function(A){var M=this._currentElement.type,t=this._instance,I=t.getChildContext&&t.getChildContext();if(I){"object"!=typeof M.childContextTypes?D(!1):void 0;for(var g in I)g in M.childContextTypes?void 0:D(!1);return C({},A,I)}return A},_processProps:function(A){return A},_checkPropTypes:function(A,M,t){var g=this.getName();for(var e in A)if(A.hasOwnProperty(e)){var i;try{"function"!=typeof A[e]?D(!1):void 0,i=A[e](M,e,g,t,null,"SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED")}catch(A){i=A}if(i instanceof Error){I(this);t===N.prop}}},receiveComponent:function(A,M,t){var I=this._currentElement,g=this._context;this._pendingElement=null,this.updateComponent(M,I,A,g,t)},performUpdateIfNecessary:function(A){null!=this._pendingElement&&o.receiveComponent(this,this._pendingElement||this._currentElement,A,this._context),(null!==this._pendingStateQueue||this._pendingForceUpdate)&&this.updateComponent(A,this._currentElement,this._currentElement,this._context,this._context)},updateComponent:function(A,M,t,I,g){var e,i=this._instance,T=this._context===g?i.context:this._processContext(g);M===t?e=t.props:(e=this._processProps(t.props),i.componentWillReceiveProps&&i.componentWillReceiveProps(e,T));var E=this._processPendingState(e,T),n=this._pendingForceUpdate||!i.shouldComponentUpdate||i.shouldComponentUpdate(e,E,T);n?(this._pendingForceUpdate=!1,this._performComponentUpdate(t,e,E,T,A,g)):(this._currentElement=t,this._context=g,i.props=e,i.state=E,i.context=T)},_processPendingState:function(A,M){var t=this._instance,I=this._pendingStateQueue,g=this._pendingReplaceState;if(this._pendingReplaceState=!1,this._pendingStateQueue=null,!I)return t.state;if(g&&1===I.length)return I[0];for(var e=C({},g?I[0]:t.state),i=g?1:0;i=0||null!=M.is}function r(A){a(A),this._tag=A.toLowerCase(),this._renderedChildren=null,this._previousStyle=null,this._previousStyleCopy=null,this._rootNodeID=null,this._wrapperState=null,this._topLevelWrapper=null,this._nodeWithLegacyProperties=null}var B=t(727),Q=t(729),s=t(90),u=t(194),x=t(52),y=t(127),j=t(196),l=t(742),w=t(745),L=t(746),Y=t(319),d=t(749),h=t(23),S=t(754),z=t(38),p=t(199),U=t(7),O=t(132),f=t(133),m=t(3),F=(t(207),t(62)),k=t(134),R=t(208),J=(t(344),t(211), 
+t(8),y.deleteListener),G=y.listenTo,H=y.registrationNameModules,v={string:!0,number:!0},b=F({children:null}),X=F({style:null}),W=F({__html:null}),V=1,P={topAbort:"abort",topCanPlay:"canplay",topCanPlayThrough:"canplaythrough",topDurationChange:"durationchange",topEmptied:"emptied",topEncrypted:"encrypted",topEnded:"ended",topError:"error",topLoadedData:"loadeddata",topLoadedMetadata:"loadedmetadata",topLoadStart:"loadstart",topPause:"pause",topPlay:"play",topPlaying:"playing",topProgress:"progress",topRateChange:"ratechange",topSeeked:"seeked",topSeeking:"seeking",topStalled:"stalled",topSuspend:"suspend",topTimeUpdate:"timeupdate",topVolumeChange:"volumechange",topWaiting:"waiting"},Z={area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0},K={listing:!0,pre:!0,textarea:!0},q=(U({menuitem:!0},Z),/^[a-zA-Z][a-zA-Z:_\.\-\d]*$/),_={},$={}.hasOwnProperty;r.displayName="ReactDOMComponent",r.Mixin={construct:function(A){this._currentElement=A},mountComponent:function(A,M,t){this._rootNodeID=A;var I=this._currentElement.props;switch(this._tag){case"iframe":case"img":case"form":case"video":case"audio":this._wrapperState={listeners:null},M.getReactMountReady().enqueue(o,this);break;case"button":I=l.getNativeProps(this,I,t);break;case"input":w.mountWrapper(this,I,t),I=w.getNativeProps(this,I,t);break;case"option":L.mountWrapper(this,I,t),I=L.getNativeProps(this,I,t);break;case"select":Y.mountWrapper(this,I,t),I=Y.getNativeProps(this,I,t),t=Y.processChildContext(this,I,t);break;case"textarea":d.mountWrapper(this,I,t),I=d.getNativeProps(this,I,t)}E(this,I);var g;if(M.useCreateElement){var e=t[h.ownerDocumentContextKey],i=e.createElement(this._currentElement.type);u.setAttributeForID(i,this._rootNodeID),h.getID(i),this._updateDOMProperties({},I,M,i),this._createInitialChildren(M,I,t,i),g=i}else{var T=this._createOpenTagMarkupAndPutListeners(M,I),n=this._createContentMarkup(M,I,t);g=!n&&Z[this._tag]?T+"/>":T+">"+n+""}switch(this._tag){case"input":M.getReactMountReady().enqueue(c,this);case"button":case"select":case"textarea":I.autoFocus&&M.getReactMountReady().enqueue(B.focusDOMComponent,this)}return g},_createOpenTagMarkupAndPutListeners:function(A,M){var t="<"+this._currentElement.type;for(var I in M)if(M.hasOwnProperty(I)){var g=M[I];if(null!=g)if(H.hasOwnProperty(I))g&&n(this._rootNodeID,I,g,A);else{I===X&&(g&&(g=this._previousStyleCopy=U({},M.style)),g=Q.createMarkupForStyles(g));var e=null;null!=this._tag&&D(this._tag,M)?I!==b&&(e=u.createMarkupForCustomAttribute(I,g)):e=u.createMarkupForProperty(I,g),e&&(t+=" "+e)}}if(A.renderToStaticMarkup)return t;var i=u.createMarkupForID(this._rootNodeID);return t+" "+i},_createContentMarkup:function(A,M,t){var I="",g=M.dangerouslySetInnerHTML;if(null!=g)null!=g.__html&&(I=g.__html);else{var e=v[typeof M.children]?M.children:null,i=null!=e?null:M.children;if(null!=e)I=f(e);else if(null!=i){var T=this.mountChildren(i,A,t);I=T.join("")}}return K[this._tag]&&"\n"===I.charAt(0)?"\n"+I:I},_createInitialChildren:function(A,M,t,I){var g=M.dangerouslySetInnerHTML;if(null!=g)null!=g.__html&&k(I,g.__html);else{var e=v[typeof M.children]?M.children:null,i=null!=e?null:M.children;if(null!=e)R(I,e);else if(null!=i)for(var T=this.mountChildren(i,A,t),E=0;EM.end?(t=M.end,I=M.start):(t=M.start,I=M.end),g.moveToElementText(A),g.moveStart("character",t),g.setEndPoint("EndToStart",g),g.moveEnd("character",I-t),g.select()}function T(A,M){if(window.getSelection){var 
t=window.getSelection(),I=A[N()].length,g=Math.min(M.start,I),e="undefined"==typeof M.end?g:Math.min(M.end,I);if(!t.extend&&g>e){var i=e;e=g,g=i}var T=n(A,g),E=n(A,e);if(T&&E){var o=document.createRange();o.setStart(T.node,T.offset),t.removeAllRanges(),g>e?(t.addRange(o),t.extend(E.node,E.offset)):(o.setEnd(E.node,E.offset),t.addRange(o))}}}var E=t(20),n=t(778),N=t(337),o=E.canUseDOM&&"selection"in document&&!("getSelection"in window),c={getOffsets:o?g:e,setOffsets:o?i:T};A.exports=c},function(A,M,t){"use strict";var I=t(322),g=t(759),e=t(200);I.inject();var i={renderToString:g.renderToString,renderToStaticMarkup:g.renderToStaticMarkup,version:e};A.exports=i},function(A,M,t){"use strict";function I(){this._rootNodeID&&N.updateWrapper(this)}function g(A){var M=this._currentElement.props,t=e.executeOnChange(M,A);return T.asap(I,this),t}var e=t(195),i=t(198),T=t(39),E=t(7),n=t(3),N=(t(8),{getNativeProps:function(A,M,t){null!=M.dangerouslySetInnerHTML?n(!1):void 0;var I=E({},M,{defaultValue:void 0,value:void 0,children:A._wrapperState.initialValue,onChange:A._wrapperState.onChange});return I},mountWrapper:function(A,M){var t=M.defaultValue,I=M.children;null!=I&&(null!=t?n(!1):void 0,Array.isArray(I)&&(I.length<=1?void 0:n(!1),I=I[0]),t=""+I),null==t&&(t="");var i=e.getValue(M);A._wrapperState={initialValue:""+(null!=i?i:t),onChange:g.bind(A)}},updateWrapper:function(A){var M=A._currentElement.props,t=e.getValue(M);null!=t&&i.updatePropertyByID(A._rootNodeID,"value",""+t)}});A.exports=N},function(A,M,t){"use strict";function I(A){g.enqueueEvents(A),g.processEventQueue(!1)}var g=t(101),e={handleTopLevel:function(A,M,t,e,i){var T=g.extractEvents(A,M,t,e,i);I(T)}};A.exports=e},function(A,M,t){"use strict";function I(A){var M=c.getID(A),t=o.getReactRootIDFromNodeID(M),I=c.findReactContainerForID(t),g=c.getFirstReactDOM(I);return g}function g(A,M){this.topLevelType=A,this.nativeEvent=M,this.ancestors=[]}function e(A){i(A)}function i(A){for(var M=c.getFirstReactDOM(D(A.nativeEvent))||window,t=M;t;)A.ancestors.push(t),t=I(t);for(var g=0;g=M)return{node:g,offset:M-e};e=i}g=t(I(g))}}A.exports=g},function(A,M,t){"use strict";function I(A){return g.isValidElement(A)?void 0:e(!1),A}var g=t(29),e=t(3);A.exports=I},function(A,M,t){"use strict";function I(A){return'"'+g(A)+'"'}var g=t(133);A.exports=I},function(A,M,t){"use strict";var I=t(23);A.exports=I.renderSubtreeIntoContainer},function(A,M){"use strict";function t(A){return A.replace(I,function(A,M){return M.toUpperCase()})}var I=/-(.)/g;A.exports=t},function(A,M,t){"use strict";function I(A){return g(A.replace(e,"ms-"))}var g=t(782),e=/^-ms-/;A.exports=I},function(A,M,t){"use strict";function I(A){return!!A&&("object"==typeof A||"function"==typeof A)&&"length"in A&&!("setInterval"in A)&&"number"!=typeof A.nodeType&&(Array.isArray(A)||"callee"in A||"item"in A)}function g(A){return I(A)?Array.isArray(A)?A.slice():e(A):[A]}var e=t(793);A.exports=g},function(A,M,t){"use strict";function I(A){var M=A.match(N);return M&&M[1].toLowerCase()}function g(A,M){var t=n;n?void 0:E(!1);var g=I(A),e=g&&T(g);if(e){t.innerHTML=e[1]+A+e[2];for(var N=e[0];N--;)t=t.lastChild}else t.innerHTML=A;var o=t.getElementsByTagName("script");o.length&&(M?void 0:E(!1),i(o).forEach(M));for(var c=i(t.childNodes);t.lastChild;)t.removeChild(t.lastChild);return c}var e=t(20),i=t(784),T=t(343),E=t(3),n=e.canUseDOM?document.createElement("div"):null,N=/^\s*<(\w+)/;A.exports=g},function(A,M){"use strict";function t(A){return 
A===window?{x:window.pageXOffset||document.documentElement.scrollLeft,y:window.pageYOffset||document.documentElement.scrollTop}:{x:A.scrollLeft,y:A.scrollTop}}A.exports=t},function(A,M){"use strict";function t(A){return A.replace(I,"-$1").toLowerCase()}var I=/([A-Z])/g;A.exports=t},function(A,M,t){"use strict";function I(A){return g(A).replace(e,"-ms-")}var g=t(787),e=/^ms-/;A.exports=I},function(A,M){"use strict";function t(A){return!(!A||!("function"==typeof Node?A instanceof Node:"object"==typeof A&&"number"==typeof A.nodeType&&"string"==typeof A.nodeName))}A.exports=t},function(A,M,t){"use strict";function I(A){return g(A)&&3==A.nodeType}var g=t(789);A.exports=I},function(A,M){"use strict";function t(A,M,t){if(!A)return null;var g={};for(var e in A)I.call(A,e)&&(g[e]=M.call(t,A[e],e,A));return g}var I=Object.prototype.hasOwnProperty;A.exports=t},function(A,M){"use strict";function t(A){var M={};return function(t){return M.hasOwnProperty(t)||(M[t]=A.call(this,t)),M[t]}}A.exports=t},function(A,M,t){"use strict";function I(A){var M=A.length;if(Array.isArray(A)||"object"!=typeof A&&"function"!=typeof A?g(!1):void 0,"number"!=typeof M?g(!1):void 0,0===M||M-1 in A?void 0:g(!1),A.hasOwnProperty)try{return Array.prototype.slice.call(A)}catch(A){}for(var t=Array(M),I=0;I0&&void 0!==arguments[0]?arguments[0]:{},M=arguments[1];if(E)throw E;for(var I=!1,e={},i=0;i=0;--I){var g=this.tryEntries[I],e=g.completion;if("root"===g.tryLoc)return M("end");if(g.tryLoc<=this.prev){var i=Q.call(g,"catchLoc"),T=Q.call(g,"finallyLoc");if(i&&T){if(this.prev=0;--t){var I=this.tryEntries[t];if(I.tryLoc<=this.prev&&Q.call(I,"finallyLoc")&&this.prev=0;--M){var t=this.tryEntries[M];if(t.finallyLoc===A)return this.complete(t.completion,t.afterLoc),c(t),h}},catch:function(A){for(var M=this.tryEntries.length-1;M>=0;--M){var t=this.tryEntries[M];if(t.tryLoc===A){var I=t.completion;if("throw"===I.type){var g=I.arg;c(t)}return g}}throw new Error("illegal catch attempt")},delegateYield:function(A,M,t){return this.delegate={iterator:a(A),resultName:M,nextLoc:t},"next"===this.method&&(this.arg=r),h}}}("object"==typeof M?M:"object"==typeof window?window:"object"==typeof self?self:this)}).call(M,function(){return this}())},function(A,M){"use strict";A.exports=function(A){return encodeURIComponent(A).replace(/[!'()*]/g,function(A){return"%"+A.charCodeAt(0).toString(16).toUpperCase()})}},function(A,M,t){var I=t(556);"string"==typeof I&&(I=[[A.id,I,""]]);t(347)(I,{});I.locals&&(A.exports=I.locals)},function(A,M,t){var I=t(557);"string"==typeof I&&(I=[[A.id,I,""]]);t(347)(I,{});I.locals&&(A.exports=I.locals)},function(A,M,t){var I=A.exports=t(804),g=I.Request,e=I.SuperagentPromiseError=function(A){this.name="SuperagentPromiseError",this.message=A||"Bad request"};e.prototype=new Error,e.prototype.constructor=e,g.prototype.promise=function(){var A,M=this;return new Promise(function(t,I){M.end(function(g,i){if("undefined"!=typeof i&&i.status>=400){var T="cannot "+M.method+" "+M.url+" ("+i.status+")";A=new e(T),A.status=i.status,A.body=i.body,A.res=i,I(A)}else g?I(new e(g)):t(i)})})},g.prototype.then=function(){var A=this.promise();return A.then.apply(A,arguments)}},function(A,M,t){function I(){}function g(A){var M={}.toString.call(A);switch(M){case"[object File]":case"[object Blob]":case"[object FormData]":return!0;default:return!1}}function e(A){if(!s(A))return A;var M=[];for(var t in A)null!=A[t]&&i(M,t,A[t]);return M.join("&")}function i(A,M,t){return Array.isArray(t)?t.forEach(function(t){i(A,M,t)}):void 
A.push(encodeURIComponent(M)+"="+encodeURIComponent(t))}function T(A){for(var M,t,I={},g=A.split("&"),e=0,i=g.length;e=200&&M.status<300)return t.callback(A,M);var I=new Error(M.statusText||"Unsuccessful HTTP response");I.original=A,I.response=M,I.status=M.status,t.callback(I,M)})}function a(A,M){var t=u("DELETE",A);return M&&t.end(M),t}var D,r=t(370),B=t(794),Q=t(805),s=t(348);D="undefined"!=typeof window?window:"undefined"!=typeof self?self:this;var u=A.exports=t(806).bind(null,C);u.getXHR=function(){if(!(!D.XMLHttpRequest||D.location&&"file:"==D.location.protocol&&D.ActiveXObject))return new XMLHttpRequest;try{return new ActiveXObject("Microsoft.XMLHTTP")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP.6.0")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP.3.0")}catch(A){}try{return new ActiveXObject("Msxml2.XMLHTTP")}catch(A){}return!1};var x="".trim?function(A){return A.trim()}:function(A){return A.replace(/(^\s*|\s*$)/g,"")};u.serializeObject=e,u.parseString=T,u.types={html:"text/html",json:"application/json",xml:"application/xml",urlencoded:"application/x-www-form-urlencoded",form:"application/x-www-form-urlencoded","form-data":"application/x-www-form-urlencoded"},u.serialize={"application/x-www-form-urlencoded":e,"application/json":JSON.stringify},u.parse={"application/x-www-form-urlencoded":T,"application/json":JSON.parse},c.prototype.get=function(A){return this.header[A.toLowerCase()]},c.prototype.setHeaderProperties=function(A){var M=this.header["content-type"]||"";this.type=N(M);var t=o(M);for(var I in t)this[I]=t[I]},c.prototype.parseBody=function(A){var M=u.parse[this.type];return!M&&n(this.type)&&(M=u.parse["application/json"]),M&&A&&(A.length||A instanceof Object)?M(A):null},c.prototype.setStatusProperties=function(A){1223===A&&(A=204);var M=A/100|0;this.status=this.statusCode=A,this.statusType=M,this.info=1==M,this.ok=2==M,this.clientError=4==M,this.serverError=5==M,this.error=(4==M||5==M)&&this.toError(),this.accepted=202==A,this.noContent=204==A,this.badRequest=400==A,this.unauthorized=401==A,this.notAcceptable=406==A,this.notFound=404==A,this.forbidden=403==A},c.prototype.toError=function(){var A=this.req,M=A.method,t=A.url,I="cannot "+M+" "+t+" ("+this.status+")",g=new Error(I);return g.status=this.status,g.method=M,g.url=t,g},u.Response=c,r(C.prototype);for(var y in Q)C.prototype[y]=Q[y];C.prototype.abort=function(){if(!this.aborted)return this.aborted=!0,this.xhr&&this.xhr.abort(),this.clearTimeout(),this.emit("abort"),this},C.prototype.type=function(A){return this.set("Content-Type",u.types[A]||A),this},C.prototype.responseType=function(A){return this._responseType=A,this},C.prototype.accept=function(A){return this.set("Accept",u.types[A]||A),this},C.prototype.auth=function(A,M,t){switch(t||(t={type:"basic"}),t.type){case"basic":var I=btoa(A+":"+M);this.set("Authorization","Basic "+I);break;case"auto":this.username=A,this.password=M}return this},C.prototype.query=function(A){return"string"!=typeof A&&(A=e(A)),A&&this._query.push(A),this},C.prototype.attach=function(A,M,t){return this._getFormData().append(A,M,t||M.name),this},C.prototype._getFormData=function(){return this._formData||(this._formData=new D.FormData),this._formData},C.prototype.send=function(A){var M=s(A),t=this._header["content-type"];if(M&&s(this._data))for(var I in A)this._data[I]=A[I];else"string"==typeof 
A?(t||this.type("form"),t=this._header["content-type"],"application/x-www-form-urlencoded"==t?this._data=this._data?this._data+"&"+A:A:this._data=(this._data||"")+A):this._data=A;return!M||g(A)?this:(t||this.type("json"),this)},c.prototype.parse=function(A){return D.console&&console.warn("Client-side parse() method has been renamed to serialize(). This method is not compatible with superagent v2.0"),this.serialize(A),this},c.prototype.serialize=function(A){return this._parser=A,this},C.prototype.callback=function(A,M){var t=this._callback;this.clearTimeout(),t(A,M)},C.prototype.crossDomainError=function(){var A=new Error("Request has been terminated\nPossible causes: the network is offline, Origin is not allowed by Access-Control-Allow-Origin, the page is being unloaded, etc.");A.crossDomain=!0,A.status=this.status,A.method=this.method,A.url=this.url,this.callback(A)},C.prototype.timeoutError=function(){var A=this._timeout,M=new Error("timeout of "+A+"ms exceeded");M.timeout=A,this.callback(M)},C.prototype.withCredentials=function(){return this._withCredentials=!0,this},C.prototype.end=function(A){var M=this,t=this.xhr=u.getXHR(),e=this._query.join("&"),i=this._timeout,T=this._formData||this._data;this._callback=A||I,t.onreadystatechange=function(){if(4==t.readyState){var A;try{A=t.status}catch(M){A=0}if(0==A){if(M.timedout)return M.timeoutError();if(M.aborted)return;return M.crossDomainError()}M.emit("end")}};var E=function(A){A.total>0&&(A.percent=A.loaded/A.total*100),A.direction="download",M.emit("progress",A)};this.hasListeners("progress")&&(t.onprogress=E);try{t.upload&&this.hasListeners("progress")&&(t.upload.onprogress=E)}catch(A){}if(i&&!this._timer&&(this._timer=setTimeout(function(){M.timedout=!0,M.abort()},i)),e&&(e=u.serializeObject(e),this.url+=~this.url.indexOf("?")?"&"+e:"?"+e),this.username&&this.password?t.open(this.method,this.url,!0,this.username,this.password):t.open(this.method,this.url,!0),this._withCredentials&&(t.withCredentials=!0),"GET"!=this.method&&"HEAD"!=this.method&&"string"!=typeof T&&!g(T)){var N=this._header["content-type"],o=this._parser||u.serialize[N?N.split(";")[0]:""];!o&&n(N)&&(o=u.serialize["application/json"]),o&&(T=o(T))}for(var c in this.header)null!=this.header[c]&&t.setRequestHeader(c,this.header[c]);return this._responseType&&(t.responseType=this._responseType),this.emit("request",this),t.send("undefined"!=typeof T?T:null),this},u.Request=C,u.get=function(A,M,t){var I=u("GET",A);return"function"==typeof M&&(t=M,M=null),M&&I.query(M),t&&I.end(t),I},u.head=function(A,M,t){var I=u("HEAD",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},u.del=a,u.delete=a,u.patch=function(A,M,t){var I=u("PATCH",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},u.post=function(A,M,t){var I=u("POST",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I},u.put=function(A,M,t){var I=u("PUT",A);return"function"==typeof M&&(t=M,M=null),M&&I.send(M),t&&I.end(t),I}},function(A,M,t){var I=t(348);M.clearTimeout=function(){return this._timeout=0,clearTimeout(this._timer),this},M.parse=function(A){return this._parser=A,this},M.timeout=function(A){return this._timeout=A,this},M.then=function(A,M){return this.end(function(t,I){t?M(t):A(I)})},M.use=function(A){return A(this),this},M.get=function(A){return this._header[A.toLowerCase()]},M.getHeader=M.get,M.set=function(A,M){if(I(A)){for(var t in A)this.set(t,A[t]);return this}return this._header[A.toLowerCase()]=M,this.header[A]=M,this},M.unset=function(A){return 
delete this._header[A.toLowerCase()],delete this.header[A],this},M.field=function(A,M){return this._getFormData().append(A,M),this}},function(A,M){function t(A,M,t){return"function"==typeof t?new A("GET",M).end(t):2==arguments.length?new A("GET",M):new A(M,t)}A.exports=t},function(A,M,t){A.exports=t(808)},function(A,M,t){(function(A,I){"use strict";function g(A){return A&&A.__esModule?A:{default:A}}Object.defineProperty(M,"__esModule",{value:!0});var e,i=t(809),T=g(i);e="undefined"!=typeof self?self:"undefined"!=typeof window?window:"undefined"!=typeof A?A:I;var E=(0,T.default)(e);M.default=E}).call(M,function(){return this}(),t(216)(A))},function(A,M){"use strict";function t(A){var M,t=A.Symbol;return"function"==typeof t?t.observable?M=t.observable:(M=t("observable"),t.observable=M):M="@@observable",M}Object.defineProperty(M,"__esModule",{value:!0}),M.default=t},function(A,M,t){function I(A){return g(A).replace(/\s(\w)/g,function(A,M){return M.toUpperCase()})}var g=t(812);A.exports=I},function(A,M){function t(A){return e.test(A)?A.toLowerCase():i.test(A)?(I(A)||A).toLowerCase():T.test(A)?g(A).toLowerCase():A.toLowerCase()}function I(A){return A.replace(E,function(A,M){return M?" "+M:""})}function g(A){return A.replace(n,function(A,M,t){return M+" "+t.toLowerCase().split("").join(" ")})}A.exports=t;var e=/\s/,i=/(_|-|\.|:)/,T=/([a-z][A-Z]|[A-Z][a-z])/,E=/[\W_]+(.|$)/g,n=/(.)([A-Z]+)/g},function(A,M,t){function I(A){return g(A).replace(/[\W_]+(.|$)/g,function(A,M){return M?" "+M:""}).trim()}var g=t(811);A.exports=I},function(A,M){A.exports=function(){var A=document.getSelection();if(!A.rangeCount)return function(){};for(var M=document.activeElement,t=[],I=0;I=0||Object.prototype.hasOwnProperty.call(A,I)&&(t[I]=A[I]);return t}function i(A,M){function t(I,g){function i(A,t){var I=C.getLinkName(A),e=this.props[g[A]];I&&E(this.props,I)&&!e&&(e=this.props[I].requestChange);for(var i=arguments.length,T=Array(i>2?i-2:0),n=2;n=15||0===s[0]&&s[1]>=13?A:A.type}function T(A,M){var t=n(M);return t&&!E(A,M)&&E(A,t)?A[t].value:A[M]}function E(A,M){return void 0!==A[M]}function n(A){return"value"===A?"valueLink":"checked"===A?"checkedLink":null}function N(A){return"default"+A.charAt(0).toUpperCase()+A.substr(1)}function o(A,M,t){return function(){for(var I=arguments.length,g=Array(I),e=0;e1&&(I=t[0]+"@",A=t[1]),A=A.replace(z,".");var g=A.split("."),e=T(g,M).join(".");return I+e}function N(A){for(var M,t,I=[],g=0,e=A.length;g=55296&&M<=56319&&g65535&&(A-=65536,M+=f(A>>>10&1023|55296),A=56320|1023&A),M+=f(A)}).join("")}function o(A){return A-48<10?A-22:A-65<26?A-65:A-97<26?A-97:x}function c(A,M){return A+22+75*(A<26)-((0!=M)<<5)}function C(A,M,t){var I=0;for(A=t?O(A/w):A>>1,A+=O(A/M);A>U*j>>1;I+=x)A=O(A/U);return O(I+(U+1)*A/(A+l))}function a(A){var M,t,I,g,e,T,E,N,c,a,D=[],r=A.length,B=0,Q=Y,s=L;for(t=A.lastIndexOf(d),t<0&&(t=0),I=0;I=128&&i("not-basic"),D.push(A.charCodeAt(I));for(g=t>0?t+1:0;g=r&&i("invalid-input"),N=o(A.charCodeAt(g++)),(N>=x||N>O((u-B)/T))&&i("overflow"),B+=N*T,c=E<=s?y:E>=s+j?j:E-s,!(NO(u/a)&&i("overflow"),T*=a;M=D.length+1,s=C(B-e,M,0==e),O(B/M)>u-Q&&i("overflow"),Q+=O(B/M),B%=M,D.splice(B++,0,Q)}return n(D)}function D(A){var M,t,I,g,e,T,E,n,o,a,D,r,B,Q,s,l=[];for(A=N(A),r=A.length,M=Y,t=0,e=L,T=0;T=M&&DO((u-t)/B)&&i("overflow"),t+=(E-M)*B,M=E,T=0;Tu&&i("overflow"),D==M){for(n=t,o=x;a=o<=e?y:o>=e+j?j:o-e,!(n= 0x80 (not a basic code point)","invalid-input":"Invalid 
input"},U=x-y,O=Math.floor,f=String.fromCharCode;s={version:"1.3.2",ucs2:{decode:N,encode:n},decode:a,encode:D,toASCII:B,toUnicode:r},I=function(){return s}.call(M,t,M,A),!(void 0!==I&&(A.exports=I))}(this)}).call(M,t(215)(A),function(){return this}())},function(A,M,t){"use strict";function I(){this.protocol=null,this.slashes=null,this.auth=null,this.host=null,this.port=null,this.hostname=null,this.hash=null,this.search=null,this.query=null,this.pathname=null,this.path=null,this.href=null}function g(A,M,t){if(A&&N.isObject(A)&&A instanceof I)return A;var g=new I;return g.parse(A,M,t),g}function e(A){return N.isString(A)&&(A=g(A)),A instanceof I?A.format():I.prototype.format.call(A)}function i(A,M){return g(A,!1,!0).resolve(M)}function T(A,M){return A?g(A,!1,!0).resolveObject(M):M}var E=t(810),N=t(812);M.parse=g,M.resolve=i,M.resolveObject=T,M.format=e,M.Url=I;var n=/^([a-z0-9.+-]+:)/i,o=/:[0-9]*$/,c=/^(\/\/?(?!\/)[^\?\s]*)(\?[^\s]*)?$/,C=["<",">",'"',"` + "`" + `"," ","\r","\n","\t"],a=["{","}","|","\\","^","` + "`" + `"].concat(C),D=["'"].concat(a),r=["%","/","?",";","#"].concat(D),B=["/","?","#"],Q=255,s=/^[+a-z0-9A-Z_-]{0,63}$/,u=/^([+a-z0-9A-Z_-]{0,63})(.*)$/,x={javascript:!0,"javascript:":!0},y={javascript:!0,"javascript:":!0},j={http:!0,https:!0,ftp:!0,gopher:!0,file:!0,"http:":!0,"https:":!0,"ftp:":!0,"gopher:":!0,"file:":!0},l=t(644);I.prototype.parse=function(A,M,t){if(!N.isString(A))throw new TypeError("Parameter 'url' must be a string, not "+typeof A);var I=A.indexOf("?"),g=I!==-1&&I127?"x":O[m];if(!f.match(s)){var k=p.slice(0,Y),R=p.slice(Y+1),J=O.match(u);J&&(k.push(J[1]),R.unshift(J[2])),R.length&&(T="/"+R.join(".")+T),this.hostname=k.join(".");break}}}this.hostname.length>Q?this.hostname="":this.hostname=this.hostname.toLowerCase(),z||(this.hostname=E.toASCII(this.hostname));var G=this.port?":"+this.port:"",H=this.hostname||"";this.host=H+G,this.href+=this.host,z&&(this.hostname=this.hostname.substr(1,this.hostname.length-2),"/"!==T[0]&&(T="/"+T))}if(!x[a])for(var Y=0,U=D.length;Y0)&&t.host.split("@");w&&(t.auth=w.shift(),t.host=t.hostname=w.shift())}return t.search=A.search,t.query=A.query,N.isNull(t.pathname)&&N.isNull(t.search)||(t.path=(t.pathname?t.pathname:"")+(t.search?t.search:"")),t.href=t.format(),t}if(!x.length)return t.pathname=null,t.search?t.path="/"+t.search:t.path=null,t.href=t.format(),t;for(var L=x.slice(-1)[0],Y=(t.host||A.host||x.length>1)&&("."===L||".."===L)||""===L,d=0,h=x.length;h>=0;h--)L=x[h],"."===L?x.splice(h,1):".."===L?(x.splice(h,1),d++):d&&(x.splice(h,1),d--);if(!s&&!u)for(;d--;d)x.unshift("..");!s||""===x[0]||x[0]&&"/"===x[0].charAt(0)||x.unshift(""),Y&&"/"!==x.join("/").substr(-1)&&x.push("");var S=""===x[0]||x[0]&&"/"===x[0].charAt(0);if(l){t.hostname=t.host=S?"":x.length?x.shift():"";var w=!!(t.host&&t.host.indexOf("@")>0)&&t.host.split("@");w&&(t.auth=w.shift(),t.host=t.hostname=w.shift())}return s=s||t.host&&x.length,s&&!S&&x.unshift(""),x.length?t.pathname=x.join("/"):(t.pathname=null,t.path=null),N.isNull(t.pathname)&&N.isNull(t.search)||(t.path=(t.pathname?t.pathname:"")+(t.search?t.search:"")),t.auth=A.auth||t.auth,t.slashes=t.slashes||A.slashes,t.href=t.format(),t},I.prototype.parseHost=function(){var A=this.host,M=o.exec(A);M&&(M=M[0],":"!==M&&(this.port=M.substr(1)),A=A.substr(0,A.length-M.length)),A&&(this.hostname=A)}},function(A,M){"use strict";A.exports={isString:function(A){return"string"==typeof A},isObject:function(A){return"object"==typeof A&&null!==A},isNull:function(A){return 
null===A},isNullOrUndefined:function(A){return null==A}}}]);`) +},function(A,M,t){var I;(function(A,g){!function(e){function i(A){throw RangeError(p[A])}function T(A,M){for(var t=A.length,I=[];t--;)I[t]=M(A[t]);return I}function E(A,M){var t=A.split("@"),I="";t.length>1&&(I=t[0]+"@",A=t[1]),A=A.replace(z,".");var g=A.split("."),e=T(g,M).join(".");return I+e}function n(A){for(var M,t,I=[],g=0,e=A.length;g=55296&&M<=56319&&g65535&&(A-=65536,M+=f(A>>>10&1023|55296),A=56320|1023&A),M+=f(A)}).join("")}function o(A){return A-48<10?A-22:A-65<26?A-65:A-97<26?A-97:x}function c(A,M){return A+22+75*(A<26)-((0!=M)<<5)}function C(A,M,t){var I=0;for(A=t?O(A/w):A>>1,A+=O(A/M);A>U*j>>1;I+=x)A=O(A/U);return O(I+(U+1)*A/(A+l))}function a(A){var M,t,I,g,e,T,E,n,c,a,D=[],r=A.length,B=0,Q=Y,s=L;for(t=A.lastIndexOf(d),t<0&&(t=0),I=0;I=128&&i("not-basic"),D.push(A.charCodeAt(I));for(g=t>0?t+1:0;g=r&&i("invalid-input"),n=o(A.charCodeAt(g++)),(n>=x||n>O((u-B)/T))&&i("overflow"),B+=n*T,c=E<=s?y:E>=s+j?j:E-s,!(nO(u/a)&&i("overflow"),T*=a;M=D.length+1,s=C(B-e,M,0==e),O(B/M)>u-Q&&i("overflow"),Q+=O(B/M),B%=M,D.splice(B++,0,Q)}return N(D)}function D(A){var M,t,I,g,e,T,E,N,o,a,D,r,B,Q,s,l=[];for(A=n(A),r=A.length,M=Y,t=0,e=L,T=0;T=M&&DO((u-t)/B)&&i("overflow"),t+=(E-M)*B,M=E,T=0;Tu&&i("overflow"),D==M){for(N=t,o=x;a=o<=e?y:o>=e+j?j:o-e,!(N= 0x80 (not a basic code point)","invalid-input":"Invalid input"},U=x-y,O=Math.floor,f=String.fromCharCode;s={version:"1.3.2",ucs2:{decode:n,encode:N},decode:a,encode:D,toASCII:B,toUnicode:r},I=function(){return s}.call(M,t,M,A),!(void 0!==I&&(A.exports=I))}(this)}).call(M,t(216)(A),function(){return this}())},function(A,M,t){"use strict";function I(){this.protocol=null,this.slashes=null,this.auth=null,this.host=null,this.port=null,this.hostname=null,this.hash=null,this.search=null,this.query=null,this.pathname=null,this.path=null,this.href=null}function g(A,M,t){if(A&&n.isObject(A)&&A instanceof I)return A;var g=new I;return g.parse(A,M,t),g}function e(A){return n.isString(A)&&(A=g(A)),A instanceof I?A.format():I.prototype.format.call(A)}function i(A,M){return g(A,!1,!0).resolve(M)}function T(A,M){return A?g(A,!1,!0).resolveObject(M):M}var E=t(828),n=t(830);M.parse=g,M.resolve=i,M.resolveObject=T,M.format=e,M.Url=I;var N=/^([a-z0-9.+-]+:)/i,o=/:[0-9]*$/,c=/^(\/\/?(?!\/)[^\?\s]*)(\?[^\s]*)?$/,C=["<",">",'"',"` + "`" + `"," ","\r","\n","\t"],a=["{","}","|","\\","^","` + "`" + `"].concat(C),D=["'"].concat(a),r=["%","/","?",";","#"].concat(D),B=["/","?","#"],Q=255,s=/^[+a-z0-9A-Z_-]{0,63}$/,u=/^([+a-z0-9A-Z_-]{0,63})(.*)$/,x={javascript:!0,"javascript:":!0},y={javascript:!0,"javascript:":!0},j={http:!0,https:!0,ftp:!0,gopher:!0,file:!0,"http:":!0,"https:":!0,"ftp:":!0,"gopher:":!0,"file:":!0},l=t(633);I.prototype.parse=function(A,M,t){if(!n.isString(A))throw new TypeError("Parameter 'url' must be a string, not "+typeof A);var I=A.indexOf("?"),g=I!==-1&&I127?"x":O[m];if(!f.match(s)){var k=p.slice(0,Y),R=p.slice(Y+1),J=O.match(u);J&&(k.push(J[1]),R.unshift(J[2])),R.length&&(T="/"+R.join(".")+T),this.hostname=k.join(".");break}}}this.hostname.length>Q?this.hostname="":this.hostname=this.hostname.toLowerCase(),z||(this.hostname=E.toASCII(this.hostname));var G=this.port?":"+this.port:"",H=this.hostname||"";this.host=H+G,this.href+=this.host,z&&(this.hostname=this.hostname.substr(1,this.hostname.length-2),"/"!==T[0]&&(T="/"+T))}if(!x[a])for(var Y=0,U=D.length;Y0)&&t.host.split("@");w&&(t.auth=w.shift(),t.host=t.hostname=w.shift())}return 
t.search=A.search,t.query=A.query,n.isNull(t.pathname)&&n.isNull(t.search)||(t.path=(t.pathname?t.pathname:"")+(t.search?t.search:"")),t.href=t.format(),t}if(!x.length)return t.pathname=null,t.search?t.path="/"+t.search:t.path=null,t.href=t.format(),t;for(var L=x.slice(-1)[0],Y=(t.host||A.host||x.length>1)&&("."===L||".."===L)||""===L,d=0,h=x.length;h>=0;h--)L=x[h],"."===L?x.splice(h,1):".."===L?(x.splice(h,1),d++):d&&(x.splice(h,1),d--);if(!s&&!u)for(;d--;d)x.unshift("..");!s||""===x[0]||x[0]&&"/"===x[0].charAt(0)||x.unshift(""),Y&&"/"!==x.join("/").substr(-1)&&x.push("");var S=""===x[0]||x[0]&&"/"===x[0].charAt(0);if(l){t.hostname=t.host=S?"":x.length?x.shift():"";var w=!!(t.host&&t.host.indexOf("@")>0)&&t.host.split("@");w&&(t.auth=w.shift(),t.host=t.hostname=w.shift())}return s=s||t.host&&x.length,s&&!S&&x.unshift(""),x.length?t.pathname=x.join("/"):(t.pathname=null,t.path=null),n.isNull(t.pathname)&&n.isNull(t.search)||(t.path=(t.pathname?t.pathname:"")+(t.search?t.search:"")),t.auth=A.auth||t.auth,t.slashes=t.slashes||A.slashes,t.href=t.format(),t},I.prototype.parseHost=function(){var A=this.host,M=o.exec(A);M&&(M=M[0],":"!==M&&(this.port=M.substr(1)),A=A.substr(0,A.length-M.length)),A&&(this.hostname=A)}},function(A,M){"use strict";A.exports={isString:function(A){return"string"==typeof A},isObject:function(A){return"object"==typeof A&&null!==A},isNull:function(A){return null===A},isNullOrUndefined:function(A){return null==A}}}]);`) -func productionIndex_bundle20170602t213618zJsBytes() ([]byte, error) { - return _productionIndex_bundle20170602t213618zJs, nil +func productionIndex_bundle20170624t083558zJsBytes() ([]byte, error) { + return _productionIndex_bundle20170624t083558zJs, nil } -func productionIndex_bundle20170602t213618zJs() (*asset, error) { - bytes, err := productionIndex_bundle20170602t213618zJsBytes() +func productionIndex_bundle20170624t083558zJs() (*asset, error) { + bytes, err := productionIndex_bundle20170624t083558zJsBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "production/index_bundle-2017-06-02T21-36-18Z.js", size: 2368338, mode: os.FileMode(436), modTime: time.Unix(1496439395, 0)} + info := bindataFileInfo{name: "production/index_bundle-2017-06-24T08-35-58Z.js", size: 2388385, mode: os.FileMode(436), modTime: time.Unix(1498293380, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -371,7 +377,7 @@ func productionLoaderCss() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "production/loader.css", size: 1738, mode: os.FileMode(436), modTime: time.Unix(1496439395, 0)} + info := bindataFileInfo{name: "production/loader.css", size: 1738, mode: os.FileMode(436), modTime: time.Unix(1498293380, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -445,7 +451,7 @@ func productionLogoSvg() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "production/logo.svg", size: 3079, mode: os.FileMode(436), modTime: time.Unix(1496439395, 0)} + info := bindataFileInfo{name: "production/logo.svg", size: 3079, mode: os.FileMode(436), modTime: time.Unix(1498293380, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -462,7 +468,7 @@ func productionSafariPng() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "production/safari.png", size: 4971, mode: os.FileMode(436), modTime: time.Unix(1496439395, 0)} + info := bindataFileInfo{name: "production/safari.png", size: 4971, mode: os.FileMode(436), modTime: time.Unix(1498293380, 0)} a := &asset{bytes: bytes, info: info} return a, 
nil } @@ -523,7 +529,7 @@ var _bindata = map[string]func() (*asset, error){ "production/favicon.ico": productionFaviconIco, "production/firefox.png": productionFirefoxPng, "production/index.html": productionIndexHTML, - "production/index_bundle-2017-06-02T21-36-18Z.js": productionIndex_bundle20170602t213618zJs, + "production/index_bundle-2017-06-24T08-35-58Z.js": productionIndex_bundle20170624t083558zJs, "production/loader.css": productionLoaderCss, "production/logo.svg": productionLogoSvg, "production/safari.png": productionSafariPng, @@ -575,7 +581,7 @@ var _bintree = &bintree{nil, map[string]*bintree{ "favicon.ico": {productionFaviconIco, map[string]*bintree{}}, "firefox.png": {productionFirefoxPng, map[string]*bintree{}}, "index.html": {productionIndexHTML, map[string]*bintree{}}, - "index_bundle-2017-06-02T21-36-18Z.js": {productionIndex_bundle20170602t213618zJs, map[string]*bintree{}}, + "index_bundle-2017-06-24T08-35-58Z.js": {productionIndex_bundle20170624t083558zJs, map[string]*bintree{}}, "loader.css": {productionLoaderCss, map[string]*bintree{}}, "logo.svg": {productionLogoSvg, map[string]*bintree{}}, "safari.png": {productionSafariPng, map[string]*bintree{}}, @@ -639,6 +645,6 @@ func assetFS() *assetfs.AssetFS { panic("unreachable") } -var UIReleaseTag = "RELEASE.2017-06-02T21-36-18Z" -var UICommitID = "432bf7d99e5c24c074a1c2bac0f2fadc9891e0a0" -var UIVersion = "2017-06-02T21:36:18Z" +var UIReleaseTag = "RELEASE.2017-06-24T08-35-58Z" +var UICommitID = "ba6d997d18da425094575ac87e19f793f32ad56a" +var UIVersion = "2017-06-24T08:35:58Z" diff --git a/browser/yarn.lock b/browser/yarn.lock index f8d00f7da..119aead18 100644 --- a/browser/yarn.lock +++ b/browser/yarn.lock @@ -3,8 +3,8 @@ abbrev@1: - version "1.0.9" - resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135" + version "1.1.0" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f" accepts@1.3.3, accepts@~1.3.3: version "1.3.3" @@ -17,10 +17,14 @@ acorn-to-esprima@^2.0.6, acorn-to-esprima@^2.0.8: version "2.0.8" resolved "https://registry.yarnpkg.com/acorn-to-esprima/-/acorn-to-esprima-2.0.8.tgz#003f0c642eb92132f417d3708f14ada82adf2eb1" -acorn@^3.0.0, acorn@^3.1.0: +acorn@^3.0.0: version "3.3.0" resolved "https://registry.yarnpkg.com/acorn/-/acorn-3.3.0.tgz#45e37fb39e8da3f25baee3ff5369e2bb5f22017a" +acorn@^4.0.3: + version "4.0.13" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.13.tgz#105495ae5361d697bd195c825192e1ad7f253787" + add-px-to-style@1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/add-px-to-style/-/add-px-to-style-1.0.0.tgz#d0c135441fa8014a8137904531096f67f28f263a" @@ -29,6 +33,13 @@ after@0.8.2: version "0.8.2" resolved "https://registry.yarnpkg.com/after/-/after-0.8.2.tgz#fedb394f9f0e02aa9768e702bda23b505fae7e1f" +ajv@^4.9.1: + version "4.11.8" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536" + dependencies: + co "^4.6.0" + json-stable-stringify "^1.0.1" + align-text@^0.1.1, align-text@^0.1.3: version "0.1.4" resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117" @@ -61,15 +72,15 @@ anymatch@^1.3.0: micromatch "^2.1.5" aproba@^1.0.3: - version "1.0.4" - resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.0.4.tgz#2713680775e7614c8ba186c065d4e2e52d1072c0" + version "1.1.2" + resolved 
"https://registry.yarnpkg.com/aproba/-/aproba-1.1.2.tgz#45c6629094de4e96f693ef7eab74ae079c240fc1" are-we-there-yet@~1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.2.tgz#80e470e95a084794fe1899262c5667c6e88de1b3" + version "1.1.4" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d" dependencies: delegates "^1.0.0" - readable-stream "^2.0.0 || ^1.1.13" + readable-stream "^2.0.6" argparse@^1.0.7: version "1.0.9" @@ -84,8 +95,8 @@ arr-diff@^2.0.0: arr-flatten "^1.0.1" arr-flatten@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.1.tgz#e5ffe54d45e19f32f216e91eb99c8ce892bb604b" + version "1.0.3" + resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1" array-flatten@1.1.1: version "1.1.1" @@ -115,23 +126,23 @@ asn1@~0.2.3: version "0.2.3" resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86" +assert-plus@1.0.0, assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + assert-plus@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234" -assert-plus@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" - assert@^1.1.1: version "1.4.1" resolved "https://registry.yarnpkg.com/assert/-/assert-1.4.1.tgz#99912d591836b5a6f5b345c0f07eefc08fc65d91" dependencies: util "0.10.3" -ast-types@0.9.4: - version "0.9.4" - resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.4.tgz#410d1f81890aeb8e0a38621558ba5869ae53c91b" +ast-types@0.9.6: + version "0.9.6" + resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9" async-each@^1.0.0: version "1.0.1" @@ -162,14 +173,14 @@ attr-accept@^1.0.3: resolved "https://registry.yarnpkg.com/attr-accept/-/attr-accept-1.1.0.tgz#b5cd35227f163935a8f1de10ed3eba16941f6be6" autoprefixer@^6.3.1: - version "6.7.0" - resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-6.7.0.tgz#88992cf04df141e7b8293550f2ee716c565d1cae" + version "6.7.7" + resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-6.7.7.tgz#1dbd1c835658e35ce3f9984099db00585c782014" dependencies: - browserslist "~1.6.0" - caniuse-db "^1.0.30000613" + browserslist "^1.7.6" + caniuse-db "^1.0.30000634" normalize-range "^0.1.2" num2fraction "^1.2.2" - postcss "^5.2.11" + postcss "^5.2.16" postcss-value-parser "^3.2.3" aws-sign2@~0.6.0: @@ -177,16 +188,16 @@ aws-sign2@~0.6.0: resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f" aws4@^1.2.1: - version "1.5.0" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.5.0.tgz#0a29ffb79c31c9e712eeb087e8e7a64b4a56d755" + version "1.6.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e" babel-cli@^6.14.0: - version "6.22.2" - resolved "https://registry.yarnpkg.com/babel-cli/-/babel-cli-6.22.2.tgz#3f814c8acf52759082b8fedd9627f938936ab559" + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-cli/-/babel-cli-6.24.1.tgz#207cd705bba61489b2ea41b5312341cf6aca2283" dependencies: - babel-core "^6.22.1" - babel-polyfill 
"^6.22.0" - babel-register "^6.22.0" + babel-core "^6.24.1" + babel-polyfill "^6.23.0" + babel-register "^6.24.1" babel-runtime "^6.22.0" commander "^2.8.1" convert-source-map "^1.1.0" @@ -209,20 +220,20 @@ babel-code-frame@^6.22.0: esutils "^2.0.2" js-tokens "^3.0.0" -babel-core@^6.14.0, babel-core@^6.22.0, babel-core@^6.22.1: - version "6.22.1" - resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.22.1.tgz#9c5fd658ba1772d28d721f6d25d968fc7ae21648" +babel-core@^6.14.0, babel-core@^6.24.1: + version "6.25.0" + resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.25.0.tgz#7dd42b0463c742e9d5296deb3ec67a9322dad729" dependencies: babel-code-frame "^6.22.0" - babel-generator "^6.22.0" - babel-helpers "^6.22.0" - babel-messages "^6.22.0" - babel-register "^6.22.0" + babel-generator "^6.25.0" + babel-helpers "^6.24.1" + babel-messages "^6.23.0" + babel-register "^6.24.1" babel-runtime "^6.22.0" - babel-template "^6.22.0" - babel-traverse "^6.22.1" - babel-types "^6.22.0" - babylon "^6.11.0" + babel-template "^6.25.0" + babel-traverse "^6.25.0" + babel-types "^6.25.0" + babylon "^6.17.2" convert-source-map "^1.1.0" debug "^2.1.1" json5 "^0.5.0" @@ -233,114 +244,114 @@ babel-core@^6.14.0, babel-core@^6.22.0, babel-core@^6.22.1: slash "^1.0.0" source-map "^0.5.0" -babel-generator@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.22.0.tgz#d642bf4961911a8adc7c692b0c9297f325cda805" +babel-generator@^6.25.0: + version "6.25.0" + resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.25.0.tgz#33a1af70d5f2890aeb465a4a7793c1df6a9ea9fc" dependencies: - babel-messages "^6.22.0" + babel-messages "^6.23.0" babel-runtime "^6.22.0" - babel-types "^6.22.0" + babel-types "^6.25.0" detect-indent "^4.0.0" jsesc "^1.3.0" lodash "^4.2.0" source-map "^0.5.0" + trim-right "^1.0.1" -babel-helper-builder-react-jsx@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-helper-builder-react-jsx/-/babel-helper-builder-react-jsx-6.22.0.tgz#aafb31913e47761fd4d0b6987756a144a65fca0d" +babel-helper-builder-react-jsx@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-builder-react-jsx/-/babel-helper-builder-react-jsx-6.24.1.tgz#0ad7917e33c8d751e646daca4e77cc19377d2cbc" dependencies: babel-runtime "^6.22.0" - babel-types "^6.22.0" + babel-types "^6.24.1" esutils "^2.0.0" + +babel-helper-call-delegate@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz#ece6aacddc76e41c3461f88bfc575bd0daa2df8d" + dependencies: + babel-helper-hoist-variables "^6.24.1" + babel-runtime "^6.22.0" + babel-traverse "^6.24.1" + babel-types "^6.24.1" + +babel-helper-define-map@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-define-map/-/babel-helper-define-map-6.24.1.tgz#7a9747f258d8947d32d515f6aa1c7bd02204a080" + dependencies: + babel-helper-function-name "^6.24.1" + babel-runtime "^6.22.0" + babel-types "^6.24.1" lodash "^4.2.0" -babel-helper-call-delegate@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-helper-call-delegate/-/babel-helper-call-delegate-6.22.0.tgz#119921b56120f17e9dae3f74b4f5cc7bcc1b37ef" +babel-helper-function-name@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz#d3475b8c03ed98242a25b48351ab18399d3580a9" dependencies: - babel-helper-hoist-variables "^6.22.0" + 
babel-helper-get-function-arity "^6.24.1" babel-runtime "^6.22.0" - babel-traverse "^6.22.0" - babel-types "^6.22.0" + babel-template "^6.24.1" + babel-traverse "^6.24.1" + babel-types "^6.24.1" -babel-helper-define-map@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-helper-define-map/-/babel-helper-define-map-6.22.0.tgz#9544e9502b2d6dfe7d00ff60e82bd5a7a89e95b7" +babel-helper-get-function-arity@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz#8f7782aa93407c41d3aa50908f89b031b1b6853d" dependencies: - babel-helper-function-name "^6.22.0" babel-runtime "^6.22.0" - babel-types "^6.22.0" + babel-types "^6.24.1" + +babel-helper-hoist-variables@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz#1ecb27689c9d25513eadbc9914a73f5408be7a76" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-helper-optimise-call-expression@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz#f7a13427ba9f73f8f4fa993c54a97882d1244257" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.24.1" + +babel-helper-regex@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-regex/-/babel-helper-regex-6.24.1.tgz#d36e22fab1008d79d88648e32116868128456ce8" + dependencies: + babel-runtime "^6.22.0" + babel-types "^6.24.1" lodash "^4.2.0" -babel-helper-function-name@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-helper-function-name/-/babel-helper-function-name-6.22.0.tgz#51f1bdc4bb89b15f57a9b249f33d742816dcbefc" +babel-helper-replace-supers@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz#bf6dbfe43938d17369a213ca8a8bf74b6a90ab1a" dependencies: - babel-helper-get-function-arity "^6.22.0" + babel-helper-optimise-call-expression "^6.24.1" + babel-messages "^6.23.0" babel-runtime "^6.22.0" - babel-template "^6.22.0" - babel-traverse "^6.22.0" - babel-types "^6.22.0" + babel-template "^6.24.1" + babel-traverse "^6.24.1" + babel-types "^6.24.1" -babel-helper-get-function-arity@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.22.0.tgz#0beb464ad69dc7347410ac6ade9f03a50634f5ce" +babel-helpers@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.24.1.tgz#3471de9caec388e5c850e597e58a26ddf37602b2" dependencies: babel-runtime "^6.22.0" - babel-types "^6.22.0" - -babel-helper-hoist-variables@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.22.0.tgz#3eacbf731d80705845dd2e9718f600cfb9b4ba72" - dependencies: - babel-runtime "^6.22.0" - babel-types "^6.22.0" - -babel-helper-optimise-call-expression@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.22.0.tgz#f8d5d4b40a6e2605a6a7f9d537b581bea3756d15" - dependencies: - babel-runtime "^6.22.0" - babel-types "^6.22.0" - -babel-helper-regex@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-helper-regex/-/babel-helper-regex-6.22.0.tgz#79f532be1647b1f0ee3474b5f5c3da58001d247d" - 
dependencies: - babel-runtime "^6.22.0" - babel-types "^6.22.0" - lodash "^4.2.0" - -babel-helper-replace-supers@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-helper-replace-supers/-/babel-helper-replace-supers-6.22.0.tgz#1fcee2270657548908c34db16bcc345f9850cf42" - dependencies: - babel-helper-optimise-call-expression "^6.22.0" - babel-messages "^6.22.0" - babel-runtime "^6.22.0" - babel-template "^6.22.0" - babel-traverse "^6.22.0" - babel-types "^6.22.0" - -babel-helpers@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.22.0.tgz#d275f55f2252b8101bff07bc0c556deda657392c" - dependencies: - babel-runtime "^6.22.0" - babel-template "^6.22.0" + babel-template "^6.24.1" babel-loader@^6.2.5: - version "6.2.10" - resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-6.2.10.tgz#adefc2b242320cd5d15e65b31cea0e8b1b02d4b0" + version "6.4.1" + resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-6.4.1.tgz#0b34112d5b0748a8dcdbf51acf6f9bd42d50b8ca" dependencies: find-cache-dir "^0.1.1" - loader-utils "^0.2.11" + loader-utils "^0.2.16" mkdirp "^0.5.1" object-assign "^4.0.1" -babel-messages@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.22.0.tgz#36066a214f1217e4ed4164867669ecb39e3ea575" +babel-messages@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.23.0.tgz#f3cdf4703858035b2a2951c6ec5edf6c62f2630e" dependencies: babel-runtime "^6.22.0" @@ -350,7 +361,7 @@ babel-plugin-check-es2015-constants@^6.22.0: dependencies: babel-runtime "^6.22.0" -babel-plugin-syntax-flow@^6.18.0, babel-plugin-syntax-flow@^6.3.13: +babel-plugin-syntax-flow@^6.18.0: version "6.18.0" resolved "https://registry.yarnpkg.com/babel-plugin-syntax-flow/-/babel-plugin-syntax-flow-6.18.0.tgz#4c3ab20a2af26aa20cd25995c398c4eb70310c8d" @@ -374,63 +385,63 @@ babel-plugin-transform-es2015-block-scoped-functions@^6.22.0: dependencies: babel-runtime "^6.22.0" -babel-plugin-transform-es2015-block-scoping@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.22.0.tgz#00d6e3a0bebdcfe7536b9d653b44a9141e63e47e" +babel-plugin-transform-es2015-block-scoping@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.24.1.tgz#76c295dc3a4741b1665adfd3167215dcff32a576" dependencies: babel-runtime "^6.22.0" - babel-template "^6.22.0" - babel-traverse "^6.22.0" - babel-types "^6.22.0" + babel-template "^6.24.1" + babel-traverse "^6.24.1" + babel-types "^6.24.1" lodash "^4.2.0" -babel-plugin-transform-es2015-classes@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.22.0.tgz#54d44998fd823d9dca15292324161c331c1b6f14" +babel-plugin-transform-es2015-classes@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz#5a4c58a50c9c9461e564b4b2a3bfabc97a2584db" dependencies: - babel-helper-define-map "^6.22.0" - babel-helper-function-name "^6.22.0" - babel-helper-optimise-call-expression "^6.22.0" - babel-helper-replace-supers "^6.22.0" - babel-messages "^6.22.0" + babel-helper-define-map "^6.24.1" + babel-helper-function-name "^6.24.1" + 
babel-helper-optimise-call-expression "^6.24.1" + babel-helper-replace-supers "^6.24.1" + babel-messages "^6.23.0" babel-runtime "^6.22.0" - babel-template "^6.22.0" - babel-traverse "^6.22.0" - babel-types "^6.22.0" + babel-template "^6.24.1" + babel-traverse "^6.24.1" + babel-types "^6.24.1" -babel-plugin-transform-es2015-computed-properties@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.22.0.tgz#7c383e9629bba4820c11b0425bdd6290f7f057e7" +babel-plugin-transform-es2015-computed-properties@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz#6fe2a8d16895d5634f4cd999b6d3480a308159b3" dependencies: babel-runtime "^6.22.0" - babel-template "^6.22.0" + babel-template "^6.24.1" babel-plugin-transform-es2015-destructuring@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.22.0.tgz#8e0af2f885a0b2cf999d47c4c1dd23ce88cfa4c6" + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz#997bb1f1ab967f682d2b0876fe358d60e765c56d" dependencies: babel-runtime "^6.22.0" -babel-plugin-transform-es2015-duplicate-keys@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.22.0.tgz#672397031c21610d72dd2bbb0ba9fb6277e1c36b" +babel-plugin-transform-es2015-duplicate-keys@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz#73eb3d310ca969e3ef9ec91c53741a6f1576423e" dependencies: babel-runtime "^6.22.0" - babel-types "^6.22.0" + babel-types "^6.24.1" babel-plugin-transform-es2015-for-of@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.22.0.tgz#180467ad63aeea592a1caeee4bf1c8b3e2616265" + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz#f47c95b2b613df1d3ecc2fdb7573623c75248691" dependencies: babel-runtime "^6.22.0" -babel-plugin-transform-es2015-function-name@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.22.0.tgz#f5fcc8b09093f9a23c76ac3d9e392c3ec4b77104" +babel-plugin-transform-es2015-function-name@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz#834c89853bc36b1af0f3a4c5dbaa94fd8eacaa8b" dependencies: - babel-helper-function-name "^6.22.0" + babel-helper-function-name "^6.24.1" babel-runtime "^6.22.0" - babel-types "^6.22.0" + babel-types "^6.24.1" babel-plugin-transform-es2015-literals@^6.22.0: version "6.22.0" @@ -438,63 +449,63 @@ babel-plugin-transform-es2015-literals@^6.22.0: dependencies: babel-runtime "^6.22.0" -babel-plugin-transform-es2015-modules-amd@^6.22.0: - version "6.22.0" - resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.22.0.tgz#bf69cd34889a41c33d90dfb740e0091ccff52f21" +babel-plugin-transform-es2015-modules-amd@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz#3b3e54017239842d6d19c3011c4bd2f00a00d154" dependencies: - babel-plugin-transform-es2015-modules-commonjs "^6.22.0" + babel-plugin-transform-es2015-modules-commonjs "^6.24.1" babel-runtime "^6.22.0" - babel-template "^6.22.0" + babel-template "^6.24.1" -babel-plugin-transform-es2015-modules-commonjs@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.22.0.tgz#6ca04e22b8e214fb50169730657e7a07dc941145" +babel-plugin-transform-es2015-modules-commonjs@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.24.1.tgz#d3e310b40ef664a36622200097c6d440298f2bfe" dependencies: - babel-plugin-transform-strict-mode "^6.22.0" + babel-plugin-transform-strict-mode "^6.24.1" babel-runtime "^6.22.0" - babel-template "^6.22.0" - babel-types "^6.22.0" + babel-template "^6.24.1" + babel-types "^6.24.1" -babel-plugin-transform-es2015-modules-systemjs@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.22.0.tgz#810cd0cd025a08383b84236b92c6e31f88e644ad" +babel-plugin-transform-es2015-modules-systemjs@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz#ff89a142b9119a906195f5f106ecf305d9407d23" dependencies: - babel-helper-hoist-variables "^6.22.0" + babel-helper-hoist-variables "^6.24.1" babel-runtime "^6.22.0" - babel-template "^6.22.0" + babel-template "^6.24.1" -babel-plugin-transform-es2015-modules-umd@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.22.0.tgz#60d0ba3bd23258719c64391d9bf492d648dc0fae" +babel-plugin-transform-es2015-modules-umd@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz#ac997e6285cd18ed6176adb607d602344ad38468" dependencies: - babel-plugin-transform-es2015-modules-amd "^6.22.0" + babel-plugin-transform-es2015-modules-amd "^6.24.1" babel-runtime "^6.22.0" - babel-template "^6.22.0" + babel-template "^6.24.1" -babel-plugin-transform-es2015-object-super@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.22.0.tgz#daa60e114a042ea769dd53fe528fc82311eb98fc" +babel-plugin-transform-es2015-object-super@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz#24cef69ae21cb83a7f8603dad021f572eb278f8d" dependencies: - babel-helper-replace-supers "^6.22.0" + babel-helper-replace-supers "^6.24.1" babel-runtime "^6.22.0" -babel-plugin-transform-es2015-parameters@^6.22.0: - version "6.22.0" - resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.22.0.tgz#57076069232019094f27da8c68bb7162fe208dbb" +babel-plugin-transform-es2015-parameters@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz#57ac351ab49caf14a97cd13b09f66fdf0a625f2b" dependencies: - babel-helper-call-delegate "^6.22.0" - babel-helper-get-function-arity "^6.22.0" + babel-helper-call-delegate "^6.24.1" + babel-helper-get-function-arity "^6.24.1" babel-runtime "^6.22.0" - babel-template "^6.22.0" - babel-traverse "^6.22.0" - babel-types "^6.22.0" + babel-template "^6.24.1" + babel-traverse "^6.24.1" + babel-types "^6.24.1" -babel-plugin-transform-es2015-shorthand-properties@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.22.0.tgz#8ba776e0affaa60bff21e921403b8a652a2ff723" +babel-plugin-transform-es2015-shorthand-properties@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz#24f875d6721c87661bbd99a4622e51f14de38aa0" dependencies: babel-runtime "^6.22.0" - babel-types "^6.22.0" + babel-types "^6.24.1" babel-plugin-transform-es2015-spread@^6.22.0: version "6.22.0" @@ -502,13 +513,13 @@ babel-plugin-transform-es2015-spread@^6.22.0: dependencies: babel-runtime "^6.22.0" -babel-plugin-transform-es2015-sticky-regex@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.22.0.tgz#ab316829e866ee3f4b9eb96939757d19a5bc4593" +babel-plugin-transform-es2015-sticky-regex@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz#00c1cdb1aca71112cdf0cf6126c2ed6b457ccdbc" dependencies: - babel-helper-regex "^6.22.0" + babel-helper-regex "^6.24.1" babel-runtime "^6.22.0" - babel-types "^6.22.0" + babel-types "^6.24.1" babel-plugin-transform-es2015-template-literals@^6.22.0: version "6.22.0" @@ -517,16 +528,16 @@ babel-plugin-transform-es2015-template-literals@^6.22.0: babel-runtime "^6.22.0" babel-plugin-transform-es2015-typeof-symbol@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.22.0.tgz#87faf2336d3b6a97f68c4d906b0cd0edeae676e1" + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz#dec09f1cddff94b52ac73d505c84df59dcceb372" dependencies: babel-runtime "^6.22.0" -babel-plugin-transform-es2015-unicode-regex@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.22.0.tgz#8d9cc27e7ee1decfe65454fb986452a04a613d20" +babel-plugin-transform-es2015-unicode-regex@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz#d38b12f42ea7323f729387f18a7c5ae1faeb35e9" dependencies: - babel-helper-regex "^6.22.0" + babel-helper-regex "^6.24.1" babel-runtime "^6.22.0" regexpu-core "^2.0.0" @@ 
-538,15 +549,15 @@ babel-plugin-transform-flow-strip-types@^6.22.0: babel-runtime "^6.22.0" babel-plugin-transform-object-rest-spread@^6.8.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-object-rest-spread/-/babel-plugin-transform-object-rest-spread-6.22.0.tgz#1d419b55e68d2e4f64a5ff3373bd67d73c8e83bc" + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-object-rest-spread/-/babel-plugin-transform-object-rest-spread-6.23.0.tgz#875d6bc9be761c58a2ae3feee5dc4895d8c7f921" dependencies: babel-plugin-syntax-object-rest-spread "^6.8.0" babel-runtime "^6.22.0" -babel-plugin-transform-react-display-name@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-react-display-name/-/babel-plugin-transform-react-display-name-6.22.0.tgz#077197520fa8562b8d3da4c3c4b0b1bdd7853f26" +babel-plugin-transform-react-display-name@^6.23.0: + version "6.25.0" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-react-display-name/-/babel-plugin-transform-react-display-name-6.25.0.tgz#67e2bf1f1e9c93ab08db96792e05392bf2cc28d1" dependencies: babel-runtime "^6.22.0" @@ -564,34 +575,26 @@ babel-plugin-transform-react-jsx-source@^6.22.0: babel-plugin-syntax-jsx "^6.8.0" babel-runtime "^6.22.0" -babel-plugin-transform-react-jsx@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-react-jsx/-/babel-plugin-transform-react-jsx-6.22.0.tgz#48556b7dd4c3fe97d1c943bcd54fc3f2561c1817" +babel-plugin-transform-react-jsx@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-react-jsx/-/babel-plugin-transform-react-jsx-6.24.1.tgz#840a028e7df460dfc3a2d29f0c0d91f6376e66a3" dependencies: - babel-helper-builder-react-jsx "^6.22.0" + babel-helper-builder-react-jsx "^6.24.1" babel-plugin-syntax-jsx "^6.8.0" babel-runtime "^6.22.0" -babel-plugin-transform-regenerator@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.22.0.tgz#65740593a319c44522157538d690b84094617ea6" +babel-plugin-transform-regenerator@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.24.1.tgz#b8da305ad43c3c99b4848e4fe4037b770d23c418" dependencies: - regenerator-transform "0.9.8" + regenerator-transform "0.9.11" -babel-plugin-transform-strict-mode@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.22.0.tgz#e008df01340fdc87e959da65991b7e05970c8c7c" +babel-plugin-transform-strict-mode@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz#d5faf7aa578a65bbe591cf5edae04a0c67020758" dependencies: babel-runtime "^6.22.0" - babel-types "^6.22.0" - -babel-polyfill@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-polyfill/-/babel-polyfill-6.22.0.tgz#1ac99ebdcc6ba4db1e2618c387b2084a82154a3b" - dependencies: - babel-runtime "^6.22.0" - core-js "^2.4.0" - regenerator-runtime "^0.10.0" + babel-types "^6.24.1" babel-polyfill@^6.23.0: version "6.23.0" @@ -602,51 +605,56 @@ babel-polyfill@^6.23.0: regenerator-runtime "^0.10.0" babel-preset-es2015@^6.14.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-preset-es2015/-/babel-preset-es2015-6.22.0.tgz#af5a98ecb35eb8af764ad8a5a05eb36dc4386835" + 
version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-preset-es2015/-/babel-preset-es2015-6.24.1.tgz#d44050d6bc2c9feea702aaf38d727a0210538939" dependencies: babel-plugin-check-es2015-constants "^6.22.0" babel-plugin-transform-es2015-arrow-functions "^6.22.0" babel-plugin-transform-es2015-block-scoped-functions "^6.22.0" - babel-plugin-transform-es2015-block-scoping "^6.22.0" - babel-plugin-transform-es2015-classes "^6.22.0" - babel-plugin-transform-es2015-computed-properties "^6.22.0" + babel-plugin-transform-es2015-block-scoping "^6.24.1" + babel-plugin-transform-es2015-classes "^6.24.1" + babel-plugin-transform-es2015-computed-properties "^6.24.1" babel-plugin-transform-es2015-destructuring "^6.22.0" - babel-plugin-transform-es2015-duplicate-keys "^6.22.0" + babel-plugin-transform-es2015-duplicate-keys "^6.24.1" babel-plugin-transform-es2015-for-of "^6.22.0" - babel-plugin-transform-es2015-function-name "^6.22.0" + babel-plugin-transform-es2015-function-name "^6.24.1" babel-plugin-transform-es2015-literals "^6.22.0" - babel-plugin-transform-es2015-modules-amd "^6.22.0" - babel-plugin-transform-es2015-modules-commonjs "^6.22.0" - babel-plugin-transform-es2015-modules-systemjs "^6.22.0" - babel-plugin-transform-es2015-modules-umd "^6.22.0" - babel-plugin-transform-es2015-object-super "^6.22.0" - babel-plugin-transform-es2015-parameters "^6.22.0" - babel-plugin-transform-es2015-shorthand-properties "^6.22.0" + babel-plugin-transform-es2015-modules-amd "^6.24.1" + babel-plugin-transform-es2015-modules-commonjs "^6.24.1" + babel-plugin-transform-es2015-modules-systemjs "^6.24.1" + babel-plugin-transform-es2015-modules-umd "^6.24.1" + babel-plugin-transform-es2015-object-super "^6.24.1" + babel-plugin-transform-es2015-parameters "^6.24.1" + babel-plugin-transform-es2015-shorthand-properties "^6.24.1" babel-plugin-transform-es2015-spread "^6.22.0" - babel-plugin-transform-es2015-sticky-regex "^6.22.0" + babel-plugin-transform-es2015-sticky-regex "^6.24.1" babel-plugin-transform-es2015-template-literals "^6.22.0" babel-plugin-transform-es2015-typeof-symbol "^6.22.0" - babel-plugin-transform-es2015-unicode-regex "^6.22.0" - babel-plugin-transform-regenerator "^6.22.0" + babel-plugin-transform-es2015-unicode-regex "^6.24.1" + babel-plugin-transform-regenerator "^6.24.1" + +babel-preset-flow@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-preset-flow/-/babel-preset-flow-6.23.0.tgz#e71218887085ae9a24b5be4169affb599816c49d" + dependencies: + babel-plugin-transform-flow-strip-types "^6.22.0" babel-preset-react@^6.11.1: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-preset-react/-/babel-preset-react-6.22.0.tgz#7bc97e2d73eec4b980fb6b4e4e0884e81ccdc165" + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-preset-react/-/babel-preset-react-6.24.1.tgz#ba69dfaea45fc3ec639b6a4ecea6e17702c91380" dependencies: - babel-plugin-syntax-flow "^6.3.13" babel-plugin-syntax-jsx "^6.3.13" - babel-plugin-transform-flow-strip-types "^6.22.0" - babel-plugin-transform-react-display-name "^6.22.0" - babel-plugin-transform-react-jsx "^6.22.0" + babel-plugin-transform-react-display-name "^6.23.0" + babel-plugin-transform-react-jsx "^6.24.1" babel-plugin-transform-react-jsx-self "^6.22.0" babel-plugin-transform-react-jsx-source "^6.22.0" + babel-preset-flow "^6.23.0" -babel-register@^6.14.0, babel-register@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.22.0.tgz#a61dd83975f9ca4a9e7d6eff3059494cd5ea4c63" 
+babel-register@^6.14.0, babel-register@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.24.1.tgz#7e10e13a2f71065bdfad5a1787ba45bca6ded75f" dependencies: - babel-core "^6.22.0" + babel-core "^6.24.1" babel-runtime "^6.22.0" core-js "^2.4.0" home-or-tmp "^2.0.0" @@ -661,39 +669,39 @@ babel-runtime@^5.8.25: core-js "^1.0.0" babel-runtime@^6.18.0, babel-runtime@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.22.0.tgz#1cf8b4ac67c77a4ddb0db2ae1f74de52ac4ca611" + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.23.0.tgz#0a9489f144de70efb3ce4300accdb329e2fc543b" dependencies: core-js "^2.4.0" regenerator-runtime "^0.10.0" -babel-template@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.22.0.tgz#403d110905a4626b317a2a1fcb8f3b73204b2edb" +babel-template@^6.24.1, babel-template@^6.25.0: + version "6.25.0" + resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.25.0.tgz#665241166b7c2aa4c619d71e192969552b10c071" dependencies: babel-runtime "^6.22.0" - babel-traverse "^6.22.0" - babel-types "^6.22.0" - babylon "^6.11.0" + babel-traverse "^6.25.0" + babel-types "^6.25.0" + babylon "^6.17.2" lodash "^4.2.0" -babel-traverse@^6.22.0, babel-traverse@^6.22.1, babel-traverse@^6.4.5, babel-traverse@^6.9.0: - version "6.22.1" - resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.22.1.tgz#3b95cd6b7427d6f1f757704908f2fc9748a5f59f" +babel-traverse@^6.24.1, babel-traverse@^6.25.0, babel-traverse@^6.4.5, babel-traverse@^6.9.0: + version "6.25.0" + resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.25.0.tgz#2257497e2fcd19b89edc13c4c91381f9512496f1" dependencies: babel-code-frame "^6.22.0" - babel-messages "^6.22.0" + babel-messages "^6.23.0" babel-runtime "^6.22.0" - babel-types "^6.22.0" - babylon "^6.15.0" + babel-types "^6.25.0" + babylon "^6.17.2" debug "^2.2.0" globals "^9.0.0" invariant "^2.2.0" lodash "^4.2.0" -babel-types@^6.19.0, babel-types@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.22.0.tgz#2a447e8d0ea25d2512409e4175479fd78cc8b1db" +babel-types@^6.19.0, babel-types@^6.24.1, babel-types@^6.25.0: + version "6.25.0" + resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.25.0.tgz#70afb248d5660e5d18f811d91c8303b54134a18e" dependencies: babel-runtime "^6.22.0" esutils "^2.0.2" @@ -704,41 +712,49 @@ babylon@6.14.1: version "6.14.1" resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.14.1.tgz#956275fab72753ad9b3435d7afe58f8bf0a29815" -babylon@^6.11.0, babylon@^6.15.0, babylon@^6.8.0: - version "6.15.0" - resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.15.0.tgz#ba65cfa1a80e1759b0e89fb562e27dccae70348e" +babylon@^6.17.2, babylon@^6.8.0: + version "6.17.4" + resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.17.4.tgz#3e8b7402b88d22c3423e137a1577883b15ff869a" backo2@1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947" -balanced-match@^0.4.1, balanced-match@^0.4.2: +balanced-match@^0.4.2: version "0.4.2" resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838" +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + 
base62@^1.1.0: - version "1.1.2" - resolved "https://registry.yarnpkg.com/base62/-/base62-1.1.2.tgz#22ced6a49913565bc0b8d9a11563a465c084124c" + version "1.2.0" + resolved "https://registry.yarnpkg.com/base62/-/base62-1.2.0.tgz#31e7e560dc846c9f44c1a531df6514da35474157" base64-arraybuffer@0.1.5: version "0.1.5" resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8" base64-js@^1.0.2: - version "1.2.0" - resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.2.0.tgz#a39992d723584811982be5e290bb6a53d86700f1" + version "1.2.1" + resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.2.1.tgz#a91947da1f4a516ea38e5b4ec0ec3773675e0886" base64id@1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/base64id/-/base64id-1.0.0.tgz#47688cb99bb6804f0e06d3e763b1c32e57d8e6b6" -batch@0.5.3, batch@^0.5.3: +batch@0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/batch/-/batch-0.6.1.tgz#dc34314f4e679318093fc760272525f94bf25c16" + +batch@^0.5.3: version "0.5.3" resolved "https://registry.yarnpkg.com/batch/-/batch-0.5.3.tgz#3f3414f380321743bfc1042f9a83ff1d5824d464" bcrypt-pbkdf@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.0.tgz#3ca76b85241c7170bf7d9703e7b9aa74630040d4" + version "1.0.1" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d" dependencies: tweetnacl "^0.14.3" @@ -771,23 +787,23 @@ bluebird@^2.10.2, bluebird@^2.9.27: resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1" bluebird@^3.0.5, bluebird@^3.4.7: - version "3.4.7" - resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.4.7.tgz#f72d760be09b7f76d08ed8fae98b289a8d05fab3" + version "3.5.0" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.0.tgz#791420d7f551eea2897453a8a77653f96606d67c" body-parser@^1.12.4: - version "1.16.0" - resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.16.0.tgz#924a5e472c6229fb9d69b85a20d5f2532dec788b" + version "1.17.2" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.17.2.tgz#f8892abc8f9e627d42aedafbca66bf5ab99104ee" dependencies: bytes "2.4.0" content-type "~1.0.2" - debug "2.6.0" + debug "2.6.7" depd "~1.1.0" - http-errors "~1.5.1" + http-errors "~1.6.1" iconv-lite "0.4.15" on-finished "~2.3.0" - qs "6.2.1" + qs "6.4.0" raw-body "~2.2.0" - type-is "~1.6.14" + type-is "~1.6.15" boolbase@~1.0.0: version "1.0.0" @@ -803,11 +819,11 @@ bootstrap@^3.3.6: version "3.3.7" resolved "https://registry.yarnpkg.com/bootstrap/-/bootstrap-3.3.7.tgz#5a389394549f23330875a3b150656574f8a9eb71" -brace-expansion@^1.0.0: - version "1.1.6" - resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.6.tgz#7197d7eaa9b87e648390ea61fc66c84427420df9" +brace-expansion@^1.1.7: + version "1.1.8" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.8.tgz#c07b211c7c952ec1f8efd51a77ef0d1d3990a292" dependencies: - balanced-match "^0.4.1" + balanced-match "^1.0.0" concat-map "0.0.1" braces@^0.1.2: @@ -836,16 +852,12 @@ browserify-zlib@^0.1.4: dependencies: pako "~0.2.0" -browserslist@^1.0.1, browserslist@^1.5.2, browserslist@~1.6.0: - version "1.6.0" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-1.6.0.tgz#85fb7c993540d3fda31c282baf7f5aee698ac9ee" +browserslist@^1.3.6, browserslist@^1.5.2, browserslist@^1.7.6: + version "1.7.7" + resolved 
"https://registry.yarnpkg.com/browserslist/-/browserslist-1.7.7.tgz#0bd76704258be829b2398bb50e4b62d1a166b0b9" dependencies: - caniuse-db "^1.0.30000613" - electron-to-chromium "^1.2.0" - -buffer-shims@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51" + caniuse-db "^1.0.30000639" + electron-to-chromium "^1.2.7" buffer@^4.9.0: version "4.9.1" @@ -855,6 +867,10 @@ buffer@^4.9.0: ieee754 "^1.1.4" isarray "^1.0.0" +builtin-modules@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f" + builtin-status-codes@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" @@ -882,27 +898,26 @@ camelcase@^1.0.2: version "1.2.1" resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39" -camelcase@^2.0.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f" +camelcase@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd" caniuse-api@^1.5.2: - version "1.5.2" - resolved "https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-1.5.2.tgz#8f393c682f661c0a997b77bba6e826483fb3600e" + version "1.6.1" + resolved "https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-1.6.1.tgz#b534e7c734c4f81ec5fbe8aca2ad24354b962c6c" dependencies: - browserslist "^1.0.1" - caniuse-db "^1.0.30000346" - lodash.memoize "^4.1.0" - lodash.uniq "^4.3.0" - shelljs "^0.7.0" + browserslist "^1.3.6" + caniuse-db "^1.0.30000529" + lodash.memoize "^4.1.2" + lodash.uniq "^4.5.0" -caniuse-db@^1.0.30000346, caniuse-db@^1.0.30000613: - version "1.0.30000613" - resolved "https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000613.tgz#639133b7a5380c1416f9701d23d54d093dd68299" +caniuse-db@^1.0.30000529, caniuse-db@^1.0.30000634, caniuse-db@^1.0.30000639: + version "1.0.30000694" + resolved "https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000694.tgz#02009f4f82d2f0126e4c691b7cd5adb351935c01" -caseless@~0.11.0: - version "0.11.0" - resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7" +caseless@~0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" center-align@^0.1.1: version "0.1.3" @@ -911,7 +926,7 @@ center-align@^0.1.1: align-text "^0.1.3" lazy-cache "^1.0.3" -chalk@^1.1.0, chalk@^1.1.1, chalk@^1.1.3: +chalk@^1.1.0, chalk@^1.1.3: version "1.1.3" resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" dependencies: @@ -922,8 +937,8 @@ chalk@^1.1.0, chalk@^1.1.1, chalk@^1.1.3: supports-color "^2.0.0" chokidar@^1.0.0, chokidar@^1.4.1, chokidar@^1.6.1: - version "1.6.1" - resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-1.6.1.tgz#2f4447ab5e96e50fb3d789fd90d4c72e0e4c70c2" + version "1.7.0" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-1.7.0.tgz#798e689778151c8076b4b360e5edd28cda2bb468" dependencies: anymatch "^1.3.0" async-each "^1.0.0" @@ -937,8 +952,8 @@ chokidar@^1.0.0, chokidar@^1.4.1, chokidar@^1.6.1: fsevents "^1.0.0" clap@^1.0.9: - version "1.1.2" - resolved 
"https://registry.yarnpkg.com/clap/-/clap-1.1.2.tgz#316545bf22229225a2cecaa6824cd2f56a9709ed" + version "1.2.0" + resolved "https://registry.yarnpkg.com/clap/-/clap-1.2.0.tgz#59c90fe3e137104746ff19469a27a634ff68c857" dependencies: chalk "^1.1.3" @@ -946,12 +961,11 @@ classnames@^2.1.5, classnames@^2.2.3, classnames@^2.2.5: version "2.2.5" resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.5.tgz#fb3801d453467649ef3603c7d61a02bd129bde6d" -clean-css@3.4.x, clean-css@^3.2.10: - version "3.4.24" - resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-3.4.24.tgz#89f5a5e9da37ae02394fe049a41388abbe72c3b5" +clean-css@4.1.x, clean-css@^4.0.12: + version "4.1.4" + resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-4.1.4.tgz#eec8811db27457e0078d8ca921fa81b72fa82bf4" dependencies: - commander "2.8.x" - source-map "0.4.x" + source-map "0.5.x" cliui@^2.1.0: version "2.1.0" @@ -961,7 +975,7 @@ cliui@^2.1.0: right-align "^0.1.1" wordwrap "0.0.2" -cliui@^3.0.3: +cliui@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/cliui/-/cliui-3.2.0.tgz#120601537a916d29940f934da3b48d585a39213d" dependencies: @@ -973,9 +987,13 @@ clone@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.2.tgz#260b7a99ebb1edfe247538175f783243cb19d149" +co@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + coa@~1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/coa/-/coa-1.0.1.tgz#7f959346cfc8719e3f7233cd6852854a7c67d8a3" + version "1.0.3" + resolved "https://registry.yarnpkg.com/coa/-/coa-1.0.3.tgz#1b54a5e1dcf77c990455d4deea98c564416dc893" dependencies: q "^1.1.2" @@ -990,8 +1008,8 @@ color-convert@^1.3.0: color-name "^1.1.1" color-name@^1.0.0, color-name@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.1.tgz#4b1415304cf50028ea81643643bd82ea05803689" + version "1.1.2" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.2.tgz#5c8ab72b64bd2215d617ae9559ebb148475cf98d" color-string@^0.3.0: version "0.3.0" @@ -1033,15 +1051,15 @@ commander@2.3.0: version "2.3.0" resolved "https://registry.yarnpkg.com/commander/-/commander-2.3.0.tgz#fd430e889832ec353b9acd1de217c11cb3eef873" -commander@2.8.x: - version "2.8.1" - resolved "https://registry.yarnpkg.com/commander/-/commander-2.8.1.tgz#06be367febfda0c330aa1e2a072d3dc9762425d4" +commander@2.9.x, commander@~2.9.0: + version "2.9.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4" dependencies: graceful-readlink ">= 1.0.0" -commander@2.9.x, commander@^2.2.0, commander@^2.5.0, commander@^2.8.1, commander@^2.9.0: - version "2.9.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4" +commander@^2.2.0, commander@^2.5.0, commander@^2.8.1, commander@^2.9.0: + version "2.10.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.10.0.tgz#e1f5d3245de246d1a5ca04702fa1ad1bd7e405fe" dependencies: graceful-readlink ">= 1.0.0" @@ -1080,10 +1098,10 @@ component-inherit@0.0.3: resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143" compressible@~2.0.8: - version "2.0.9" - resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.9.tgz#6daab4e2b599c2770dd9e21e7a891b1c5a755425" + version "2.0.10" + resolved 
"https://registry.yarnpkg.com/compressible/-/compressible-2.0.10.tgz#feda1c7f7617912732b29bf8cf26252a20b9eecd" dependencies: - mime-db ">= 1.24.0 < 2" + mime-db ">= 1.27.0 < 2" compression@^1.5.2: version "1.6.2" @@ -1112,11 +1130,11 @@ connect-history-api-fallback@^1.3.0: resolved "https://registry.yarnpkg.com/connect-history-api-fallback/-/connect-history-api-fallback-1.3.0.tgz#e51d17f8f0ef0db90a64fdb47de3051556e9f169" connect@^3.3.5: - version "3.5.0" - resolved "https://registry.yarnpkg.com/connect/-/connect-3.5.0.tgz#b357525a0b4c1f50599cd983e1d9efeea9677198" + version "3.6.2" + resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.2.tgz#694e8d20681bfe490282c8ab886be98f09f42fe7" dependencies: - debug "~2.2.0" - finalhandler "0.5.0" + debug "2.6.7" + finalhandler "1.0.3" parseurl "~1.3.1" utils-merge "1.0.0" @@ -1134,9 +1152,9 @@ constants-browserify@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/constants-browserify/-/constants-browserify-1.0.0.tgz#c20b96d8c617748aaf1c16021760cd27fcb8cb75" -content-disposition@0.5.1: - version "0.5.1" - resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.1.tgz#87476c6a67c8daa87e32e87616df883ba7fb071b" +content-disposition@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4" content-type@~1.0.2: version "1.0.2" @@ -1147,8 +1165,8 @@ convert-source-map@^0.3.3: resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-0.3.5.tgz#f1d802950af7dd2631a1febe0596550c86ab3190" convert-source-map@^1.1.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.3.0.tgz#e9f3e9c6e2728efc2676696a70eb382f73106a67" + version "1.5.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5" cookie-signature@1.0.6: version "1.0.6" @@ -1163,8 +1181,8 @@ cookiejar@2.0.6: resolved "https://registry.yarnpkg.com/cookiejar/-/cookiejar-2.0.6.tgz#0abf356ad00d1c5a219d88d44518046dd026acfe" copy-to-clipboard@^3: - version "3.0.5" - resolved "https://registry.yarnpkg.com/copy-to-clipboard/-/copy-to-clipboard-3.0.5.tgz#4cd40e7c2ee159bc72d4f06b5bec8f9e0a1a3442" + version "3.0.6" + resolved "https://registry.yarnpkg.com/copy-to-clipboard/-/copy-to-clipboard-3.0.6.tgz#13c09bfea0408a5dc5bb987fee3b3986518c9d69" dependencies: toggle-selection "^1.0.3" @@ -1189,6 +1207,21 @@ core-util-is@~1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" +create-react-class@^15.5.1, create-react-class@^15.5.2: + version "15.6.0" + resolved "https://registry.yarnpkg.com/create-react-class/-/create-react-class-15.6.0.tgz#ab448497c26566e1e29413e883207d57cfe7bed4" + dependencies: + fbjs "^0.8.9" + loose-envify "^1.3.1" + object-assign "^4.1.1" + +cross-spawn@^4.0.0: + version "4.0.2" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-4.0.2.tgz#7b9247621c23adfdd3856004a823cbe397424d41" + dependencies: + lru-cache "^4.0.1" + which "^1.2.9" + cryptiles@2.x.x: version "2.0.5" resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8" @@ -1240,9 +1273,9 @@ css-selector-tokenizer@^0.5.1: cssesc "^0.1.0" fastparse "^1.1.1" -css-selector-tokenizer@^0.6.0: - version "0.6.0" - resolved 
"https://registry.yarnpkg.com/css-selector-tokenizer/-/css-selector-tokenizer-0.6.0.tgz#6445f582c7930d241dcc5007a43d6fcb8f073152" +css-selector-tokenizer@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/css-selector-tokenizer/-/css-selector-tokenizer-0.7.0.tgz#e6988474ae8c953477bf5e7efecfceccd9cf4c86" dependencies: cssesc "^0.1.0" fastparse "^1.1.1" @@ -1302,9 +1335,9 @@ cssesc@^0.1.0: postcss-value-parser "^3.2.3" postcss-zindex "^2.0.1" -csso@~2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/csso/-/csso-2.2.1.tgz#51fbb5347e50e81e6ed51668a48490ae6fe2afe2" +csso@~2.3.1: + version "2.3.2" + resolved "https://registry.yarnpkg.com/csso/-/csso-2.3.2.tgz#ddd52c587033f49e94b71fc55569f252e8ff5f85" dependencies: clap "^1.0.9" source-map "^0.5.3" @@ -1323,11 +1356,11 @@ date-now@^0.1.4: version "0.1.4" resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b" -debug@2, debug@2.6.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0: - version "2.6.0" - resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.0.tgz#bc596bcabe7617f11d9fa15361eded5608b8499b" +debug@2, debug@2.6.8, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.6.6: + version "2.6.8" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.8.tgz#e731531ca2ede27d188222427da17821d68ff4fc" dependencies: - ms "0.7.2" + ms "2.0.0" debug@2.2.0, debug@~2.2.0: version "2.2.0" @@ -1341,6 +1374,12 @@ debug@2.3.3: dependencies: ms "0.7.2" +debug@2.6.7: + version "2.6.7" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.7.tgz#92bad1f6d05bbb6bba22cca88bcd0ec894c2861e" + dependencies: + ms "2.0.0" + debug@^0.7.4: version "0.7.4" resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39" @@ -1354,8 +1393,8 @@ deep-equal@^1.0.0: resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.0.1.tgz#f5d260292b660e084eff4cdbc9f08ad3247448b5" deep-extend@~0.4.0: - version "0.4.1" - resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.1.tgz#efe4113d08085f4e6f9687759810f807469e2253" + version "0.4.2" + resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.2.tgz#48b699c27e334bf89f10892be432f6e4c7d34a7f" define-properties@^1.1.2, define-properties@~1.1.2: version "1.1.2" @@ -1376,7 +1415,7 @@ delegates@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" -depd@~1.1.0: +depd@1.1.0, depd@~1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3" @@ -1391,10 +1430,10 @@ detect-indent@^4.0.0: repeating "^2.0.0" detective@^4.3.1: - version "4.3.2" - resolved "https://registry.yarnpkg.com/detective/-/detective-4.3.2.tgz#77697e2e7947ac3fe7c8e26a6d6f115235afa91c" + version "4.5.0" + resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1" dependencies: - acorn "^3.1.0" + acorn "^4.0.3" defined "^1.0.0" di@^0.0.1: @@ -1430,6 +1469,10 @@ dom-helpers@^2.4.0: version "2.4.0" resolved "https://registry.yarnpkg.com/dom-helpers/-/dom-helpers-2.4.0.tgz#9bb4b245f637367b1fa670274272aa28fe06c367" +dom-helpers@^3.2.0: + version "3.2.1" + resolved "https://registry.yarnpkg.com/dom-helpers/-/dom-helpers-3.2.1.tgz#3203e07fed217bd1f424b019735582fc37b2825a" + dom-serialize@^2.2.0: version "2.2.1" resolved 
"https://registry.yarnpkg.com/dom-serialize/-/dom-serialize-2.2.1.tgz#562ae8999f44be5ea3076f5419dcd59eb43ac95b" @@ -1496,9 +1539,9 @@ ee-first@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" -electron-to-chromium@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.2.0.tgz#3bd7761f85bd4163602259ae6c7ed338050b17e7" +electron-to-chromium@^1.2.7: + version "1.3.14" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.14.tgz#64af0f9efd3c3c6acd57d71f83b49ca7ee9c4b43" emojis-list@^2.0.0: version "2.1.0" @@ -1508,9 +1551,15 @@ encodeurl@~1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20" -engine.io-client@1.8.2: - version "1.8.2" - resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.2.tgz#c38767547f2a7d184f5752f6f0ad501006703766" +encoding@^0.1.11: + version "0.1.12" + resolved "https://registry.yarnpkg.com/encoding/-/encoding-0.1.12.tgz#538b66f3ee62cd1ab51ec323829d1f9480c74beb" + dependencies: + iconv-lite "~0.4.13" + +engine.io-client@~1.8.4: + version "1.8.4" + resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.4.tgz#9fe85dee25853ca6babe25bd2ad68710863e91c2" dependencies: component-emitter "1.2.1" component-inherit "0.0.3" @@ -1521,7 +1570,7 @@ engine.io-client@1.8.2: parsejson "0.0.3" parseqs "0.0.5" parseuri "0.0.5" - ws "1.1.1" + ws "1.1.2" xmlhttprequest-ssl "1.5.3" yeast "0.1.2" @@ -1536,16 +1585,16 @@ engine.io-parser@1.3.2: has-binary "0.1.7" wtf-8 "1.0.0" -engine.io@1.8.2: - version "1.8.2" - resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.2.tgz#6b59be730b348c0125b0a4589de1c355abcf7a7e" +engine.io@~1.8.4: + version "1.8.4" + resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.4.tgz#77bce12b80e5d60429337fec3b0daf691ebc9003" dependencies: accepts "1.3.3" base64id "1.0.0" cookie "0.3.1" debug "2.3.3" engine.io-parser "1.3.2" - ws "1.1.1" + ws "1.1.4" enhanced-resolve@~0.9.0: version "0.9.1" @@ -1576,6 +1625,12 @@ errno@^0.1.1, errno@^0.1.3: dependencies: prr "~0.0.0" +error-ex@^1.2.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.1.tgz#f855a86ce61adc4e8621c3cda21e7a7612c3a8dc" + dependencies: + is-arrayish "^0.2.1" + es-abstract@^1.6.1: version "1.7.0" resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.7.0.tgz#dfade774e01bfcd97f96180298c449c8623fb94c" @@ -1676,9 +1731,9 @@ esutils@^2.0.0, esutils@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b" -etag@~1.7.0: - version "1.7.0" - resolved "https://registry.yarnpkg.com/etag/-/etag-1.7.0.tgz#03d30b5f67dd6e632d2945d30d6652731a34d5d8" +etag@~1.8.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.0.tgz#6f631aef336d6c46362b51764044ce216be3c051" eventemitter3@1.x.x: version "1.2.0" @@ -1694,6 +1749,18 @@ eventsource@0.1.6: dependencies: original ">=0.0.5" +execa@^0.5.0: + version "0.5.1" + resolved "https://registry.yarnpkg.com/execa/-/execa-0.5.1.tgz#de3fb85cb8d6e91c85bcbceb164581785cb57b36" + dependencies: + cross-spawn "^4.0.0" + get-stream "^2.2.0" + is-stream "^1.1.0" + npm-run-path "^2.0.0" + p-finally "^1.0.0" + signal-exit "^3.0.0" + strip-eof "^1.0.0" + expand-braces@^0.1.1: version "0.1.2" resolved 
"https://registry.yarnpkg.com/expand-braces/-/expand-braces-0.1.2.tgz#488b1d1d2451cb3d3a6b192cfc030f44c5855fea" @@ -1734,35 +1801,37 @@ expect@^1.20.2: tmatch "^2.0.1" express@^4.13.3: - version "4.14.0" - resolved "https://registry.yarnpkg.com/express/-/express-4.14.0.tgz#c1ee3f42cdc891fb3dc650a8922d51ec847d0d66" + version "4.15.3" + resolved "https://registry.yarnpkg.com/express/-/express-4.15.3.tgz#bab65d0f03aa80c358408972fc700f916944b662" dependencies: accepts "~1.3.3" array-flatten "1.1.1" - content-disposition "0.5.1" + content-disposition "0.5.2" content-type "~1.0.2" cookie "0.3.1" cookie-signature "1.0.6" - debug "~2.2.0" + debug "2.6.7" depd "~1.1.0" encodeurl "~1.0.1" escape-html "~1.0.3" - etag "~1.7.0" - finalhandler "0.5.0" - fresh "0.3.0" + etag "~1.8.0" + finalhandler "~1.0.3" + fresh "0.5.0" merge-descriptors "1.0.1" methods "~1.1.2" on-finished "~2.3.0" parseurl "~1.3.1" path-to-regexp "0.1.7" - proxy-addr "~1.1.2" - qs "6.2.0" + proxy-addr "~1.1.4" + qs "6.4.0" range-parser "~1.2.0" - send "0.14.1" - serve-static "~1.11.1" - type-is "~1.6.13" + send "0.15.3" + serve-static "1.12.3" + setprototypeof "1.0.3" + statuses "~1.3.1" + type-is "~1.6.15" utils-merge "1.0.0" - vary "~1.1.0" + vary "~1.1.1" extend@3.0.0, extend@^3.0.0, extend@~3.0.0: version "3.0.0" @@ -1804,9 +1873,21 @@ fbjs@^0.6.1: ua-parser-js "^0.7.9" whatwg-fetch "^0.9.0" +fbjs@^0.8.9: + version "0.8.12" + resolved "https://registry.yarnpkg.com/fbjs/-/fbjs-0.8.12.tgz#10b5d92f76d45575fd63a217d4ea02bea2f8ed04" + dependencies: + core-js "^1.0.0" + isomorphic-fetch "^2.1.1" + loose-envify "^1.0.0" + object-assign "^4.1.0" + promise "^7.1.1" + setimmediate "^1.0.5" + ua-parser-js "^0.7.9" + filename-regex@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.0.tgz#996e3e80479b98b9897f15a8a58b3d084e926775" + version "2.0.1" + resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26" fill-range@^2.1.0: version "2.2.3" @@ -1818,14 +1899,16 @@ fill-range@^2.1.0: repeat-element "^1.1.2" repeat-string "^1.5.2" -finalhandler@0.5.0: - version "0.5.0" - resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-0.5.0.tgz#e9508abece9b6dba871a6942a1d7911b91911ac7" +finalhandler@1.0.3, finalhandler@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.3.tgz#ef47e77950e999780e86022a560e3217e0d0cc89" dependencies: - debug "~2.2.0" + debug "2.6.7" + encodeurl "~1.0.1" escape-html "~1.0.3" on-finished "~2.3.0" - statuses "~1.3.0" + parseurl "~1.3.1" + statuses "~1.3.1" unpipe "~1.0.0" find-cache-dir@^0.1.1: @@ -1843,6 +1926,12 @@ find-up@^1.0.0: path-exists "^2.0.0" pinkie-promise "^2.0.0" +find-up@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" + dependencies: + locate-path "^2.0.0" + flatten@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/flatten/-/flatten-1.0.2.tgz#dae46a9d78fbe25292258cc1e780a41d95c03782" @@ -1851,15 +1940,15 @@ font-awesome@^4.7.0: version "4.7.0" resolved "https://registry.yarnpkg.com/font-awesome/-/font-awesome-4.7.0.tgz#8fa8cf0411a1a31afd07b06d2902bb9fc815a133" -for-in@^0.1.5: - version "0.1.6" - resolved "https://registry.yarnpkg.com/for-in/-/for-in-0.1.6.tgz#c9f96e89bfad18a545af5ec3ed352a1d9e5b4dc8" +for-in@^1.0.1: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" for-own@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.4.tgz#0149b41a39088c7515f51ebe1c1386d45f935072" + version "0.1.5" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce" dependencies: - for-in "^0.1.5" + for-in "^1.0.1" foreach@^2.0.5: version "2.0.5" @@ -1878,8 +1967,8 @@ form-data@1.0.0-rc3: mime-types "^2.1.3" form-data@~2.1.1: - version "2.1.2" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.2.tgz#89c3534008b97eada4cbb157d58f6f5df025eae4" + version "2.1.4" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1" dependencies: asynckit "^0.4.0" combined-stream "^1.0.5" @@ -1899,9 +1988,9 @@ fresh-falafel@^0.2.6: dependencies: esprima "^2.0.0" -fresh@0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.3.0.tgz#651f838e22424e7566de161d8358caa199f83d4f" +fresh@0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.0.tgz#f474ca5e6a9246d6fd8e0953cfa9b9c805afa78e" fs-access@^1.0.0: version "1.0.1" @@ -1918,13 +2007,13 @@ fs.realpath@^1.0.0: resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" fsevents@^1.0.0: - version "1.0.17" - resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.0.17.tgz#8537f3f12272678765b4fd6528c0f1f66f8f4558" + version "1.1.2" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.1.2.tgz#3282b713fb3ad80ede0e9fcf4611b5aa6fc033f4" dependencies: nan "^2.3.0" - node-pre-gyp "^0.6.29" + node-pre-gyp "^0.6.36" -fstream-ignore@~1.0.5: +fstream-ignore@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105" dependencies: @@ -1932,9 +2021,9 @@ fstream-ignore@~1.0.5: inherits "2" minimatch "^3.0.0" -fstream@^1.0.0, fstream@^1.0.2, fstream@~1.0.10: - version "1.0.10" - resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.10.tgz#604e8a92fe26ffd9f6fae30399d4984e1ab22822" +fstream@^1.0.0, fstream@^1.0.10, fstream@^1.0.2: + version "1.0.11" + resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171" dependencies: graceful-fs "^4.1.2" inherits "~2.0.0" @@ -1945,9 +2034,9 @@ function-bind@^1.0.2, function-bind@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.0.tgz#16176714c801798e4e8f2cf7f7529467bb4a5771" -gauge@~2.7.1: - version "2.7.2" - resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.2.tgz#15cecc31b02d05345a5d6b0e171cdb3ad2307774" +gauge@~2.7.3: + version "2.7.4" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" dependencies: aproba "^1.0.3" console-control-strings "^1.0.0" @@ -1956,22 +2045,22 @@ gauge@~2.7.1: signal-exit "^3.0.0" string-width "^1.0.1" strip-ansi "^3.0.1" - supports-color "^0.2.0" wide-align "^1.1.0" -generate-function@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74" +get-caller-file@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.2.tgz#f702e63127e7e231c160a80c1554acb70d5047e5" -generate-object-property@^1.1.0: - version "1.2.0" - 
resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0" +get-stream@^2.2.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-2.3.1.tgz#5f38f93f346009666ee0150a054167f91bdd95de" dependencies: - is-property "^1.0.0" + object-assign "^4.0.1" + pinkie-promise "^2.0.0" getpass@^0.1.1: - version "0.1.6" - resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.6.tgz#283ffd9fc1256840875311c1b60e8c40187110e6" + version "0.1.7" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" dependencies: assert-plus "^1.0.0" @@ -2005,30 +2094,20 @@ glob@^5.0.15: once "^1.3.0" path-is-absolute "^1.0.0" -glob@^6.0.4: - version "6.0.4" - resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22" - dependencies: - inflight "^1.0.4" - inherits "2" - minimatch "2 || 3" - once "^1.3.0" - path-is-absolute "^1.0.0" - -glob@^7.0.0, glob@^7.0.5: - version "7.1.1" - resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8" +glob@^7.0.0, glob@^7.0.5, glob@^7.1.1: + version "7.1.2" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15" dependencies: fs.realpath "^1.0.0" inflight "^1.0.4" inherits "2" - minimatch "^3.0.2" + minimatch "^3.0.4" once "^1.3.0" path-is-absolute "^1.0.0" globals@^9.0.0: - version "9.14.0" - resolved "https://registry.yarnpkg.com/globals/-/globals-9.14.0.tgz#8859936af0038741263053b39d0e76ca241e4034" + version "9.18.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-9.18.0.tgz#aa3896b3e69b487f17e31ed2143d69a8e30c2d8a" graceful-fs@^4.1.2, graceful-fs@^4.1.4: version "4.1.11" @@ -2042,14 +2121,16 @@ growl@1.9.2: version "1.9.2" resolved "https://registry.yarnpkg.com/growl/-/growl-1.9.2.tgz#0ea7743715db8d8de2c5ede1775e1b45ac85c02f" -har-validator@~2.0.6: - version "2.0.6" - resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d" +har-schema@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e" + +har-validator@~4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a" dependencies: - chalk "^1.1.1" - commander "^2.9.0" - is-my-json-valid "^2.12.4" - pinkie-promise "^2.0.0" + ajv "^4.9.1" + har-schema "^1.0.5" has-ansi@^2.0.0: version "2.0.0" @@ -2071,6 +2152,10 @@ has-flag@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa" +has-flag@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-2.0.0.tgz#e8207af1cc7b30d446cc70b734b5e8be18f88d51" + has-unicode@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" @@ -2127,26 +2212,30 @@ home-or-tmp@^2.0.0: os-homedir "^1.0.0" os-tmpdir "^1.0.1" +hosted-git-info@^2.1.4: + version "2.4.2" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.4.2.tgz#0076b9f46a270506ddbaaea56496897460612a67" + html-comment-regex@^1.1.0: version "1.1.1" resolved "https://registry.yarnpkg.com/html-comment-regex/-/html-comment-regex-1.1.1.tgz#668b93776eaae55ebde8f3ad464b307a4963625e" 
html-minifier@^3.2.3: - version "3.2.3" - resolved "https://registry.yarnpkg.com/html-minifier/-/html-minifier-3.2.3.tgz#d2ff536e24d95726c332493d8f77d84dbed85372" + version "3.5.2" + resolved "https://registry.yarnpkg.com/html-minifier/-/html-minifier-3.5.2.tgz#d73bc3ff448942408818ce609bf3fb0ea7ef4eb7" dependencies: camel-case "3.0.x" - clean-css "3.4.x" + clean-css "4.1.x" commander "2.9.x" he "1.1.x" ncname "1.0.x" param-case "2.1.x" relateurl "0.2.x" - uglify-js "2.7.x" + uglify-js "3.0.x" html-webpack-plugin@^2.22.0: - version "2.26.0" - resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-2.26.0.tgz#ba97c8a66f912b85df80d2aeea65966c8bd9249e" + version "2.28.0" + resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-2.28.0.tgz#2e7863b57e5fd48fe263303e2ffc934c3064d009" dependencies: bluebird "^3.4.7" html-minifier "^3.2.3" @@ -2164,17 +2253,18 @@ htmlparser2@~3.3.0: domutils "1.1" readable-stream "1.0" -http-errors@~1.5.0, http-errors@~1.5.1: - version "1.5.1" - resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.5.1.tgz#788c0d2c1de2c81b9e6e8c01843b6b97eb920750" +http-errors@~1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257" dependencies: + depd "1.1.0" inherits "2.0.3" - setprototypeof "1.0.2" + setprototypeof "1.0.3" statuses ">= 1.3.1 < 2" http-proxy-middleware@~0.17.1: - version "0.17.3" - resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-0.17.3.tgz#940382147149b856084f5534752d5b5a8168cd1d" + version "0.17.4" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-0.17.4.tgz#642e8848851d66f09d4f124912846dbaeb41b833" dependencies: http-proxy "^1.16.2" is-glob "^3.1.0" @@ -2204,21 +2294,21 @@ humanize@0.0.9: version "0.0.9" resolved "https://registry.yarnpkg.com/humanize/-/humanize-0.0.9.tgz#1994ffaecdfe9c441ed2bdac7452b7bb4c9e41a4" -iconv-lite@0.4.15, iconv-lite@^0.4.5: +iconv-lite@0.4.15, iconv-lite@^0.4.5, iconv-lite@~0.4.13: version "0.4.15" resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.15.tgz#fe265a218ac6a57cfe854927e9d04c19825eddeb" -icss-replace-symbols@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/icss-replace-symbols/-/icss-replace-symbols-1.0.2.tgz#cb0b6054eb3af6edc9ab1d62d01933e2d4c8bfa5" +icss-replace-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz#06ea6f83679a7749e386cfe1fe812ae5db223ded" ieee754@^1.1.4: version "1.1.8" resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.1.8.tgz#be33d40ac10ef1926701f6f08a2d86fbfd1ad3e4" image-size@~0.5.0: - version "0.5.1" - resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.1.tgz#28eea8548a4b1443480ddddc1e083ae54652439f" + version "0.5.5" + resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.5.tgz#09dfd4ab9d20e29eb1c3e80b8990378df9e3cb9c" indexes-of@^1.0.1: version "1.0.1" @@ -2235,7 +2325,7 @@ inflight@^1.0.4: once "^1.3.0" wrappy "1" -inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@~2.0.0, inherits@~2.0.1: +inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@~2.0.0, inherits@~2.0.1, inherits@~2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" @@ -2251,10 +2341,6 @@ interpret@^0.6.4: version "0.6.6" resolved 
"https://registry.yarnpkg.com/interpret/-/interpret-0.6.6.tgz#fecd7a18e7ce5ca6abfb953e1f86213a49f1625b" -interpret@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.0.1.tgz#d579fb7f693b858004947af39fa0db49f795602c" - invariant@^2.0.0, invariant@^2.1.0, invariant@^2.1.2, invariant@^2.2.0, invariant@^2.2.1: version "2.2.2" resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.2.tgz#9e1f56ac0acdb6bf303306f338be3b204ae60360" @@ -2265,14 +2351,18 @@ invert-kv@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6" -ipaddr.js@1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.2.0.tgz#8aba49c9192799585bdd643e0ccb50e8ae777ba4" +ipaddr.js@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.3.0.tgz#1e03a52fdad83a8bbb2b25cbf4998b4cffcd3dec" is-absolute-url@^2.0.0: version "2.1.0" resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6" +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + is-arrow-function@^2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/is-arrow-function/-/is-arrow-function-2.0.3.tgz#29be2c2d8d9450852b8bbafb635ba7b8d8e87ec2" @@ -2289,9 +2379,15 @@ is-boolean-object@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.0.0.tgz#98f8b28030684219a95f375cfbd88ce3405dff93" -is-buffer@^1.0.2: - version "1.1.4" - resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.4.tgz#cfc86ccd5dc5a52fa80489111c6920c457e2d98b" +is-buffer@^1.1.5: + version "1.1.5" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc" + +is-builtin-module@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe" + dependencies: + builtin-modules "^1.0.0" is-callable@^1.0.4, is-callable@^1.1.1, is-callable@^1.1.3: version "1.1.3" @@ -2302,8 +2398,8 @@ is-date-object@^1.0.1: resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.1.tgz#9aa20eb6aeebbff77fbd33e74ca01b33581d3a16" is-dotfile@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d" + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.3.tgz#a6a2f32ffd2dfb04f5ca25ecd0f6b83cf798a1e1" is-equal-shallow@^0.1.3: version "0.1.3" @@ -2312,20 +2408,20 @@ is-equal-shallow@^0.1.3: is-primitive "^2.0.0" is-equal@^1.5.1: - version "1.5.3" - resolved "https://registry.yarnpkg.com/is-equal/-/is-equal-1.5.3.tgz#05b7fa3a1122cbc71c1ef41ce0142d5532013b29" + version "1.5.5" + resolved "https://registry.yarnpkg.com/is-equal/-/is-equal-1.5.5.tgz#5e85f1957e052883247feb386965a3bba15fbb3d" dependencies: has "^1.0.1" is-arrow-function "^2.0.3" is-boolean-object "^1.0.0" is-callable "^1.1.3" is-date-object "^1.0.1" - is-generator-function "^1.0.3" + is-generator-function "^1.0.6" is-number-object "^1.0.3" is-regex "^1.0.3" is-string "^1.0.4" is-symbol "^1.0.1" - object.entries "^1.0.3" + object.entries "^1.0.4" is-extendable@^0.1.1: version "0.1.1" @@ -2351,7 +2447,11 @@ is-fullwidth-code-point@^1.0.0: dependencies: number-is-nan "^1.0.0" 
-is-generator-function@^1.0.3: +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + +is-generator-function@^1.0.6: version "1.0.6" resolved "https://registry.yarnpkg.com/is-generator-function/-/is-generator-function-1.0.6.tgz#9e71653cd15fff341c79c4151460a131d31e9fc4" @@ -2367,15 +2467,6 @@ is-glob@^3.1.0: dependencies: is-extglob "^2.1.0" -is-my-json-valid@^2.12.4: - version "2.15.0" - resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.15.0.tgz#936edda3ca3c211fd98f3b2d3e08da43f7b2915b" - dependencies: - generate-function "^2.0.0" - generate-object-property "^1.1.0" - jsonpointer "^4.0.0" - xtend "^4.0.0" - is-number-object@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.3.tgz#f265ab89a9f445034ef6aff15a8f00b00f551799" @@ -2384,12 +2475,18 @@ is-number@^0.1.1: version "0.1.1" resolved "https://registry.yarnpkg.com/is-number/-/is-number-0.1.1.tgz#69a7af116963d47206ec9bd9b48a14216f1e3806" -is-number@^2.0.2, is-number@^2.1.0: +is-number@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f" dependencies: kind-of "^3.0.2" +is-number@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-3.0.0.tgz#24fd6201a4782cf50561c810276afc7d12d71195" + dependencies: + kind-of "^3.0.2" + is-plain-obj@^1.0.0: version "1.1.0" resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" @@ -2402,13 +2499,15 @@ is-primitive@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575" -is-property@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84" - is-regex@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.0.3.tgz#0d55182bddf9f2fde278220aec3a75642c908637" + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.0.4.tgz#5517489b547091b0930e095654ced25ee97e9491" + dependencies: + has "^1.0.1" + +is-stream@^1.0.1, is-stream@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" is-string@^1.0.4: version "1.0.4" @@ -2440,9 +2539,9 @@ isbinaryfile@^3.0.0: version "3.0.2" resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-3.0.2.tgz#4a3e974ec0cba9004d3fc6cde7209ea69368a621" -isexe@^1.1.1: - version "1.1.2" - resolved "https://registry.yarnpkg.com/isexe/-/isexe-1.1.2.tgz#36f3e22e60750920f5e7241a476a8c6a42275ad0" +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" isobject@^2.0.0: version "2.1.0" @@ -2450,6 +2549,13 @@ isobject@^2.0.0: dependencies: isarray "1.0.0" +isomorphic-fetch@^2.1.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz#611ae1acf14f5e81f729507472819fe9733558a9" + dependencies: + node-fetch "^1.0.1" + whatwg-fetch ">=0.10.0" + isstream@~0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" @@ -2461,12 +2567,6 @@ jade@0.26.3: commander "0.6.1" mkdirp "0.3.0" 
-jodid25519@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967" - dependencies: - jsbn "~0.1.0" - js-base64@^2.1.9: version "2.1.9" resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.1.9.tgz#f0e80ae039a4bd654b5f281fc93f04a914a7fcce" @@ -2481,19 +2581,19 @@ js-beautify@1.6.4: nopt "~3.0.1" js-tokens@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.0.tgz#a2f2a969caae142fb3cd56228358c89366957bd1" + version "3.0.1" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.1.tgz#08e9f132484a2c45a30907e9dc4d5567b7f114d7" -js-yaml@~3.6.1: - version "3.6.1" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.6.1.tgz#6e5fe67d8b205ce4d22fad05b7781e8dadcc4b30" +js-yaml@~3.7.0: + version "3.7.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.7.0.tgz#5c967ddd837a9bfdca5f2de84253abe8a1c03b80" dependencies: argparse "^1.0.7" esprima "^2.6.0" jsbn@~0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.0.tgz#650987da0dd74f4ebf5a11377a2aa2d273e97dfd" + version "0.1.1" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" jsesc@^1.3.0: version "1.3.0" @@ -2511,6 +2611,12 @@ json-schema@0.2.3: version "0.2.3" resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" +json-stable-stringify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af" + dependencies: + jsonify "~0.0.0" + json-stringify-safe@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" @@ -2523,14 +2629,15 @@ json5@^0.5.0: version "0.5.1" resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" -jsonpointer@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9" +jsonify@~0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73" jsprim@^1.2.2: - version "1.3.1" - resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.3.1.tgz#2a7256f70412a29ee3670aaca625994c4dcff252" + version "1.4.0" + resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918" dependencies: + assert-plus "1.0.0" extsprintf "1.0.2" json-schema "0.2.3" verror "1.3.6" @@ -2605,14 +2712,20 @@ karma@^0.13.22: useragent "^2.1.6" keycode@^2.1.0: - version "2.1.8" - resolved "https://registry.yarnpkg.com/keycode/-/keycode-2.1.8.tgz#94d2b7098215eff0e8f9a8931d5a59076c4532fb" + version "2.1.9" + resolved "https://registry.yarnpkg.com/keycode/-/keycode-2.1.9.tgz#964a23c54e4889405b4861a5c9f0480d45141dfa" kind-of@^3.0.2: - version "3.1.0" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.1.0.tgz#475d698a5e49ff5e53d14e3e732429dc8bf4cf47" + version "3.2.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" dependencies: - is-buffer "^1.0.2" + is-buffer "^1.1.5" + +kind-of@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-4.0.0.tgz#20813df3d712928b207378691a45066fae72dd57" + dependencies: + is-buffer "^1.1.5" 
lazy-cache@^1.0.3: version "1.0.4" @@ -2643,21 +2756,45 @@ less@^2.7.1: request "^2.72.0" source-map "^0.5.3" -loader-utils@0.2.x, loader-utils@^0.2.11, loader-utils@^0.2.16, loader-utils@^0.2.5, loader-utils@^0.2.7, loader-utils@~0.2.2: - version "0.2.16" - resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-0.2.16.tgz#f08632066ed8282835dff88dfb52704765adee6d" +load-json-file@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-2.0.0.tgz#7947e42149af80d696cbf797bcaabcfe1fe29ca8" + dependencies: + graceful-fs "^4.1.2" + parse-json "^2.2.0" + pify "^2.0.0" + strip-bom "^3.0.0" + +loader-utils@^0.2.11, loader-utils@^0.2.16, loader-utils@^0.2.5, loader-utils@~0.2.2: + version "0.2.17" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-0.2.17.tgz#f86e6374d43205a6e6c60e9196f17c0299bfb348" dependencies: big.js "^3.1.3" emojis-list "^2.0.0" json5 "^0.5.0" object-assign "^4.0.1" +loader-utils@^1.0.2: + version "1.1.0" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.1.0.tgz#c98aef488bcceda2ffb5e2de646d6a754429f5cd" + dependencies: + big.js "^3.1.3" + emojis-list "^2.0.0" + json5 "^0.5.0" + local-storage-fallback@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/local-storage-fallback/-/local-storage-fallback-1.3.0.tgz#d53c2455391ae2a1fa32db02031224cca9a8c8da" + version "1.4.0" + resolved "https://registry.yarnpkg.com/local-storage-fallback/-/local-storage-fallback-1.4.0.tgz#2cb71823abbaa982a0460708d9d392671d3c3f6d" dependencies: cookie "^0.3.1" +locate-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e" + dependencies: + p-locate "^2.0.0" + path-exists "^3.0.0" + lodash-compat@^3.10.1: version "3.10.2" resolved "https://registry.yarnpkg.com/lodash-compat/-/lodash-compat-3.10.2.tgz#c6940128a9d30f8e902cd2cf99fd0cba4ecfc183" @@ -2689,15 +2826,11 @@ lodash.deburr@^3.0.0: dependencies: lodash._root "^3.0.0" -lodash.indexof@^4.0.5: - version "4.0.5" - resolved "https://registry.yarnpkg.com/lodash.indexof/-/lodash.indexof-4.0.5.tgz#53714adc2cddd6ed87638f893aa9b6c24e31ef3c" - -lodash.memoize@^4.1.0: +lodash.memoize@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" -lodash.uniq@^4.3.0: +lodash.uniq@^4.5.0: version "4.5.0" resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" @@ -2726,17 +2859,21 @@ longest@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097" -loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0: +loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0, loose-envify@^1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.3.1.tgz#d1a8ad33fa9ce0e713d65fdd0ac8b748d478c848" dependencies: js-tokens "^3.0.0" lower-case@^1.1.1: - version "1.1.3" - resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.3.tgz#c92393d976793eee5ba4edb583cf8eae35bd9bfb" + version "1.1.4" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.4.tgz#9a2cabd1b9e8e0ae993a4bf7d5875c39c42e8eac" -lru-cache@2, lru-cache@2.2.x: +lru-cache@2: + version "2.7.3" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952" + +lru-cache@2.2.x: version 
"2.2.4" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.2.4.tgz#6c658619becf14031d0d0b594b16042ce4dc063d" @@ -2746,20 +2883,31 @@ lru-cache@^3.2.0: dependencies: pseudomap "^1.0.1" +lru-cache@^4.0.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.1.tgz#622e32e82488b49279114a4f9ecf45e7cd6bba55" + dependencies: + pseudomap "^1.0.2" + yallist "^2.1.2" + macaddress@^0.2.8: version "0.2.8" resolved "https://registry.yarnpkg.com/macaddress/-/macaddress-0.2.8.tgz#5904dc537c39ec6dbefeae902327135fa8511f12" math-expression-evaluator@^1.2.14: - version "1.2.15" - resolved "https://registry.yarnpkg.com/math-expression-evaluator/-/math-expression-evaluator-1.2.15.tgz#38dc5f0194c5bf5ff1c690ad4c4b64df71ac0187" - dependencies: - lodash.indexof "^4.0.5" + version "1.2.17" + resolved "https://registry.yarnpkg.com/math-expression-evaluator/-/math-expression-evaluator-1.2.17.tgz#de819fdbcd84dccd8fae59c6aeb79615b9d266ac" media-typer@0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" +mem@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/mem/-/mem-1.1.0.tgz#5edd52b485ca1d900fe64895505399a0dfa45f76" + dependencies: + mimic-fn "^1.0.0" + memory-fs@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.2.0.tgz#f2bb25368bc121e391c2520de92969caee0a0290" @@ -2804,24 +2952,32 @@ micromatch@^2.1.5, micromatch@^2.3.11: parse-glob "^3.0.4" regex-cache "^0.4.2" -"mime-db@>= 1.24.0 < 2", mime-db@^1.25.0, mime-db@~1.26.0: - version "1.26.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.26.0.tgz#eaffcd0e4fc6935cf8134da246e2e6c35305adff" +"mime-db@>= 1.27.0 < 2", mime-db@^1.25.0: + version "1.28.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.28.0.tgz#fedd349be06d2865b7fc57d837c6de4f17d7ac3c" -mime-types@^2.1.12, mime-types@^2.1.13, mime-types@^2.1.3, mime-types@~2.1.11, mime-types@~2.1.13, mime-types@~2.1.7: - version "2.1.14" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.14.tgz#f7ef7d97583fcaf3b7d282b6f8b5679dab1e94ee" +mime-db@~1.27.0: + version "1.27.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1" + +mime-types@^2.1.12, mime-types@^2.1.13, mime-types@^2.1.3, mime-types@~2.1.11, mime-types@~2.1.15, mime-types@~2.1.7: + version "2.1.15" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed" dependencies: - mime-db "~1.26.0" + mime-db "~1.27.0" -mime@1.2.x: - version "1.2.11" - resolved "https://registry.yarnpkg.com/mime/-/mime-1.2.11.tgz#58203eed86e3a5ef17aed2b7d9ebd47f0a60dd10" - -mime@1.3.4, mime@^1.2.11, mime@^1.3.4: +mime@1.3.4: version "1.3.4" resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53" +mime@1.3.x, mime@^1.2.11, mime@^1.3.4: + version "1.3.6" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.6.tgz#591d84d3653a6b0b4a3b9df8de5aa8108e72e5e0" + +mimic-fn@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.1.0.tgz#e667783d92e89dbd342818b5230b9d62a672ad18" + minimatch@0.3: version "0.3.0" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.3.0.tgz#275d8edaac4f1bb3326472089e7949c8394699dd" @@ -2829,11 +2985,11 @@ minimatch@0.3: lru-cache "2" sigmund "~1.0.0" -"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2: - version "3.0.3" - resolved 
"https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774" +"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2, minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" dependencies: - brace-expansion "^1.0.0" + brace-expansion "^1.1.7" minimist@0.0.8, minimist@~0.0.1: version "0.0.8" @@ -2869,8 +3025,8 @@ mocha@^2.5.3: to-iso-string "0.0.2" moment@^2.15.1: - version "2.17.1" - resolved "https://registry.yarnpkg.com/moment/-/moment-2.17.1.tgz#fed9506063f36b10f066c8b59a144d7faebe1d82" + version "2.18.1" + resolved "https://registry.yarnpkg.com/moment/-/moment-2.18.1.tgz#c36193dd3ce1c2eed2adb7c802dbbc77a81b1c0f" "mout@>=0.9 <2.0": version "1.0.0" @@ -2888,9 +3044,13 @@ ms@0.7.2: version "0.7.2" resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765" +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + nan@^2.3.0: - version "2.5.1" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.5.1.tgz#d5b01691253326a97a2bbee9e61c55d8d60351e2" + version "2.6.2" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.6.2.tgz#e4ff34e6c95fdfb5aecc08de6596f43605a7db45" ncname@1.0.x: version "1.0.0" @@ -2909,11 +3069,18 @@ no-case@^2.2.0: lower-case "^1.1.1" node-dir@^0.1.10: - version "0.1.16" - resolved "https://registry.yarnpkg.com/node-dir/-/node-dir-0.1.16.tgz#d2ef583aa50b90d93db8cdd26fcea58353957fe4" + version "0.1.17" + resolved "https://registry.yarnpkg.com/node-dir/-/node-dir-0.1.17.tgz#5f5665d93351335caabef8f1c554516cf5f1e4e5" dependencies: minimatch "^3.0.2" +node-fetch@^1.0.1: + version "1.7.1" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-1.7.1.tgz#899cb3d0a3c92f952c47f1b876f4c8aeabd400d5" + dependencies: + encoding "^0.1.11" + is-stream "^1.0.1" + node-libs-browser@^0.7.0: version "0.7.0" resolved "https://registry.yarnpkg.com/node-libs-browser/-/node-libs-browser-0.7.0.tgz#3e272c0819e308935e26674408d7af0e1491b83b" @@ -2942,37 +3109,55 @@ node-libs-browser@^0.7.0: util "^0.10.3" vm-browserify "0.0.4" -node-pre-gyp@^0.6.29: - version "0.6.32" - resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.6.32.tgz#fc452b376e7319b3d255f5f34853ef6fd8fe1fd5" +node-pre-gyp@^0.6.36: + version "0.6.36" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.6.36.tgz#db604112cb74e0d477554e9b505b17abddfab786" dependencies: - mkdirp "~0.5.1" - nopt "~3.0.6" - npmlog "^4.0.1" - rc "~1.1.6" - request "^2.79.0" - rimraf "~2.5.4" - semver "~5.3.0" - tar "~2.2.1" - tar-pack "~3.3.0" + mkdirp "^0.5.1" + nopt "^4.0.1" + npmlog "^4.0.2" + rc "^1.1.7" + request "^2.81.0" + rimraf "^2.6.1" + semver "^5.3.0" + tar "^2.2.1" + tar-pack "^3.4.0" -nopt@~3.0.1, nopt@~3.0.6: +nopt@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d" + dependencies: + abbrev "1" + osenv "^0.1.4" + +nopt@~3.0.1: version "3.0.6" resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9" dependencies: abbrev "1" +normalize-package-data@^2.3.2: + version "2.3.8" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.3.8.tgz#d819eda2a9dedbd1ffa563ea4071d936782295bb" + dependencies: + hosted-git-info "^2.1.4" + is-builtin-module "^1.0.0" + semver "2 || 3 || 4 || 5" + 
validate-npm-package-license "^3.0.1" + normalize-path@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.0.1.tgz#47886ac1662760d4261b7d979d241709d3ce3f7a" + version "2.1.1" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" + dependencies: + remove-trailing-separator "^1.0.1" normalize-range@^0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" normalize-url@^1.4.0: - version "1.9.0" - resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-1.9.0.tgz#c2bb50035edee62cd81edb2d45da68dc25e3423e" + version "1.9.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-1.9.1.tgz#2cc0d66b31ea23036458436e3620d85954c66c3c" dependencies: object-assign "^4.0.1" prepend-http "^1.0.0" @@ -2985,6 +3170,12 @@ npm-path@^1.0.0, npm-path@^1.0.1: dependencies: which "^1.2.4" +npm-run-path@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" + dependencies: + path-key "^2.0.0" + npm-run@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/npm-run/-/npm-run-3.0.0.tgz#568920f840a98fd8e2299db66b2616e2476caf69" @@ -3003,13 +3194,13 @@ npm-which@^2.0.0: npm-path "^1.0.0" which "^1.0.5" -npmlog@^4.0.1: - version "4.0.2" - resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.0.2.tgz#d03950e0e78ce1527ba26d2a7592e9348ac3e75f" +npmlog@^4.0.2: + version "4.1.0" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.0.tgz#dc59bee85f64f00ed424efb2af0783df25d1c0b5" dependencies: are-we-there-yet "~1.1.2" console-control-strings "~1.1.0" - gauge "~2.7.1" + gauge "~2.7.3" set-blocking "~2.0.0" nth-check@~1.0.1: @@ -3042,7 +3233,7 @@ object-assign@^2.0.0: version "2.1.1" resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa" -object-assign@^4.0.1, object-assign@^4.1.0: +object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: version "4.1.1" resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" @@ -3051,14 +3242,14 @@ object-component@0.0.3: resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291" object-inspect@^1.1.0: - version "1.2.1" - resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.2.1.tgz#3b62226eb8f6d441751c7d8f22a20ff80ac9dc3f" + version "1.2.2" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.2.2.tgz#c82115e4fcc888aea14d64c22e4f17f6a70d5e5a" object-keys@^1.0.8, object-keys@^1.0.9: version "1.0.11" resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.11.tgz#c54601778ad560f1142ce0e01bcca8b56d13426d" -object.entries@^1.0.3: +object.entries@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/object.entries/-/object.entries-1.0.4.tgz#1bf9a4dd2288f5b33f3a993d257661f05d161a5f" dependencies: @@ -3084,18 +3275,12 @@ on-headers@~1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.1.tgz#928f5d0f470d49342651ea6794b0857c100693f7" -once@^1.3.0: +once@^1.3.0, once@^1.3.3: version "1.4.0" resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" dependencies: wrappy "1" -once@~1.3.3: - version "1.3.3" - 
resolved "https://registry.yarnpkg.com/once/-/once-1.3.3.tgz#b2e261557ce4c314ec8304f3fa82663e4297ca20" - dependencies: - wrappy "1" - open@0.0.5: version "0.0.5" resolved "https://registry.yarnpkg.com/open/-/open-0.0.5.tgz#42c3e18ec95466b6bf0dc42f3a2945c3f0cad8fc" @@ -3125,16 +3310,25 @@ os-homedir@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" -os-locale@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9" +os-locale@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-2.0.0.tgz#15918ded510522b81ee7ae5a309d54f639fc39a4" dependencies: + execa "^0.5.0" lcid "^1.0.0" + mem "^1.1.0" -os-tmpdir@^1.0.1: +os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" +osenv@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644" + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.0" + output-file-sync@^1.1.0: version "1.1.2" resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-1.1.2.tgz#d0a33eefe61a205facb90092e826598d5245ce76" @@ -3143,13 +3337,27 @@ output-file-sync@^1.1.0: mkdirp "^0.5.1" object-assign "^4.1.0" +p-finally@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + +p-limit@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.1.0.tgz#b07ff2d9a5d88bec806035895a2bab66a27988bc" + +p-locate@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" + dependencies: + p-limit "^1.1.0" + pako@~0.2.0: version "0.2.9" resolved "https://registry.yarnpkg.com/pako/-/pako-0.2.9.tgz#f3f7522f4ef782348da8161bad9ecfd51bf83a75" param-case@2.1.x: - version "2.1.0" - resolved "https://registry.yarnpkg.com/param-case/-/param-case-2.1.0.tgz#2619f90fd6c829ed0b958f1c84ed03a745a6d70a" + version "2.1.1" + resolved "https://registry.yarnpkg.com/param-case/-/param-case-2.1.1.tgz#df94fd8cf6531ecf75e6bef9a0858fbc72be2247" dependencies: no-case "^2.2.0" @@ -3162,6 +3370,12 @@ parse-glob@^3.0.4: is-extglob "^1.0.0" is-glob "^2.0.0" +parse-json@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9" + dependencies: + error-ex "^1.2.0" + parsejson@0.0.3: version "0.0.3" resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab" @@ -3194,22 +3408,48 @@ path-exists@^2.0.0: dependencies: pinkie-promise "^2.0.0" +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + path-is-absolute@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" +path-key@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" + +path-parse@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1" + 
path-to-regexp@0.1.7: version "0.1.7" resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" +path-type@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-2.0.0.tgz#f012ccb8415b7096fc2daa1054c3d72389594c73" + dependencies: + pify "^2.0.0" + pbkdf2-compat@2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/pbkdf2-compat/-/pbkdf2-compat-2.0.1.tgz#b6e0c8fa99494d94e0511575802a59a5c142f288" -performance-now@~0.2.0: +performance-now@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5" +performance-now@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + +pify@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + pinkie-promise@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" @@ -3235,16 +3475,16 @@ postcss-calc@^5.2.0: reduce-css-calc "^1.2.6" postcss-colormin@^2.1.8: - version "2.2.1" - resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-2.2.1.tgz#dc5421b6ae6f779ef6bfd47352b94abe59d0316b" + version "2.2.2" + resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-2.2.2.tgz#6631417d5f0e909a3d7ec26b24c8a8d1e4f96e4b" dependencies: colormin "^1.0.5" postcss "^5.0.13" postcss-value-parser "^3.2.3" postcss-convert-values@^2.3.4: - version "2.6.0" - resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-2.6.0.tgz#08c6d06130fe58a91a21ff50829e1aad6a3a1acc" + version "2.6.1" + resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-2.6.1.tgz#bbd8593c5c1fd2e3d1c322bb925dcae8dae4d62d" dependencies: postcss "^5.0.11" postcss-value-parser "^3.1.2" @@ -3256,8 +3496,8 @@ postcss-discard-comments@^2.0.4: postcss "^5.0.14" postcss-discard-duplicates@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-2.0.2.tgz#02be520e91571ffb10738766a981d5770989bb32" + version "2.1.0" + resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-2.1.0.tgz#b9abf27b88ac188158a5eb12abcae20263b91932" dependencies: postcss "^5.0.4" @@ -3302,8 +3542,8 @@ postcss-merge-longhand@^2.0.1: postcss "^5.0.4" postcss-merge-rules@^2.0.3: - version "2.1.1" - resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-2.1.1.tgz#5e5640020ce43cddd343c73bba91c9a358d1fe0f" + version "2.1.2" + resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-2.1.2.tgz#d1df5dfaa7b1acc3be553f0e9e10e87c61b5f721" dependencies: browserslist "^1.5.2" caniuse-api "^1.5.2" @@ -3349,31 +3589,31 @@ postcss-minify-selectors@^2.0.4: postcss-selector-parser "^2.0.0" postcss-modules-extract-imports@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-1.0.1.tgz#8fb3fef9a6dd0420d3f6d4353cf1ff73f2b2a341" + version "1.2.0" + resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-1.2.0.tgz#66140ecece38ef06bf0d3e355d69bf59d141ea85" dependencies: - postcss "^5.0.4" + postcss "^6.0.1" 
postcss-modules-local-by-default@^1.0.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-1.1.1.tgz#29a10673fa37d19251265ca2ba3150d9040eb4ce" + version "1.2.0" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-1.2.0.tgz#f7d80c398c5a393fa7964466bd19500a7d61c069" dependencies: - css-selector-tokenizer "^0.6.0" - postcss "^5.0.4" + css-selector-tokenizer "^0.7.0" + postcss "^6.0.1" postcss-modules-scope@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-1.0.2.tgz#ff977395e5e06202d7362290b88b1e8cd049de29" + version "1.1.0" + resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-1.1.0.tgz#d6ea64994c79f97b62a72b426fbe6056a194bb90" dependencies: - css-selector-tokenizer "^0.6.0" - postcss "^5.0.4" + css-selector-tokenizer "^0.7.0" + postcss "^6.0.1" postcss-modules-values@^1.1.0: - version "1.2.2" - resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-1.2.2.tgz#f0e7d476fe1ed88c5e4c7f97533a3e772ad94ca1" + version "1.3.0" + resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-1.3.0.tgz#ecffa9d7e192518389f42ad0e83f72aec456ea20" dependencies: - icss-replace-symbols "^1.0.2" - postcss "^5.0.14" + icss-replace-symbols "^1.1.0" + postcss "^6.0.1" postcss-normalize-charset@^1.1.0: version "1.1.1" @@ -3419,8 +3659,8 @@ postcss-reduce-transforms@^1.0.3: postcss-value-parser "^3.0.1" postcss-selector-parser@^2.0.0, postcss-selector-parser@^2.2.2: - version "2.2.2" - resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-2.2.2.tgz#3d70f5adda130da51c7c0c2fc023f56b1374fe08" + version "2.2.3" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-2.2.3.tgz#f9437788606c3c9acee16ffe8d8b16297f27bb90" dependencies: flatten "^1.0.2" indexes-of "^1.0.1" @@ -3455,15 +3695,23 @@ postcss-zindex@^2.0.1: postcss "^5.0.4" uniqs "^2.0.0" -postcss@^5.0.10, postcss@^5.0.11, postcss@^5.0.12, postcss@^5.0.13, postcss@^5.0.14, postcss@^5.0.16, postcss@^5.0.2, postcss@^5.0.4, postcss@^5.0.5, postcss@^5.0.6, postcss@^5.0.8, postcss@^5.2.11: - version "5.2.11" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-5.2.11.tgz#ff29bcd6d2efb98bfe08a022055ec599bbe7b761" +postcss@^5.0.10, postcss@^5.0.11, postcss@^5.0.12, postcss@^5.0.13, postcss@^5.0.14, postcss@^5.0.16, postcss@^5.0.2, postcss@^5.0.4, postcss@^5.0.5, postcss@^5.0.6, postcss@^5.0.8, postcss@^5.2.16: + version "5.2.17" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-5.2.17.tgz#cf4f597b864d65c8a492b2eabe9d706c879c388b" dependencies: chalk "^1.1.3" js-base64 "^2.1.9" source-map "^0.5.6" supports-color "^3.2.3" +postcss@^6.0.1: + version "6.0.3" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-6.0.3.tgz#b7f565b3d956fbb8565ca7c1e239d0506e427d8b" + dependencies: + chalk "^1.1.3" + source-map "^0.5.6" + supports-color "^4.0.0" + prefix-style@2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/prefix-style/-/prefix-style-2.0.1.tgz#66bba9a870cfda308a5dc20e85e9120932c95a06" @@ -3477,30 +3725,37 @@ preserve@^0.2.0: resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b" pretty-error@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-2.0.2.tgz#a7db19cbb529ca9f0af3d3a2f77d5caf8e5dec23" + 
version "2.1.1" + resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-2.1.1.tgz#5f4f87c8f91e5ae3f3ba87ab4cf5e03b1a17f1a3" dependencies: - renderkid "~2.0.0" + renderkid "^2.0.1" utila "~0.4" private@^0.1.6, private@~0.1.5: - version "0.1.6" - resolved "https://registry.yarnpkg.com/private/-/private-0.1.6.tgz#55c6a976d0f9bafb9924851350fe47b9b5fbb7c1" + version "0.1.7" + resolved "https://registry.yarnpkg.com/private/-/private-0.1.7.tgz#68ce5e8a1ef0a23bb570cc28537b5332aba63ef1" process-nextick-args@~1.0.6: version "1.0.7" resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3" process@^0.11.0: - version "0.11.9" - resolved "https://registry.yarnpkg.com/process/-/process-0.11.9.tgz#7bd5ad21aa6253e7da8682264f1e11d11c0318c1" + version "0.11.10" + resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" promise@^7.0.3, promise@^7.1.1: - version "7.1.1" - resolved "https://registry.yarnpkg.com/promise/-/promise-7.1.1.tgz#489654c692616b8aa55b0724fa809bb7db49c5bf" + version "7.3.1" + resolved "https://registry.yarnpkg.com/promise/-/promise-7.3.1.tgz#064b72602b18f90f29192b8b1bc418ffd1ebd3bf" dependencies: asap "~2.0.3" +prop-types@^15.5.4, prop-types@^15.5.7, prop-types@^15.5.8: + version "15.5.10" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.5.10.tgz#2797dfc3126182e3a95e3dfbb2e893ddd7456154" + dependencies: + fbjs "^0.8.9" + loose-envify "^1.3.1" + proto-list@~1.2.1: version "1.2.4" resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849" @@ -3509,18 +3764,18 @@ protochain@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/protochain/-/protochain-1.0.5.tgz#991c407e99de264aadf8f81504b5e7faf7bfa260" -proxy-addr@~1.1.2: - version "1.1.3" - resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-1.1.3.tgz#dc97502f5722e888467b3fa2297a7b1ff47df074" +proxy-addr@~1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-1.1.4.tgz#27e545f6960a44a627d9b44467e35c1b6b4ce2f3" dependencies: forwarded "~0.1.0" - ipaddr.js "1.2.0" + ipaddr.js "1.3.0" prr@~0.0.0: version "0.0.0" resolved "https://registry.yarnpkg.com/prr/-/prr-0.0.0.tgz#1a84b85908325501411853d0081ee3fa86e2926a" -pseudomap@^1.0.1: +pseudomap@^1.0.1, pseudomap@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" @@ -3533,14 +3788,14 @@ punycode@^1.2.4, punycode@^1.4.1: resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" purify-css@^1.1.1: - version "1.1.9" - resolved "https://registry.yarnpkg.com/purify-css/-/purify-css-1.1.9.tgz#46c9acd8940f3076c0c346c027e286f996168357" + version "1.2.6" + resolved "https://registry.yarnpkg.com/purify-css/-/purify-css-1.2.6.tgz#56c2b58e4e1134247c6fdc20ba443b51b45e8b2f" dependencies: - clean-css "^3.2.10" - glob "^6.0.4" + clean-css "^4.0.12" + glob "^7.1.1" rework "^1.0.1" - uglifyjs "^2.4.10" - yargs "^3.10.0" + uglify-js "^3.0.6" + yargs "^8.0.1" purifycss-webpack-plugin@^2.0.3: version "2.0.3" @@ -3551,24 +3806,16 @@ purifycss-webpack-plugin@^2.0.3: webpack-sources "^0.1.0" q@^1.1.2: - version "1.4.1" - resolved "https://registry.yarnpkg.com/q/-/q-1.4.1.tgz#55705bcd93c5f3673530c2c2cbc0c2b3addc286e" + version "1.5.0" + resolved 
"https://registry.yarnpkg.com/q/-/q-1.5.0.tgz#dd01bac9d06d30e6f219aecb8253ee9ebdc308f1" qs@2.3.3: version "2.3.3" resolved "https://registry.yarnpkg.com/qs/-/qs-2.3.3.tgz#e9e85adbe75da0bbe4c8e0476a086290f863b404" -qs@6.2.0: - version "6.2.0" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.2.0.tgz#3b7848c03c2dece69a9522b0fae8c4126d745f3b" - -qs@6.2.1: - version "6.2.1" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.2.1.tgz#ce03c5ff0935bc1d9d69a9f14cbd18e568d67625" - -qs@~6.3.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.3.0.tgz#f403b264f23bc01228c74131b407f18d5ea5d442" +qs@6.4.0, qs@~6.4.0: + version "6.4.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233" query-string@^3.0.0: version "3.0.3" @@ -3577,8 +3824,8 @@ query-string@^3.0.0: strict-uri-encode "^1.0.0" query-string@^4.1.0: - version "4.3.1" - resolved "https://registry.yarnpkg.com/query-string/-/query-string-4.3.1.tgz#54baada6713eafc92be75c47a731f2ebd09cd11d" + version "4.3.4" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-4.3.4.tgz#bbb693b9ca915c232515b228b1a02b609043dbeb" dependencies: object-assign "^4.1.0" strict-uri-encode "^1.0.0" @@ -3595,18 +3842,22 @@ querystringify@0.0.x: version "0.0.4" resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-0.0.4.tgz#0cf7f84f9463ff0ae51c4c4b142d95be37724d9c" +querystringify@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-1.0.0.tgz#6286242112c5b712fa654e526652bf6a13ff05cb" + raf@^3.1.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/raf/-/raf-3.3.0.tgz#93845eeffc773f8129039f677f80a36044eee2c3" + version "3.3.2" + resolved "https://registry.yarnpkg.com/raf/-/raf-3.3.2.tgz#0c13be0b5b49b46f76d6669248d527cf2b02fe27" dependencies: - performance-now "~0.2.0" + performance-now "^2.1.0" randomatic@^1.1.3: - version "1.1.6" - resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb" + version "1.1.7" + resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.7.tgz#c7abe9cc8b87c0baa876b19fde83fd464797e38c" dependencies: - is-number "^2.0.2" - kind-of "^3.0.2" + is-number "^3.0.0" + kind-of "^4.0.0" range-parser@^1.0.3, range-parser@~1.2.0: version "1.2.0" @@ -3620,14 +3871,14 @@ raw-body@~2.2.0: iconv-lite "0.4.15" unpipe "1.0.0" -rc@~1.1.6: - version "1.1.6" - resolved "https://registry.yarnpkg.com/rc/-/rc-1.1.6.tgz#43651b76b6ae53b5c802f1151fa3fc3b059969c9" +rc@^1.1.7: + version "1.2.1" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.1.tgz#2e03e8e42ee450b8cb3dce65be1bf8974e1dfd95" dependencies: deep-extend "~0.4.0" ini "~1.3.0" minimist "^1.2.0" - strip-json-comments "~1.0.4" + strip-json-comments "~2.0.1" react-addons-test-utils@^0.14.8: version "0.14.8" @@ -3649,10 +3900,12 @@ react-bootstrap@^0.28.5: warning "^2.1.0" react-copy-to-clipboard@^4.2.3: - version "4.2.3" - resolved "https://registry.yarnpkg.com/react-copy-to-clipboard/-/react-copy-to-clipboard-4.2.3.tgz#268c5a0fbde9a95d96145014e7f85110b0e7fb8e" + version "4.3.1" + resolved "https://registry.yarnpkg.com/react-copy-to-clipboard/-/react-copy-to-clipboard-4.3.1.tgz#aa429ce6029077c987e2bc4af7eec9a09ba5075b" dependencies: copy-to-clipboard "^3" + create-react-class "^15.5.2" + prop-types "^15.5.8" react-custom-scrollbars@^2.2.2: version "2.3.0" @@ -3662,29 +3915,32 @@ react-custom-scrollbars@^2.2.2: raf "^3.1.0" react-dom@^0.14.6: - version "0.14.8" - resolved 
"https://registry.yarnpkg.com/react-dom/-/react-dom-0.14.8.tgz#0f1c547514263f771bd31814a739e5306575069e" + version "0.14.9" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-0.14.9.tgz#05064a3dcf0fb1880a3b2bfc9d58c55d8d9f6293" react-dropzone@^3.5.3: - version "3.9.1" - resolved "https://registry.yarnpkg.com/react-dropzone/-/react-dropzone-3.9.1.tgz#f90e0c7d603be3fb2253ee315063d424371a57bd" + version "3.13.3" + resolved "https://registry.yarnpkg.com/react-dropzone/-/react-dropzone-3.13.3.tgz#b8bde4b5a12842f85196b45e8cc2959834b81962" dependencies: attr-accept "^1.0.3" + prop-types "^15.5.7" react-infinite-scroller@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/react-infinite-scroller/-/react-infinite-scroller-1.0.6.tgz#bb406d70032d09fa9e4a3e2175d3adbf4a3f559d" + version "1.0.12" + resolved "https://registry.yarnpkg.com/react-infinite-scroller/-/react-infinite-scroller-1.0.12.tgz#a51fcb5dd82bc1b6207f69be747bd7b872917fa4" + dependencies: + prop-types "^15.5.8" react-onclickout@2.0.4: version "2.0.4" resolved "https://registry.yarnpkg.com/react-onclickout/-/react-onclickout-2.0.4.tgz#2c7539a647e1dcdcab0b28e2f4eae3c3e00f0c64" react-overlays@^0.6.0: - version "0.6.10" - resolved "https://registry.yarnpkg.com/react-overlays/-/react-overlays-0.6.10.tgz#e7e52dad47f00a0fc784eb044428c3a9e874bfa3" + version "0.6.12" + resolved "https://registry.yarnpkg.com/react-overlays/-/react-overlays-0.6.12.tgz#a079c750cc429d7db4c7474a95b4b54033e255c3" dependencies: classnames "^2.2.5" - dom-helpers "^2.4.0" + dom-helpers "^3.2.0" react-prop-types "^0.4.0" warning "^3.0.0" @@ -3701,13 +3957,15 @@ react-prop-types@^0.4.0: warning "^3.0.0" react-redux@^4.4.5: - version "4.4.6" - resolved "https://registry.yarnpkg.com/react-redux/-/react-redux-4.4.6.tgz#4b9d32985307a11096a2dd61561980044fcc6209" + version "4.4.8" + resolved "https://registry.yarnpkg.com/react-redux/-/react-redux-4.4.8.tgz#e7bc1dd100e8b64e96ac8212db113239b9e2e08f" dependencies: + create-react-class "^15.5.1" hoist-non-react-statics "^1.0.3" invariant "^2.0.0" lodash "^4.2.0" loose-envify "^1.1.0" + prop-types "^15.5.4" react-router@^2.8.1: version "2.8.1" @@ -3720,12 +3978,27 @@ react-router@^2.8.1: warning "^3.0.0" react@^0.14.8: - version "0.14.8" - resolved "https://registry.yarnpkg.com/react/-/react-0.14.8.tgz#078dfa454d4745bcc54a9726311c2bf272c23684" + version "0.14.9" + resolved "https://registry.yarnpkg.com/react/-/react-0.14.9.tgz#9110a6497c49d44ba1c0edd317aec29c2e0d91d1" dependencies: envify "^3.0.0" fbjs "^0.6.1" +read-pkg-up@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-2.0.0.tgz#6b72a8048984e0c41e79510fd5e9fa99b3b549be" + dependencies: + find-up "^2.0.0" + read-pkg "^2.0.0" + +read-pkg@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-2.0.0.tgz#8ef1c0623c6a6db0dc6713c4bfac46332b2368f8" + dependencies: + load-json-file "^2.0.0" + normalize-package-data "^2.3.2" + path-type "^2.0.0" + readable-stream@1.0, readable-stream@~1.0.2: version "1.0.34" resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c" @@ -3744,28 +4017,16 @@ readable-stream@1.0.27-1: isarray "0.0.1" string_decoder "~0.10.x" -"readable-stream@^2.0.0 || ^1.1.13", readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.5, readable-stream@^2.1.0: - version "2.2.2" - resolved 
"https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.2.tgz#a9e6fec3c7dda85f8bb1b3ba7028604556fc825e" +readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.5, readable-stream@^2.0.6, readable-stream@^2.1.4, readable-stream@^2.2.6: + version "2.3.2" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.2.tgz#5a04df05e4f57fe3f0dc68fdd11dc5c97c7e6f4d" dependencies: - buffer-shims "^1.0.0" core-util-is "~1.0.0" - inherits "~2.0.1" + inherits "~2.0.3" isarray "~1.0.0" process-nextick-args "~1.0.6" - string_decoder "~0.10.x" - util-deprecate "~1.0.1" - -readable-stream@~2.1.4: - version "2.1.5" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.1.5.tgz#66fa8b720e1438b364681f2ad1a63c618448c9d0" - dependencies: - buffer-shims "^1.0.0" - core-util-is "~1.0.0" - inherits "~2.0.1" - isarray "~1.0.0" - process-nextick-args "~1.0.6" - string_decoder "~0.10.x" + safe-buffer "~5.1.0" + string_decoder "~1.0.0" util-deprecate "~1.0.1" readdirp@^2.0.0: @@ -3778,20 +4039,14 @@ readdirp@^2.0.0: set-immediate-shim "^1.0.1" recast@^0.11.17: - version "0.11.20" - resolved "https://registry.yarnpkg.com/recast/-/recast-0.11.20.tgz#2cb9bec269c03b36d0598118a936cd0a293ca3f3" + version "0.11.23" + resolved "https://registry.yarnpkg.com/recast/-/recast-0.11.23.tgz#451fd3004ab1e4df9b4e4b66376b2a21912462d3" dependencies: - ast-types "0.9.4" + ast-types "0.9.6" esprima "~3.1.0" private "~0.1.5" source-map "~0.5.0" -rechoir@^0.6.2: - version "0.6.2" - resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.6.2.tgz#85204b54dba82d5742e28c96756ef43af50e3384" - dependencies: - resolve "^1.1.6" - reduce-component@1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/reduce-component/-/reduce-component-1.0.1.tgz#e0c93542c574521bea13df0f9488ed82ab77c5da" @@ -3815,25 +4070,25 @@ redux-thunk@^1.0.3: resolved "https://registry.yarnpkg.com/redux-thunk/-/redux-thunk-1.0.3.tgz#778aa0099eea0595031ab6b39165f6670d8d26bd" redux@^3.6.0: - version "3.6.0" - resolved "https://registry.yarnpkg.com/redux/-/redux-3.6.0.tgz#887c2b3d0b9bd86eca2be70571c27654c19e188d" + version "3.7.0" + resolved "https://registry.yarnpkg.com/redux/-/redux-3.7.0.tgz#07a623cafd92eee8abe309d13d16538f6707926f" dependencies: lodash "^4.2.1" lodash-es "^4.2.1" loose-envify "^1.1.0" - symbol-observable "^1.0.2" + symbol-observable "^1.0.3" regenerate@^1.2.1: version "1.3.2" resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260" regenerator-runtime@^0.10.0: - version "0.10.1" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.1.tgz#257f41961ce44558b18f7814af48c17559f9faeb" + version "0.10.5" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz#336c3efc1220adcedda2c9fab67b5a7955a33658" -regenerator-transform@0.9.8: - version "0.9.8" - resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.9.8.tgz#0f88bb2bc03932ddb7b6b7312e68078f01026d6c" +regenerator-transform@0.9.11: + version "0.9.11" + resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.9.11.tgz#3a7d067520cb7b7176769eb5ff868691befe1283" dependencies: babel-runtime "^6.18.0" babel-types "^6.19.0" @@ -3841,7 +4096,7 @@ regenerator-transform@0.9.8: regex-cache@^0.4.2: version "0.4.3" - resolved "http://registry.npmjs.org/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145" + resolved 
"https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145" dependencies: is-equal-shallow "^0.1.3" is-primitive "^2.0.0" @@ -3876,9 +4131,13 @@ relateurl@0.2.x: version "0.2.7" resolved "https://registry.yarnpkg.com/relateurl/-/relateurl-0.2.7.tgz#54dbf377e51440aca90a4cd274600d3ff2d888a9" -renderkid@~2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/renderkid/-/renderkid-2.0.0.tgz#1859753e7a5adbf35443aba0d4e4579e78abee85" +remove-trailing-separator@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.0.2.tgz#69b062d978727ad14dc6b56ba4ab772fd8d70511" + +renderkid@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/renderkid/-/renderkid-2.0.1.tgz#898cabfc8bede4b7b91135a3ffd323e58c0db319" dependencies: css-select "^1.1.0" dom-converter "~0.1" @@ -3904,18 +4163,18 @@ repeating@^2.0.0: dependencies: is-finite "^1.0.0" -request@^2.72.0, request@^2.79.0: - version "2.79.0" - resolved "https://registry.yarnpkg.com/request/-/request-2.79.0.tgz#4dfe5bf6be8b8cdc37fcf93e04b65577722710de" +request@^2.72.0, request@^2.81.0: + version "2.81.0" + resolved "https://registry.yarnpkg.com/request/-/request-2.81.0.tgz#c6928946a0e06c5f8d6f8a9333469ffda46298a0" dependencies: aws-sign2 "~0.6.0" aws4 "^1.2.1" - caseless "~0.11.0" + caseless "~0.12.0" combined-stream "~1.0.5" extend "~3.0.0" forever-agent "~0.6.1" form-data "~2.1.1" - har-validator "~2.0.6" + har-validator "~4.2.1" hawk "~3.1.3" http-signature "~1.1.0" is-typedarray "~1.0.0" @@ -3923,12 +4182,22 @@ request@^2.72.0, request@^2.79.0: json-stringify-safe "~5.0.1" mime-types "~2.1.7" oauth-sign "~0.8.1" - qs "~6.3.0" + performance-now "^0.2.0" + qs "~6.4.0" + safe-buffer "^5.0.1" stringstream "~0.0.4" tough-cookie "~2.3.0" - tunnel-agent "~0.4.1" + tunnel-agent "^0.6.0" uuid "^3.0.0" +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + +require-main-filename@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1" + requires-port@1.0.x, requires-port@1.x.x: version "1.0.0" resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" @@ -3938,8 +4207,10 @@ resolve-url@~0.2.1: resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" resolve@^1.1.5, resolve@^1.1.6: - version "1.2.0" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.2.0.tgz#9589c3f2f6149d1417a40becc1663db6ec6bc26c" + version "1.3.3" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.3.3.tgz#655907c3469a8680dc2de3a275a8fdd69691f0e5" + dependencies: + path-parse "^1.0.5" rework@^1.0.1: version "1.0.1" @@ -3954,9 +4225,9 @@ right-align@^0.1.1: dependencies: align-text "^0.1.1" -rimraf@2, rimraf@^2.3.3, rimraf@~2.5.1, rimraf@~2.5.4: - version "2.5.4" - resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.5.4.tgz#96800093cbf1a0c86bd95b4625467535c29dfa04" +rimraf@2, rimraf@^2.3.3, rimraf@^2.5.1, rimraf@^2.6.1: + version "2.6.1" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.1.tgz#c2338ec643df7a1b7fe5c54fa86f57428a55f33d" dependencies: glob "^7.0.5" @@ -4002,50 +4273,36 @@ rocambole-whitespace@^1.0.0: dependencies: esprima "^2.1" +safe-buffer@^5.0.1, 
safe-buffer@~5.1.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.1.tgz#893312af69b2123def71f57889001671eeb2c853" + sax@~1.2.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.1.tgz#7b8e656190b228e81a66aea748480d828cd2d37a" + version "1.2.4" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + +"semver@2 || 3 || 4 || 5", semver@^5.3.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f" semver@^4.3.1, semver@~4.3.3: version "4.3.6" resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da" -semver@~5.3.0: - version "5.3.0" - resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f" - -send@0.14.1: - version "0.14.1" - resolved "https://registry.yarnpkg.com/send/-/send-0.14.1.tgz#a954984325392f51532a7760760e459598c89f7a" +send@0.15.3: + version "0.15.3" + resolved "https://registry.yarnpkg.com/send/-/send-0.15.3.tgz#5013f9f99023df50d1bd9892c19e3defd1d53309" dependencies: - debug "~2.2.0" + debug "2.6.7" depd "~1.1.0" destroy "~1.0.4" encodeurl "~1.0.1" escape-html "~1.0.3" - etag "~1.7.0" - fresh "0.3.0" - http-errors "~1.5.0" + etag "~1.8.0" + fresh "0.5.0" + http-errors "~1.6.1" mime "1.3.4" - ms "0.7.1" - on-finished "~2.3.0" - range-parser "~1.2.0" - statuses "~1.3.0" - -send@0.14.2: - version "0.14.2" - resolved "https://registry.yarnpkg.com/send/-/send-0.14.2.tgz#39b0438b3f510be5dc6f667a11f71689368cdeef" - dependencies: - debug "~2.2.0" - depd "~1.1.0" - destroy "~1.0.4" - encodeurl "~1.0.1" - escape-html "~1.0.3" - etag "~1.7.0" - fresh "0.3.0" - http-errors "~1.5.1" - mime "1.3.4" - ms "0.7.2" + ms "2.0.0" on-finished "~2.3.0" range-parser "~1.2.0" statuses "~1.3.1" @@ -4057,27 +4314,27 @@ serializerr@^1.0.1: protochain "^1.0.5" serve-index@^1.7.2: - version "1.8.0" - resolved "https://registry.yarnpkg.com/serve-index/-/serve-index-1.8.0.tgz#7c5d96c13fb131101f93c1c5774f8516a1e78d3b" + version "1.9.0" + resolved "https://registry.yarnpkg.com/serve-index/-/serve-index-1.9.0.tgz#d2b280fc560d616ee81b48bf0fa82abed2485ce7" dependencies: accepts "~1.3.3" - batch "0.5.3" - debug "~2.2.0" + batch "0.6.1" + debug "2.6.8" escape-html "~1.0.3" - http-errors "~1.5.0" - mime-types "~2.1.11" + http-errors "~1.6.1" + mime-types "~2.1.15" parseurl "~1.3.1" -serve-static@~1.11.1: - version "1.11.2" - resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.11.2.tgz#2cf9889bd4435a320cc36895c9aa57bd662e6ac7" +serve-static@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.12.3.tgz#9f4ba19e2f3030c547f8af99107838ec38d5b1e2" dependencies: encodeurl "~1.0.1" escape-html "~1.0.3" parseurl "~1.3.1" - send "0.14.2" + send "0.15.3" -set-blocking@~2.0.0: +set-blocking@^2.0.0, set-blocking@~2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" @@ -4085,26 +4342,18 @@ set-immediate-shim@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz#4b2b1b27eb808a9f8dcc481a58e5e56f599f3f61" -setimmediate@^1.0.4: +setimmediate@^1.0.4, setimmediate@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" -setprototypeof@1.0.2: - version "1.0.2" - 
resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.2.tgz#81a552141ec104b88e89ce383103ad5c66564d08" +setprototypeof@1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04" sha.js@2.2.6: version "2.2.6" resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.2.6.tgz#17ddeddc5f722fb66501658895461977867315ba" -shelljs@^0.7.0: - version "0.7.6" - resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.7.6.tgz#379cccfb56b91c8601e4793356eb5382924de9ad" - dependencies: - glob "^7.0.0" - interpret "^1.0.0" - rechoir "^0.6.2" - sigmund@^1.0.1, sigmund@~1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590" @@ -4130,15 +4379,15 @@ socket.io-adapter@0.5.0: debug "2.3.3" socket.io-parser "2.3.1" -socket.io-client@1.7.2: - version "1.7.2" - resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.7.2.tgz#39fdb0c3dd450e321b7e40cfd83612ec533dd644" +socket.io-client@1.7.4: + version "1.7.4" + resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.7.4.tgz#ec9f820356ed99ef6d357f0756d648717bdd4281" dependencies: backo2 "1.0.2" component-bind "1.0.0" component-emitter "1.2.1" debug "2.3.3" - engine.io-client "1.8.2" + engine.io-client "~1.8.4" has-binary "0.1.7" indexof "0.0.1" object-component "0.0.3" @@ -4156,27 +4405,27 @@ socket.io-parser@2.3.1: json3 "3.3.2" socket.io@^1.4.5: - version "1.7.2" - resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.7.2.tgz#83bbbdf2e79263b378900da403e7843e05dc3b71" + version "1.7.4" + resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.7.4.tgz#2f7ecedc3391bf2d5c73e291fe233e6e34d4dd00" dependencies: debug "2.3.3" - engine.io "1.8.2" + engine.io "~1.8.4" has-binary "0.1.7" object-assign "4.1.0" socket.io-adapter "0.5.0" - socket.io-client "1.7.2" + socket.io-client "1.7.4" socket.io-parser "2.3.1" sockjs-client@^1.0.3: - version "1.1.2" - resolved "https://registry.yarnpkg.com/sockjs-client/-/sockjs-client-1.1.2.tgz#f0212a8550e4c9468c8cceaeefd2e3493c033ad5" + version "1.1.4" + resolved "https://registry.yarnpkg.com/sockjs-client/-/sockjs-client-1.1.4.tgz#5babe386b775e4cf14e7520911452654016c8b12" dependencies: - debug "^2.2.0" + debug "^2.6.6" eventsource "0.1.6" faye-websocket "~0.11.0" inherits "^2.0.1" json3 "^3.3.2" - url-parse "^1.1.1" + url-parse "^1.1.8" sockjs@^0.3.15: version "0.3.18" @@ -4205,26 +4454,18 @@ source-map-resolve@^0.3.0: urix "~0.1.0" source-map-support@^0.4.2: - version "0.4.10" - resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.10.tgz#d7b19038040a14c0837a18e630a196453952b378" + version "0.4.15" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.15.tgz#03202df65c06d2bd8c7ec2362a193056fef8d3b1" dependencies: - source-map "^0.5.3" + source-map "^0.5.6" source-map-url@~0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.3.0.tgz#7ecaf13b57bcd09da8a40c5d269db33799d4aaf9" -source-map@0.1.34: - version "0.1.34" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.34.tgz#a7cfe89aec7b1682c3b198d0acfb47d7d090566b" - dependencies: - amdefine ">=0.0.4" - -source-map@0.4.x, source-map@^0.4.2, source-map@~0.4.1: - version "0.4.4" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b" - dependencies: - amdefine ">=0.0.4" 
+source-map@0.5.x, source-map@^0.5.0, source-map@^0.5.3, source-map@^0.5.6, source-map@~0.5.0, source-map@~0.5.1, source-map@~0.5.3: + version "0.5.6" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412" source-map@^0.1.38, source-map@^0.1.41: version "0.1.43" @@ -4232,17 +4473,33 @@ source-map@^0.1.38, source-map@^0.1.41: dependencies: amdefine ">=0.0.4" -source-map@^0.5.0, source-map@^0.5.3, source-map@^0.5.6, source-map@~0.5.0, source-map@~0.5.1, source-map@~0.5.3: - version "0.5.6" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412" +source-map@^0.4.2, source-map@~0.4.1: + version "0.4.4" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b" + dependencies: + amdefine ">=0.0.4" + +spdx-correct@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-1.0.2.tgz#4b3073d933ff51f3912f03ac5519498a4150db40" + dependencies: + spdx-license-ids "^1.0.2" + +spdx-expression-parse@~1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c" + +spdx-license-ids@^1.0.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57" sprintf-js@~1.0.2: version "1.0.3" resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" sshpk@^1.7.0: - version "1.10.2" - resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.10.2.tgz#d5a804ce22695515638e798dbe23273de070a5fa" + version "1.13.1" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.1.tgz#512df6da6287144316dc4c18fe1cf1d940739be3" dependencies: asn1 "~0.2.3" assert-plus "^1.0.0" @@ -4251,11 +4508,10 @@ sshpk@^1.7.0: optionalDependencies: bcrypt-pbkdf "^1.0.0" ecc-jsbn "~0.1.1" - jodid25519 "^1.0.0" jsbn "~0.1.0" tweetnacl "~0.14.0" -"statuses@>= 1.3.1 < 2", statuses@~1.3.0, statuses@~1.3.1: +"statuses@>= 1.3.1 < 2", statuses@~1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e" @@ -4275,12 +4531,12 @@ stream-cache@~0.0.1: resolved "https://registry.yarnpkg.com/stream-cache/-/stream-cache-0.0.2.tgz#1ac5ad6832428ca55667dbdee395dad4e6db118f" stream-http@^2.3.1: - version "2.6.3" - resolved "https://registry.yarnpkg.com/stream-http/-/stream-http-2.6.3.tgz#4c3ddbf9635968ea2cfd4e48d43de5def2625ac3" + version "2.7.2" + resolved "https://registry.yarnpkg.com/stream-http/-/stream-http-2.7.2.tgz#40a050ec8dc3b53b33d9909415c02c0bf1abfbad" dependencies: builtin-status-codes "^3.0.0" inherits "^2.0.1" - readable-stream "^2.1.0" + readable-stream "^2.2.6" to-arraybuffer "^1.0.0" xtend "^4.0.0" @@ -4288,7 +4544,7 @@ strict-uri-encode@^1.0.0: version "1.1.0" resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" -string-width@^1.0.1: +string-width@^1.0.1, string-width@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" dependencies: @@ -4296,10 +4552,23 @@ string-width@^1.0.1: is-fullwidth-code-point "^1.0.0" strip-ansi "^3.0.0" +string-width@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/string-width/-/string-width-2.0.0.tgz#635c5436cc72a6e0c387ceca278d4e2eec52687e" + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^3.0.0" + string_decoder@^0.10.25, string_decoder@~0.10.x: version "0.10.31" resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" +string_decoder@~1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.3.tgz#0fc67d7c141825de94282dd536bec6b9bce860ab" + dependencies: + safe-buffer "~5.1.0" + stringstream@~0.0.4: version "0.0.5" resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878" @@ -4310,19 +4579,27 @@ strip-ansi@^3.0.0, strip-ansi@^3.0.1: dependencies: ansi-regex "^2.0.0" +strip-bom@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" + +strip-eof@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf" + strip-json-comments@~0.1.1: version "0.1.3" resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-0.1.3.tgz#164c64e370a8a3cc00c9e01b539e569823f0ee54" -strip-json-comments@~1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-1.0.4.tgz#1e15fbcac97d3ee99bf2d73b4c656b082bbafb91" +strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" style-loader@^0.13.1: - version "0.13.1" - resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-0.13.1.tgz#468280efbc0473023cd3a6cd56e33b5a1d7fc3a9" + version "0.13.2" + resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-0.13.2.tgz#74533384cf698c7104c7951150b49717adc2f3bb" dependencies: - loader-utils "^0.2.7" + loader-utils "^1.0.2" superagent-es6-promise@^1.0.0: version "1.0.0" @@ -4348,10 +4625,6 @@ supports-color@1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-1.2.0.tgz#ff1ed1e61169d06b3cf2d588e188b18d8847e17e" -supports-color@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a" - supports-color@^1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-1.3.1.tgz#15758df09d8ff3b4acc307539fabe27095e1042d" @@ -4366,19 +4639,25 @@ supports-color@^3.1.0, supports-color@^3.1.1, supports-color@^3.2.3: dependencies: has-flag "^1.0.0" +supports-color@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-4.0.0.tgz#33a7c680aa512c9d03ef929cacbb974d203d2790" + dependencies: + has-flag "^2.0.0" + svgo@^0.7.0: - version "0.7.1" - resolved "https://registry.yarnpkg.com/svgo/-/svgo-0.7.1.tgz#287320fed972cb097e72c2bb1685f96fe08f8034" + version "0.7.2" + resolved "https://registry.yarnpkg.com/svgo/-/svgo-0.7.2.tgz#9f5772413952135c6fefbf40afe6a4faa88b4bb5" dependencies: coa "~1.0.1" colors "~1.1.2" - csso "~2.2.1" - js-yaml "~3.6.1" + csso "~2.3.1" + js-yaml "~3.7.0" mkdirp "~0.5.1" sax "~1.2.1" whet.extend "~0.9.9" -symbol-observable@^1.0.2: +symbol-observable@^1.0.3: version "1.0.4" resolved 
"https://registry.yarnpkg.com/symbol-observable/-/symbol-observable-1.0.4.tgz#29bf615d4aa7121bdd898b22d4b3f9bc4e2aa03d" @@ -4390,20 +4669,20 @@ tapable@^0.1.8, tapable@~0.1.8: version "0.1.10" resolved "https://registry.yarnpkg.com/tapable/-/tapable-0.1.10.tgz#29c35707c2b70e50d07482b5d202e8ed446dafd4" -tar-pack@~3.3.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/tar-pack/-/tar-pack-3.3.0.tgz#30931816418f55afc4d21775afdd6720cee45dae" +tar-pack@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/tar-pack/-/tar-pack-3.4.0.tgz#23be2d7f671a8339376cbdb0b8fe3fdebf317984" dependencies: - debug "~2.2.0" - fstream "~1.0.10" - fstream-ignore "~1.0.5" - once "~1.3.3" - readable-stream "~2.1.4" - rimraf "~2.5.1" - tar "~2.2.1" - uid-number "~0.0.6" + debug "^2.2.0" + fstream "^1.0.10" + fstream-ignore "^1.0.5" + once "^1.3.3" + readable-stream "^2.1.4" + rimraf "^2.5.1" + tar "^2.2.1" + uid-number "^0.0.6" -tar@~2.2.1: +tar@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1" dependencies: @@ -4425,6 +4704,12 @@ tmatch@^2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/tmatch/-/tmatch-2.0.1.tgz#0c56246f33f30da1b8d3d72895abaf16660f38cf" +tmp@0.0.x: + version "0.0.31" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.31.tgz#8f38ab9438e17315e5dbd8b3657e8bfb277ae4a7" + dependencies: + os-tmpdir "~1.0.1" + to-array@0.1.4: version "0.1.4" resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890" @@ -4440,8 +4725,8 @@ to-camel-case@1.0.0: to-space-case "^1.0.0" to-fast-properties@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.2.tgz#f3f5c0c3ba7299a7ef99427e44633257ade43320" + version "1.0.3" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.3.tgz#b83571fa4d8c25b82e231b06e3a3055de4ca1a47" to-iso-string@0.0.2: version "0.0.2" @@ -4462,8 +4747,8 @@ toggle-selection@^1.0.3: resolved "https://registry.yarnpkg.com/toggle-selection/-/toggle-selection-1.0.5.tgz#726c703de607193a73c32c7df49cd24950fc574f" toposort@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/toposort/-/toposort-1.0.0.tgz#b66cf385a1a8a8e68e45b8259e7f55875e8b06ef" + version "1.0.3" + resolved "https://registry.yarnpkg.com/toposort/-/toposort-1.0.3.tgz#f02cd8a74bd8be2fc0e98611c3bacb95a171869c" tough-cookie@~2.3.0: version "2.3.2" @@ -4471,30 +4756,43 @@ tough-cookie@~2.3.0: dependencies: punycode "^1.4.1" +trim-right@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003" + tty-browserify@0.0.0: version "0.0.0" resolved "https://registry.yarnpkg.com/tty-browserify/-/tty-browserify-0.0.0.tgz#a157ba402da24e9bf957f9aa69d524eed42901a6" -tunnel-agent@~0.4.1: - version "0.4.3" - resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb" +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + dependencies: + safe-buffer "^5.0.1" tweetnacl@^0.14.3, tweetnacl@~0.14.0: version "0.14.5" resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" -type-is@~1.6.13, type-is@~1.6.14: - version "1.6.14" - resolved 
"https://registry.yarnpkg.com/type-is/-/type-is-1.6.14.tgz#e219639c17ded1ca0789092dd54a03826b817cb2" +type-is@~1.6.15: + version "1.6.15" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410" dependencies: media-typer "0.3.0" - mime-types "~2.1.13" + mime-types "~2.1.15" ua-parser-js@^0.7.9: - version "0.7.12" - resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.12.tgz#04c81a99bdd5dc52263ea29d24c6bf8d4818a4bb" + version "0.7.13" + resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.13.tgz#cd9dd2f86493b3f44dbeeef3780fda74c5ee14be" -uglify-js@2.7.x, uglify-js@~2.7.3: +uglify-js@3.0.x, uglify-js@^3.0.6: + version "3.0.19" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.0.19.tgz#ab1dfe2a171361b81fa9ffb3383461ea384557ed" + dependencies: + commander "~2.9.0" + source-map "~0.5.1" + +uglify-js@~2.7.3: version "2.7.5" resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.7.5.tgz#4612c0c7baaee2ba7c487de4904ae122079f2ca8" dependencies: @@ -4507,16 +4805,7 @@ uglify-to-browserify@~1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7" -uglifyjs@^2.4.10: - version "2.4.10" - resolved "https://registry.yarnpkg.com/uglifyjs/-/uglifyjs-2.4.10.tgz#632927319fa6a3da3fc91f9773ac27bfe6c3ee92" - dependencies: - async "~0.2.6" - source-map "0.1.34" - uglify-to-browserify "~1.0.0" - yargs "~1.3.3" - -uid-number@~0.0.6: +uid-number@^0.0.6: version "0.0.6" resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81" @@ -4557,11 +4846,11 @@ urix@^0.1.0, urix@~0.1.0: resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72" url-loader@^0.5.7: - version "0.5.7" - resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-0.5.7.tgz#67e8779759f8000da74994906680c943a9b0925d" + version "0.5.9" + resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-0.5.9.tgz#cc8fea82c7b906e7777019250869e569e995c295" dependencies: - loader-utils "0.2.x" - mime "1.2.x" + loader-utils "^1.0.2" + mime "1.3.x" url-parse@1.0.x: version "1.0.5" @@ -4570,11 +4859,11 @@ url-parse@1.0.x: querystringify "0.0.x" requires-port "1.0.x" -url-parse@^1.1.1: - version "1.1.7" - resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.1.7.tgz#025cff999653a459ab34232147d89514cc87d74a" +url-parse@^1.1.8: + version "1.1.9" + resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.1.9.tgz#c67f1d775d51f0a18911dd7b3ffad27bb9e5bd19" dependencies: - querystringify "0.0.x" + querystringify "~1.0.0" requires-port "1.0.x" url@^0.11.0: @@ -4595,10 +4884,11 @@ user-home@^2.0.0: os-homedir "^1.0.0" useragent@^2.1.6: - version "2.1.11" - resolved "https://registry.yarnpkg.com/useragent/-/useragent-2.1.11.tgz#6a026e6a6c619b46ca7a0b2fdef6c1ac3da8ca29" + version "2.1.13" + resolved "https://registry.yarnpkg.com/useragent/-/useragent-2.1.13.tgz#bba43e8aa24d5ceb83c2937473e102e21df74c10" dependencies: lru-cache "2.2.x" + tmp "0.0.x" util-deprecate@~1.0.1: version "1.0.2" @@ -4627,18 +4917,25 @@ uuid@^2.0.2: resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a" uuid@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.0.1.tgz#6544bba2dfda8c1cf17e629a3a305e2bb1fee6c1" + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/uuid/-/uuid-3.1.0.tgz#3dd3d3e790abc24d7b0d3a034ffababe28ebbc04" v8flags@^2.0.10: - version "2.0.11" - resolved "https://registry.yarnpkg.com/v8flags/-/v8flags-2.0.11.tgz#bca8f30f0d6d60612cc2c00641e6962d42ae6881" + version "2.1.1" + resolved "https://registry.yarnpkg.com/v8flags/-/v8flags-2.1.1.tgz#aab1a1fa30d45f88dd321148875ac02c0b55e5b4" dependencies: user-home "^1.1.1" -vary@~1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.0.tgz#e1e5affbbd16ae768dd2674394b9ad3022653140" +validate-npm-package-license@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz#2804babe712ad3379459acfbe24746ab2c303fbc" + dependencies: + spdx-correct "~1.0.0" + spdx-expression-parse "~1.0.0" + +vary@~1.1.0, vary@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.1.tgz#67535ebb694c1d52257457984665323f587e8d37" vendors@^1.0.0: version "1.0.1" @@ -4687,9 +4984,9 @@ webpack-core@~0.6.9: source-list-map "~0.1.7" source-map "~0.4.1" -webpack-dev-middleware@^1.0.11, webpack-dev-middleware@^1.4.0: - version "1.9.0" - resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-1.9.0.tgz#a1c67a3dfd8a5c5d62740aa0babe61758b4c84aa" +webpack-dev-middleware@^1.0.11, webpack-dev-middleware@^1.10.2: + version "1.11.0" + resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-1.11.0.tgz#09691d0973a30ad1f82ac73a12e2087f0a4754f9" dependencies: memory-fs "~0.4.1" mime "^1.3.4" @@ -4697,8 +4994,8 @@ webpack-dev-middleware@^1.0.11, webpack-dev-middleware@^1.4.0: range-parser "^1.0.3" webpack-dev-server@^1.14.1: - version "1.16.2" - resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-1.16.2.tgz#8bebc2c4ce1c45a15c72dd769d9ba08db306a793" + version "1.16.5" + resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-1.16.5.tgz#0cbd5f2d2ac8d4e593aacd5c9702e7bbd5e59892" dependencies: compression "^1.5.2" connect-history-api-fallback "^1.3.0" @@ -4712,18 +5009,18 @@ webpack-dev-server@^1.14.1: stream-cache "~0.0.1" strip-ansi "^3.0.0" supports-color "^3.1.1" - webpack-dev-middleware "^1.4.0" + webpack-dev-middleware "^1.10.2" webpack-sources@^0.1.0: - version "0.1.4" - resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-0.1.4.tgz#ccc2c817e08e5fa393239412690bb481821393cd" + version "0.1.5" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-0.1.5.tgz#aa1f3abf0f0d74db7111c40e500b84f966640750" dependencies: source-list-map "~0.1.7" source-map "~0.5.3" webpack@^1.12.11: - version "1.14.0" - resolved "https://registry.yarnpkg.com/webpack/-/webpack-1.14.0.tgz#54f1ffb92051a328a5b2057d6ae33c289462c823" + version "1.15.0" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-1.15.0.tgz#4ff31f53db03339e55164a9d468ee0324968fe98" dependencies: acorn "^3.0.0" async "^1.3.0" @@ -4751,6 +5048,10 @@ websocket-extensions@>=0.1.1: version "0.1.1" resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.1.tgz#76899499c184b6ef754377c2dbb0cd6cb55d29e7" +whatwg-fetch@>=0.10.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-2.0.3.tgz#9c84ec2dcf68187ff00bc64e1274b442176e1c84" + whatwg-fetch@^0.9.0: version "0.9.0" resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-0.9.0.tgz#0e3684c6cb9995b43efc9df03e4c365d95fd9cc0" @@ -4759,26 +5060,26 @@ 
whet.extend@~0.9.9: version "0.9.9" resolved "https://registry.yarnpkg.com/whet.extend/-/whet.extend-0.9.9.tgz#f877d5bf648c97e5aa542fadc16d6a259b9c11a1" -which@^1.0.5, which@^1.2.1, which@^1.2.4: - version "1.2.12" - resolved "https://registry.yarnpkg.com/which/-/which-1.2.12.tgz#de67b5e450269f194909ef23ece4ebe416fa1192" +which-module@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" + +which@^1.0.5, which@^1.2.1, which@^1.2.4, which@^1.2.9: + version "1.2.14" + resolved "https://registry.yarnpkg.com/which/-/which-1.2.14.tgz#9a87c4378f03e827cecaf1acdf56c736c01c14e5" dependencies: - isexe "^1.1.1" + isexe "^2.0.0" wide-align@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.0.tgz#40edde802a71fea1f070da3e62dcda2e7add96ad" + version "1.1.2" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.2.tgz#571e0f1b0604636ebc0dfc21b0339bbe31341710" dependencies: - string-width "^1.0.1" + string-width "^1.0.2" window-size@0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d" -window-size@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876" - wordwrap@0.0.2: version "0.0.2" resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f" @@ -4798,9 +5099,16 @@ wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" -ws@1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.1.tgz#082ddb6c641e85d4bb451f03d52f06eabdb1f018" +ws@1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.2.tgz#8a244fa052401e08c9886cf44a85189e1fd4067f" + dependencies: + options ">=0.0.5" + ultron "1.0.x" + +ws@1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.4.tgz#57f40d036832e5f5055662a397c4de76ed66bf61" dependencies: options ">=0.0.5" ultron "1.0.x" @@ -4821,25 +5129,37 @@ xtend@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af" -y18n@^3.2.0: +y18n@^3.2.1: version "3.2.1" resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41" -yargs@^3.10.0: - version "3.32.0" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.32.0.tgz#03088e9ebf9e756b69751611d2a5ef591482c995" - dependencies: - camelcase "^2.0.1" - cliui "^3.0.3" - decamelize "^1.1.1" - os-locale "^1.4.0" - string-width "^1.0.1" - window-size "^0.1.4" - y18n "^3.2.0" +yallist@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52" -yargs@~1.3.3: - version "1.3.3" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-1.3.3.tgz#054de8b61f22eefdb7207059eaef9d6b83fb931a" +yargs-parser@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-7.0.0.tgz#8d0ac42f16ea55debd332caf4c4038b3e3f5dfd9" + dependencies: + camelcase "^4.1.0" + +yargs@^8.0.1: + version "8.0.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-8.0.2.tgz#6299a9055b1cefc969ff7e79c1d918dceb22c360" + dependencies: + camelcase "^4.1.0" + cliui "^3.2.0" + decamelize "^1.1.1" + get-caller-file "^1.0.1" + os-locale "^2.0.0" + read-pkg-up "^2.0.0" 
+ require-directory "^2.1.1" + require-main-filename "^1.0.1" + set-blocking "^2.0.0" + string-width "^2.0.0" + which-module "^2.0.0" + y18n "^3.2.1" + yargs-parser "^7.0.0" yargs@~3.10.0: version "3.10.0" diff --git a/buildscripts/build.sh b/buildscripts/build.sh index 849e3565f..f7f9c435a 100755 --- a/buildscripts/build.sh +++ b/buildscripts/build.sh @@ -90,9 +90,18 @@ main() { go_build ${each_osarch} done else - for each_osarch in $(echo $chosen_osarch | sed 's/,/ /g'); do - go_build ${each_osarch} + local found=0 + for each_osarch in ${SUPPORTED_OSARCH}; do + if [ "$chosen_osarch" = "$each_osarch" ]; then + found=1 + fi done + if [ ${found} -eq 1 ]; then + go_build ${chosen_osarch} + else + echo "Unknown architecture \"${chosen_osarch}\"" + exit 1 + fi fi } diff --git a/buildscripts/healthcheck.sh b/buildscripts/healthcheck.sh new file mode 100644 index 000000000..142eca50e --- /dev/null +++ b/buildscripts/healthcheck.sh @@ -0,0 +1,32 @@ +#!/bin/sh +# +# Minio Cloud Storage, (C) 2017 Minio, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +_init () { + address="http://127.0.0.1:9000" + resource="/minio/index.html" +} + +HealthCheckMain () { + # Get the http response code + http_response=$(curl -s -o /dev/null -I -w "%{http_code}" ${address}${resource}) + + # If http_response is 200 - server is up. + # When MINIO_BROWSER is set to off, curl responds with 404. We assume that the server is up + [ "$http_response" == "200" ] || [ "$http_response" == "404" ] +} + +_init && HealthCheckMain diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go index 13f65aa69..a89434bc3 100644 --- a/cmd/admin-rpc-client.go +++ b/cmd/admin-rpc-client.go @@ -113,15 +113,15 @@ func (rc remoteAdminClient) ReInitDisks() error { } // ServerInfoData - Returns the server info of this server. -func (lc localAdminClient) ServerInfoData() (ServerInfoData, error) { +func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) { if globalBootTime.IsZero() { - return ServerInfoData{}, errServerNotInitialized + return sid, errServerNotInitialized } // Build storage info objLayer := newObjectLayerFn() if objLayer == nil { - return ServerInfoData{}, errServerNotInitialized + return sid, errServerNotInitialized } storage := objLayer.StorageInfo() @@ -145,12 +145,12 @@ func (lc localAdminClient) ServerInfoData() (ServerInfoData, error) { } // ServerInfo - returns the server info of the server to which the RPC call is made. -func (rc remoteAdminClient) ServerInfoData() (ServerInfoData, error) { +func (rc remoteAdminClient) ServerInfoData() (sid ServerInfoData, e error) { args := AuthRPCArgs{} reply := ServerInfoDataReply{} err := rc.Call(serverInfoDataRPC, &args, &reply) if err != nil { - return ServerInfoData{}, err + return sid, err } return reply.ServerInfoData, nil @@ -493,7 +493,7 @@ func getPeerConfig(peers adminPeers) ([]byte, error) { // getValidServerConfig - finds the server config that is present in // quorum or more number of servers.
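The quorum used here is a strict majority of the participating servers (`len(serverConfigs)/2 + 1` in the hunk below); only a config reported identically by at least that many servers is accepted, otherwise the call fails with `errXLWriteQuorum`. A minimal sketch of that rule in isolation (`pickQuorumConfig` and the string-keyed counting are illustrative only, not part of this patch):

```go
// Sketch of the majority rule: a config wins only if at least
// len(configs)/2 + 1 servers report the same serialized config.
func pickQuorumConfig(serializedConfigs []string) (string, bool) {
	quorum := len(serializedConfigs)/2 + 1 // e.g. 3 of 4 servers, 5 of 8
	counts := make(map[string]int)
	for _, cfg := range serializedConfigs {
		counts[cfg]++
	}
	for cfg, n := range counts {
		if n >= quorum {
			return cfg, true
		}
	}
	return "", false // no majority; the real code returns errXLWriteQuorum
}
```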
-func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (serverConfigV13, error) { +func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (scv serverConfigV13, e error) { // majority-based quorum quorum := len(serverConfigs)/2 + 1 @@ -566,7 +566,7 @@ func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (server // If quorum nodes don't agree. if maxOccurrence < quorum { - return serverConfigV13{}, errXLWriteQuorum + return scv, errXLWriteQuorum } return configJSON, nil diff --git a/cmd/admin-rpc-server.go b/cmd/admin-rpc-server.go index 25a03b310..f5b3d4855 100644 --- a/cmd/admin-rpc-server.go +++ b/cmd/admin-rpc-server.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "io/ioutil" - "net/rpc" "os" "path/filepath" "time" @@ -236,7 +235,7 @@ func (s *adminCmd) CommitConfig(cArgs *CommitConfigArgs, cReply *CommitConfigRep // stop and restart commands. func registerAdminRPCRouter(mux *router.Router) error { adminRPCHandler := &adminCmd{} - adminRPCServer := rpc.NewServer() + adminRPCServer := newRPCServer() err := adminRPCServer.RegisterName("Admin", adminRPCHandler) if err != nil { return traceError(err) diff --git a/cmd/api-errors.go b/cmd/api-errors.go index 4a10bbdb5..7bb60f87e 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -115,6 +115,7 @@ const ( ErrBucketAlreadyOwnedByYou ErrInvalidDuration ErrNotSupported + ErrBucketAlreadyExists // Add new error codes here. // Bucket notification related errors. @@ -317,7 +318,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{ }, ErrInvalidPart: { Code: "InvalidPart", - Description: "One or more of the specified parts could not be found.", + Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", HTTPStatusCode: http.StatusBadRequest, }, ErrInvalidPartOrder: { @@ -355,6 +356,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{ Description: "The bucket you tried to delete is not empty", HTTPStatusCode: http.StatusConflict, }, + ErrBucketAlreadyExists: { + Code: "BucketAlreadyExists", + Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", + HTTPStatusCode: http.StatusConflict, + }, ErrAllAccessDisabled: { Code: "AllAccessDisabled", Description: "All access to this bucket has been disabled.", @@ -663,6 +669,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) { apiErr = ErrStorageFull case BadDigest: apiErr = ErrBadDigest + case AllAccessDisabled: + apiErr = ErrAllAccessDisabled case IncompleteBody: apiErr = ErrIncompleteBody case ObjectExistsAsDirectory: @@ -677,6 +685,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) { apiErr = ErrBucketAlreadyOwnedByYou case BucketNotEmpty: apiErr = ErrBucketNotEmpty + case BucketAlreadyExists: + apiErr = ErrBucketAlreadyExists case BucketExists: apiErr = ErrBucketAlreadyOwnedByYou case ObjectNotFound: @@ -701,6 +711,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) { apiErr = ErrNoSuchUpload case PartTooSmall: apiErr = ErrEntityTooSmall + case SignatureDoesNotMatch: + apiErr = ErrSignatureDoesNotMatch case SHA256Mismatch: apiErr = ErrContentSHA256Mismatch case ObjectTooLarge: @@ -713,6 +725,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) { apiErr = ErrNotImplemented case PolicyNotFound: apiErr = ErrNoSuchBucketPolicy + case PartTooBig: + apiErr = ErrEntityTooLarge default: apiErr = ErrInternalError } diff --git a/cmd/api-headers.go b/cmd/api-headers.go index 9e510084b..94a5ebb2e 100644 --- a/cmd/api-headers.go +++ b/cmd/api-headers.go @@ -36,7 +36,11 @@ func setCommonHeaders(w http.ResponseWriter) { // Set unique request ID for each reply. w.Header().Set(responseRequestIDKey, mustGetRequestID(UTCNow())) w.Header().Set("Server", globalServerUserAgent) - w.Header().Set("X-Amz-Bucket-Region", serverConfig.GetRegion()) + // Set `x-amz-bucket-region` only if region is set on the server + // by default minio uses an empty region. + if region := serverConfig.GetRegion(); region != "" { + w.Header().Set("X-Amz-Bucket-Region", region) + } w.Header().Set("Accept-Ranges", "bytes") } diff --git a/cmd/api-response.go b/cmd/api-response.go index 794178c2b..9e139b9d7 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -288,8 +288,6 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse { var owner = Owner{} owner.ID = globalMinioDefaultOwnerID - owner.DisplayName = globalMinioDefaultOwnerID - for _, bucket := range buckets { var listbucket = Bucket{} listbucket.Name = bucket.Name @@ -312,8 +310,6 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max var data = ListObjectsResponse{} owner.ID = globalMinioDefaultOwnerID - owner.DisplayName = globalMinioDefaultOwnerID - for _, object := range resp.Objects { var content = Object{} if object.Name == "" { @@ -352,18 +348,17 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter string, max } // generates an ListObjectsV2 response for the said bucket with other enumerated options. 
-func generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter string, fetchOwner bool, maxKeys int, resp ListObjectsInfo) ListObjectsV2Response { +func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string) ListObjectsV2Response { var contents []Object - var prefixes []CommonPrefix + var commonPrefixes []CommonPrefix var owner = Owner{} var data = ListObjectsV2Response{} if fetchOwner { owner.ID = globalMinioDefaultOwnerID - owner.DisplayName = globalMinioDefaultOwnerID } - for _, object := range resp.Objects { + for _, object := range objects { var content = Object{} if object.Name == "" { continue @@ -387,14 +382,14 @@ func generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter data.Prefix = prefix data.MaxKeys = maxKeys data.ContinuationToken = token - data.NextContinuationToken = resp.NextMarker - data.IsTruncated = resp.IsTruncated - for _, prefix := range resp.Prefixes { + data.NextContinuationToken = nextToken + data.IsTruncated = isTruncated + for _, prefix := range prefixes { var prefixItem = CommonPrefix{} prefixItem.Prefix = prefix - prefixes = append(prefixes, prefixItem) + commonPrefixes = append(commonPrefixes, prefixItem) } - data.CommonPrefixes = prefixes + data.CommonPrefixes = commonPrefixes data.KeyCount = len(data.Contents) + len(data.CommonPrefixes) return data } @@ -443,9 +438,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo) ListPartsResponse { listPartsResponse.UploadID = partsInfo.UploadID listPartsResponse.StorageClass = globalMinioDefaultStorageClass listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID - listPartsResponse.Initiator.DisplayName = globalMinioDefaultOwnerID listPartsResponse.Owner.ID = globalMinioDefaultOwnerID - listPartsResponse.Owner.DisplayName = globalMinioDefaultOwnerID listPartsResponse.MaxParts = partsInfo.MaxParts listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker diff --git a/cmd/auth-rpc-server_test.go b/cmd/auth-rpc-server_test.go index 39ec686c0..81e4ea9e0 100644 --- a/cmd/auth-rpc-server_test.go +++ b/cmd/auth-rpc-server_test.go @@ -77,7 +77,7 @@ func TestLogin(t *testing.T) { // Invalid password length { args: LoginRPCArgs{ - Username: globalMinioDefaultOwnerID, + Username: "minio", Password: "aaa", Version: Version, }, diff --git a/cmd/browser-rpc-router.go b/cmd/browser-rpc-router.go index d88c5779d..fdf88c856 100644 --- a/cmd/browser-rpc-router.go +++ b/cmd/browser-rpc-router.go @@ -17,8 +17,6 @@ package cmd import ( - "net/rpc" - router "github.com/gorilla/mux" ) @@ -39,7 +37,7 @@ type browserPeerAPIHandlers struct { func registerBrowserPeerRPCRouter(mux *router.Router) error { bpHandlers := &browserPeerAPIHandlers{} - bpRPCServer := rpc.NewServer() + bpRPCServer := newRPCServer() err := bpRPCServer.RegisterName("BrowserPeer", bpHandlers) if err != nil { return traceError(err) diff --git a/cmd/bucket-handlers-listobjects.go b/cmd/bucket-handlers-listobjects.go index 6381ce837..d69666d53 100644 --- a/cmd/bucket-handlers-listobjects.go +++ b/cmd/bucket-handlers-listobjects.go @@ -100,7 +100,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http return } - response := generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter, fetchOwner, maxKeys, listObjectsInfo) + response := generateListObjectsV2Response(bucket, prefix, token, listObjectsInfo.NextMarker, startAfter, delimiter, fetchOwner, 
listObjectsInfo.IsTruncated, maxKeys, listObjectsInfo.Objects, listObjectsInfo.Prefixes) // Write success response. writeSuccessResponseXML(w, encodeResponse(response)) diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 2d35d4269..e6945331c 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -525,7 +525,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h } // Extract metadata to be saved from received Form. - metadata := extractMetadataFromForm(formValues) + metadata, err := extractMetadataFromHeader(formValues) + if err != nil { + errorIf(err, "found invalid http request header") + writeErrorResponse(w, ErrInternalError, r.URL) + return + } sha256sum := "" objectLock := globalNSMutex.NewNSLock(bucket, object) @@ -648,12 +653,21 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. // Delete bucket access policy, if present - ignore any errors. _ = removeBucketPolicy(bucket, objectAPI) + // Notify all peers (including self) to update in-memory state + S3PeersUpdateBucketPolicy(bucket, policyChange{true, nil}) + // Delete notification config, if present - ignore any errors. _ = removeNotificationConfig(bucket, objectAPI) + // Notify all peers (including self) to update in-memory state + S3PeersUpdateBucketNotification(bucket, nil) + // Delete listener config, if present - ignore any errors. _ = removeListenerConfig(bucket, objectAPI) + // Notify all peers (including self) to update in-memory state + S3PeersUpdateBucketListener(bucket, []listenerConfig{}) + // Write success response. writeSuccessNoContent(w) } diff --git a/cmd/bucket-notification-handlers.go b/cmd/bucket-notification-handlers.go index afbe4f684..efc598f6f 100644 --- a/cmd/bucket-notification-handlers.go +++ b/cmd/bucket-notification-handlers.go @@ -204,6 +204,15 @@ func writeNotification(w http.ResponseWriter, notification map[string][]Notifica if err != nil { return err } + + // https://github.com/containous/traefik/issues/560 + // https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events + // + // Proxies might buffer the connection to avoid this we + // need the proper MIME type before writing to client. + // This MIME header tells the proxies to avoid buffering + w.Header().Set("Content-Type", "text/event-stream") + // Add additional CRLF characters for client to // differentiate the individual events properly. _, err = w.Write(append(notificationBytes, crlf...)) diff --git a/cmd/bucket-notification-handlers_test.go b/cmd/bucket-notification-handlers_test.go index c1b7c9ebf..5d095f39d 100644 --- a/cmd/bucket-notification-handlers_test.go +++ b/cmd/bucket-notification-handlers_test.go @@ -185,7 +185,7 @@ func testGetBucketNotificationHandler(obj ObjectLayer, instanceType, bucketName filterRules := []filterRule{ { Name: "prefix", - Value: globalMinioDefaultOwnerID, + Value: "minio", }, { Name: "suffix", diff --git a/cmd/bucket-notification-utils.go b/cmd/bucket-notification-utils.go index de9275b09..211addcd8 100644 --- a/cmd/bucket-notification-utils.go +++ b/cmd/bucket-notification-utils.go @@ -144,6 +144,9 @@ func isValidQueueID(queueARN string) bool { if isAMQPQueue(sqsARN) { // AMQP eueue. 
amqpN := serverConfig.Notify.GetAMQPByID(sqsARN.AccountID) return amqpN.Enable && amqpN.URL != "" + } else if isMQTTQueue(sqsARN) { + mqttN := serverConfig.Notify.GetMQTTByID(sqsARN.AccountID) + return mqttN.Enable && mqttN.Broker != "" } else if isNATSQueue(sqsARN) { natsN := serverConfig.Notify.GetNATSByID(sqsARN.AccountID) return natsN.Enable && natsN.Address != "" @@ -251,6 +254,7 @@ func validateNotificationConfig(nConfig notificationConfig) APIErrorCode { // Unmarshals input value of AWS ARN format into minioSqs object. // Returned value represents minio sqs types, currently supported are // - amqp +// - mqtt // - nats // - elasticsearch // - redis @@ -273,6 +277,8 @@ func unmarshalSqsARN(queueARN string) (mSqs arnSQS) { switch sqsType { case queueTypeAMQP: mSqs.Type = queueTypeAMQP + case queueTypeMQTT: + mSqs.Type = queueTypeMQTT case queueTypeNATS: mSqs.Type = queueTypeNATS case queueTypeElastic: diff --git a/cmd/bucket-notification-utils_test.go b/cmd/bucket-notification-utils_test.go index e98907b21..0d7c95253 100644 --- a/cmd/bucket-notification-utils_test.go +++ b/cmd/bucket-notification-utils_test.go @@ -358,6 +358,11 @@ func TestUnmarshalSQSARN(t *testing.T) { queueARN: "arn:minio:sqs:us-east-1:1:amqp", Type: "amqp", }, + // Valid mqtt queue arn. + { + queueARN: "arn:minio:sqs:us-east-1:1:mqtt", + Type: "mqtt", + }, // Invalid empty queue arn. { queueARN: "", diff --git a/cmd/certs.go b/cmd/certs.go index 99297b5e4..57229faa2 100644 --- a/cmd/certs.go +++ b/cmd/certs.go @@ -17,6 +17,7 @@ package cmd import ( + "crypto/tls" "crypto/x509" "encoding/pem" "fmt" @@ -24,35 +25,34 @@ import ( "path/filepath" ) -func parsePublicCertFile(certFile string) (certs []*x509.Certificate, err error) { - var bytes []byte - - if bytes, err = ioutil.ReadFile(certFile); err != nil { - return certs, err +func parsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err error) { + // Read certificate file. + var data []byte + if data, err = ioutil.ReadFile(certFile); err != nil { + return nil, err } // Parse all certs in the chain. 
- var block *pem.Block - var cert *x509.Certificate - current := bytes + current := data for len(current) > 0 { - if block, current = pem.Decode(current); block == nil { - err = fmt.Errorf("Could not read PEM block from file %s", certFile) - return certs, err + var pemBlock *pem.Block + if pemBlock, current = pem.Decode(current); pemBlock == nil { + return nil, fmt.Errorf("Could not read PEM block from file %s", certFile) } - if cert, err = x509.ParseCertificate(block.Bytes); err != nil { - return certs, err + var x509Cert *x509.Certificate + if x509Cert, err = x509.ParseCertificate(pemBlock.Bytes); err != nil { + return nil, err } - certs = append(certs, cert) + x509Certs = append(x509Certs, x509Cert) } - if len(certs) == 0 { - err = fmt.Errorf("Empty public certificate file %s", certFile) + if len(x509Certs) == 0 { + return nil, fmt.Errorf("Empty public certificate file %s", certFile) } - return certs, err + return x509Certs, nil } func getRootCAs(certsCAsDir string) (*x509.CertPool, error) { @@ -81,7 +81,7 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) { for _, caFile := range caFiles { caCert, err := ioutil.ReadFile(caFile) if err != nil { - return rootCAs, err + return nil, err } rootCAs.AppendCertsFromPEM(caCert) @@ -90,19 +90,26 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) { return rootCAs, nil } -func getSSLConfig() (publicCerts []*x509.Certificate, rootCAs *x509.CertPool, secureConn bool, err error) { +func getSSLConfig() (x509Certs []*x509.Certificate, rootCAs *x509.CertPool, tlsCert *tls.Certificate, secureConn bool, err error) { if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) { - return publicCerts, rootCAs, secureConn, err + return nil, nil, nil, false, nil } - if publicCerts, err = parsePublicCertFile(getPublicCertFile()); err != nil { - return publicCerts, rootCAs, secureConn, err + if x509Certs, err = parsePublicCertFile(getPublicCertFile()); err != nil { + return nil, nil, nil, false, err } + var cert tls.Certificate + if cert, err = tls.LoadX509KeyPair(getPublicCertFile(), getPrivateKeyFile()); err != nil { + return nil, nil, nil, false, err + } + + tlsCert = &cert + if rootCAs, err = getRootCAs(getCADir()); err != nil { - return publicCerts, rootCAs, secureConn, err + return nil, nil, nil, false, err } secureConn = true - return publicCerts, rootCAs, secureConn, err + return x509Certs, rootCAs, tlsCert, secureConn, nil } diff --git a/cmd/common-main.go b/cmd/common-main.go new file mode 100644 index 000000000..c9041f8f4 --- /dev/null +++ b/cmd/common-main.go @@ -0,0 +1,117 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "errors" + "os" + "path/filepath" + "strings" + "time" + + "github.com/minio/cli" +) + +// Check for updates and print a notification message +func checkUpdate(mode string) { + // Its OK to ignore any errors during getUpdateInfo() here. 
+ if older, downloadURL, err := getUpdateInfo(1*time.Second, mode); err == nil { + if updateMsg := computeUpdateMessage(downloadURL, older); updateMsg != "" { + log.Println(updateMsg) + } + } +} + +func enableLoggers() { + fileLogTarget := serverConfig.Logger.GetFile() + if fileLogTarget.Enable { + err := InitFileLogger(&fileLogTarget) + fatalIf(err, "Unable to initialize file logger") + log.AddTarget(fileLogTarget) + } + + consoleLogTarget := serverConfig.Logger.GetConsole() + if consoleLogTarget.Enable { + InitConsoleLogger(&consoleLogTarget) + } + + log.SetConsoleTarget(consoleLogTarget) +} + +func initConfig() { + // If the config file does not exist, create it fresh and return upon success; otherwise migrate and load it. + if isFile(getConfigFile()) { + fatalIf(migrateConfig(), "Config migration failed.") + fatalIf(loadConfig(), "Unable to load config version: '%s'.", v19) + } else { + fatalIf(newConfig(), "Unable to initialize minio config for the first time.") + log.Println("Created minio configuration file successfully at " + getConfigDir()) + } +} + +func handleCommonCmdArgs(ctx *cli.Context) { + // Set configuration directory. + { + // Get configuration directory from command line argument. + configDir := ctx.String("config-dir") + if !ctx.IsSet("config-dir") && ctx.GlobalIsSet("config-dir") { + configDir = ctx.GlobalString("config-dir") + } + if configDir == "" { + fatalIf(errors.New("empty directory"), "Configuration directory cannot be empty.") + } + + // Disallow relative paths, figure out absolute paths. + configDirAbs, err := filepath.Abs(configDir) + fatalIf(err, "Unable to fetch absolute path for config directory %s", configDir) + + setConfigDir(configDirAbs) + } +} + +func handleCommonEnvVars() { + // Start profiler if env is set. + if profiler := os.Getenv("_MINIO_PROFILER"); profiler != "" { + globalProfiler = startProfiler(profiler) + } + + // Check if object cache is disabled. + globalXLObjCacheDisabled = strings.EqualFold(os.Getenv("_MINIO_CACHE"), "off") + + accessKey := os.Getenv("MINIO_ACCESS_KEY") + secretKey := os.Getenv("MINIO_SECRET_KEY") + if accessKey != "" && secretKey != "" { + cred, err := createCredential(accessKey, secretKey) + fatalIf(err, "Invalid access/secret Key set in environment.") + + // credential Envs are set globally. + globalIsEnvCreds = true + globalActiveCred = cred + } + + if browser := os.Getenv("MINIO_BROWSER"); browser != "" { + browserFlag, err := ParseBrowserFlag(browser) + if err != nil { + fatalIf(errors.New("invalid value"), "Unknown value ‘%s’ in MINIO_BROWSER environment variable.", browser) + } + + // browser Envs are set globally, this does not represent + // if browser is turned off or on. + globalIsEnvBrowser = true + globalIsBrowserEnabled = bool(browserFlag) + } +} diff --git a/cmd/config-migrate.go b/cmd/config-migrate.go index f20c10434..eb6d0121a 100644 --- a/cmd/config-migrate.go +++ b/cmd/config-migrate.go @@ -141,7 +141,13 @@ func migrateConfig() error { return err } fallthrough - case v18: + case "18": + // Migrate version '18' to '19'. + if err = migrateV18ToV19(); err != nil { + return err + } + fallthrough + case v19: // No migration needed. this always points to current version.
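The rewritten switch keeps the existing pattern of walking a config forward one version at a time via `fallthrough` until the current version is reached. A stripped-down sketch of that chain, assuming the usual `errors` import and with placeholder migrate helpers standing in for the real ones:

```go
// Each case upgrades one step and then falls through, so a config at
// any older version is carried all the way to the current one.
func migrateSketch(version string) error {
	switch version {
	case "17":
		if err := migrate17To18(); err != nil { // placeholder helper
			return err
		}
		fallthrough
	case "18":
		if err := migrate18To19(); err != nil { // placeholder helper
			return err
		}
		fallthrough
	case "19":
		return nil // current version, nothing left to do
	}
	return errors.New("unknown config version: " + version)
}
```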
err = nil } @@ -1366,3 +1372,109 @@ func migrateV17ToV18() error { log.Printf(configMigrateMSGTemplate, configFile, cv17.Version, srvConfig.Version) return nil } + +func migrateV18ToV19() error { + configFile := getConfigFile() + + cv18 := &serverConfigV18{} + _, err := quick.Load(configFile, cv18) + if os.IsNotExist(err) { + return nil + } else if err != nil { + return fmt.Errorf("Unable to load config version ‘18’. %v", err) + } + if cv18.Version != "18" { + return nil + } + + // Copy over fields from V18 into V19 config struct + srvConfig := &serverConfigV18{ + Logger: &loggers{}, + Notify: ¬ifier{}, + } + srvConfig.Version = "19" + srvConfig.Credential = cv18.Credential + srvConfig.Region = cv18.Region + if srvConfig.Region == "" { + // Region needs to be set for AWS Signature Version 4. + srvConfig.Region = globalMinioDefaultRegion + } + + srvConfig.Logger.Console = cv18.Logger.Console + srvConfig.Logger.File = cv18.Logger.File + + // check and set notifiers config + if len(cv18.Notify.AMQP) == 0 { + srvConfig.Notify.AMQP = make(map[string]amqpNotify) + srvConfig.Notify.AMQP["1"] = amqpNotify{} + } else { + // New deliveryMode parameter is added for AMQP, + // default value is already 0, so nothing to + // explicitly migrate here. + srvConfig.Notify.AMQP = cv18.Notify.AMQP + } + if len(cv18.Notify.ElasticSearch) == 0 { + srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) + srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{ + Format: formatNamespace, + } + } else { + srvConfig.Notify.ElasticSearch = cv18.Notify.ElasticSearch + } + if len(cv18.Notify.Redis) == 0 { + srvConfig.Notify.Redis = make(map[string]redisNotify) + srvConfig.Notify.Redis["1"] = redisNotify{ + Format: formatNamespace, + } + } else { + srvConfig.Notify.Redis = cv18.Notify.Redis + } + if len(cv18.Notify.PostgreSQL) == 0 { + srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) + srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{ + Format: formatNamespace, + } + } else { + srvConfig.Notify.PostgreSQL = cv18.Notify.PostgreSQL + } + if len(cv18.Notify.Kafka) == 0 { + srvConfig.Notify.Kafka = make(map[string]kafkaNotify) + srvConfig.Notify.Kafka["1"] = kafkaNotify{} + } else { + srvConfig.Notify.Kafka = cv18.Notify.Kafka + } + if len(cv18.Notify.NATS) == 0 { + srvConfig.Notify.NATS = make(map[string]natsNotify) + srvConfig.Notify.NATS["1"] = natsNotify{} + } else { + srvConfig.Notify.NATS = cv18.Notify.NATS + } + if len(cv18.Notify.Webhook) == 0 { + srvConfig.Notify.Webhook = make(map[string]webhookNotify) + srvConfig.Notify.Webhook["1"] = webhookNotify{} + } else { + srvConfig.Notify.Webhook = cv18.Notify.Webhook + } + if len(cv18.Notify.MySQL) == 0 { + srvConfig.Notify.MySQL = make(map[string]mySQLNotify) + srvConfig.Notify.MySQL["1"] = mySQLNotify{ + Format: formatNamespace, + } + } else { + srvConfig.Notify.MySQL = cv18.Notify.MySQL + } + + // V18 will not have mqtt support, so we add that here. + srvConfig.Notify.MQTT = make(map[string]mqttNotify) + srvConfig.Notify.MQTT["1"] = mqttNotify{} + + // Load browser config from existing config in the file. + srvConfig.Browser = cv18.Browser + + if err = quick.Save(configFile, srvConfig); err != nil { + return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. 
%v", cv18.Version, srvConfig.Version, err) + } + + log.Printf(configMigrateMSGTemplate, configFile, cv18.Version, srvConfig.Version) + return nil +} diff --git a/cmd/config-migrate_test.go b/cmd/config-migrate_test.go index fe4054255..db714c6c0 100644 --- a/cmd/config-migrate_test.go +++ b/cmd/config-migrate_test.go @@ -122,11 +122,14 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) { if err := migrateV17ToV18(); err != nil { t.Fatal("migrate v17 to v18 should succeed when no config file is found") } + if err := migrateV18ToV19(); err != nil { + t.Fatal("migrate v18 to v19 should succeed when no config file is found") + } } -// Test if a config migration from v2 to v18 is successfully done -func TestServerConfigMigrateV2toV18(t *testing.T) { +// Test if a config migration from v2 to v19 is successfully done +func TestServerConfigMigrateV2toV19(t *testing.T) { rootPath, err := newTestConfig(globalMinioDefaultRegion) if err != nil { t.Fatalf("Init Test config failed") @@ -166,7 +169,7 @@ func TestServerConfigMigrateV2toV18(t *testing.T) { } // Check the version number in the upgraded config file - expectedVersion := v18 + expectedVersion := v19 if serverConfig.Version != expectedVersion { t.Fatalf("Expect version "+expectedVersion+", found: %v", serverConfig.Version) } @@ -246,6 +249,9 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) { if err := migrateV17ToV18(); err == nil { t.Fatal("migrateConfigV17ToV18() should fail with a corrupted json") } + if err := migrateV18ToV19(); err == nil { + t.Fatal("migrateConfigV18ToV19() should fail with a corrupted json") + } } // Test if all migrate code returns error with corrupted config files diff --git a/cmd/config-old.go b/cmd/config-old.go index 9d83607a3..f77b9e92c 100644 --- a/cmd/config-old.go +++ b/cmd/config-old.go @@ -449,3 +449,22 @@ type serverConfigV17 struct { // Notification queue configuration. Notify *notifier `json:"notify"` } + +// serverConfigV18 server configuration version '18' which is like +// version '17' except it adds support for "deliveryMode" parameter in +// the AMQP notification target. +type serverConfigV18 struct { + sync.RWMutex + Version string `json:"version"` + + // S3 API configuration. + Credential credential `json:"credential"` + Region string `json:"region"` + Browser BrowserFlag `json:"browser"` + + // Additional error logging configuration. + Logger *loggers `json:"logger"` + + // Notification queue configuration. + Notify *notifier `json:"notify"` +} diff --git a/cmd/config-v18.go b/cmd/config-v19.go similarity index 87% rename from cmd/config-v18.go rename to cmd/config-v19.go index 048f02451..144cb4fc7 100644 --- a/cmd/config-v18.go +++ b/cmd/config-v19.go @@ -27,18 +27,17 @@ import ( ) // Config version -const v18 = "18" +const v19 = "19" var ( // serverConfig server config. - serverConfig *serverConfigV18 + serverConfig *serverConfigV19 serverConfigMu sync.RWMutex ) -// serverConfigV18 server configuration version '18' which is like -// version '17' except it adds support for "deliveryMode" parameter in -// the AMQP notification target. -type serverConfigV18 struct { +// serverConfigV19 server configuration version '19' which is like +// version '18' except it adds support for MQTT notifications. +type serverConfigV19 struct { sync.RWMutex Version string `json:"version"` @@ -55,7 +54,7 @@ type serverConfigV18 struct { } // GetVersion get current config version. 
-func (s *serverConfigV18) GetVersion() string { +func (s *serverConfigV19) GetVersion() string { s.RLock() defer s.RUnlock() @@ -63,7 +62,7 @@ func (s *serverConfigV18) GetVersion() string { } // SetRegion set new region. -func (s *serverConfigV18) SetRegion(region string) { +func (s *serverConfigV19) SetRegion(region string) { s.Lock() defer s.Unlock() @@ -71,7 +70,7 @@ func (s *serverConfigV18) SetRegion(region string) { } // GetRegion get current region. -func (s *serverConfigV18) GetRegion() string { +func (s *serverConfigV19) GetRegion() string { s.RLock() defer s.RUnlock() @@ -79,7 +78,7 @@ func (s *serverConfigV18) GetRegion() string { } // SetCredentials set new credentials. -func (s *serverConfigV18) SetCredential(creds credential) { +func (s *serverConfigV19) SetCredential(creds credential) { s.Lock() defer s.Unlock() @@ -88,7 +87,7 @@ func (s *serverConfigV18) SetCredential(creds credential) { } // GetCredentials get current credentials. -func (s *serverConfigV18) GetCredential() credential { +func (s *serverConfigV19) GetCredential() credential { s.RLock() defer s.RUnlock() @@ -96,7 +95,7 @@ func (s *serverConfigV18) GetCredential() credential { } // SetBrowser set if browser is enabled. -func (s *serverConfigV18) SetBrowser(b bool) { +func (s *serverConfigV19) SetBrowser(b bool) { s.Lock() defer s.Unlock() @@ -105,7 +104,7 @@ func (s *serverConfigV18) SetBrowser(b bool) { } // GetCredentials get current credentials. -func (s *serverConfigV18) GetBrowser() bool { +func (s *serverConfigV19) GetBrowser() bool { s.RLock() defer s.RUnlock() @@ -113,7 +112,7 @@ func (s *serverConfigV18) GetBrowser() bool { } // Save config. -func (s *serverConfigV18) Save() error { +func (s *serverConfigV19) Save() error { s.RLock() defer s.RUnlock() @@ -121,9 +120,9 @@ func (s *serverConfigV18) Save() error { return quick.Save(getConfigFile(), s) } -func newServerConfigV18() *serverConfigV18 { - srvCfg := &serverConfigV18{ - Version: v18, +func newServerConfigV19() *serverConfigV19 { + srvCfg := &serverConfigV19{ + Version: v19, Credential: mustGetNewCredential(), Region: globalMinioDefaultRegion, Browser: true, @@ -137,6 +136,8 @@ func newServerConfigV18() *serverConfigV18 { // Make sure to initialize notification configs. srvCfg.Notify.AMQP = make(map[string]amqpNotify) srvCfg.Notify.AMQP["1"] = amqpNotify{} + srvCfg.Notify.MQTT = make(map[string]mqttNotify) + srvCfg.Notify.MQTT["1"] = mqttNotify{} srvCfg.Notify.ElasticSearch = make(map[string]elasticSearchNotify) srvCfg.Notify.ElasticSearch["1"] = elasticSearchNotify{} srvCfg.Notify.Redis = make(map[string]redisNotify) @@ -159,7 +160,7 @@ func newServerConfigV18() *serverConfigV18 { // found, otherwise use default parameters func newConfig() error { // Initialize server config. - srvCfg := newServerConfigV18() + srvCfg := newServerConfigV19() // If env is set override the credentials from config file. if globalIsEnvCreds { @@ -237,8 +238,8 @@ func checkDupJSONKeys(json string) error { } // getValidConfig - returns valid server configuration -func getValidConfig() (*serverConfigV18, error) { - srvCfg := &serverConfigV18{ +func getValidConfig() (*serverConfigV19, error) { + srvCfg := &serverConfigV19{ Region: globalMinioDefaultRegion, Browser: true, } @@ -248,8 +249,8 @@ func getValidConfig() (*serverConfigV18, error) { return nil, err } - if srvCfg.Version != v18 { - return nil, fmt.Errorf("configuration version mismatch. 
Expected: ‘%s’, Got: ‘%s’", v18, srvCfg.Version) + if srvCfg.Version != v19 { + return nil, fmt.Errorf("configuration version mismatch. Expected: ‘%s’, Got: ‘%s’", v19, srvCfg.Version) } // Load config file json and check for duplication json keys diff --git a/cmd/config-v18_test.go b/cmd/config-v19_test.go similarity index 95% rename from cmd/config-v18_test.go rename to cmd/config-v19_test.go index e2bb4f008..c8ace31a1 100644 --- a/cmd/config-v18_test.go +++ b/cmd/config-v19_test.go @@ -80,13 +80,21 @@ func TestServerConfig(t *testing.T) { } // Set new console logger. - // Set new Webhook notification id. + // Set new MySQL notification id. serverConfig.Notify.SetMySQLByID("2", mySQLNotify{}) savedNotifyCfg6 := serverConfig.Notify.GetMySQLByID("2") if !reflect.DeepEqual(savedNotifyCfg6, mySQLNotify{}) { t.Errorf("Expecting Webhook config %#v found %#v", mySQLNotify{}, savedNotifyCfg6) } + // Set new console logger. + // Set new MQTT notification id. + serverConfig.Notify.SetMQTTByID("2", mqttNotify{}) + savedNotifyCfg7 := serverConfig.Notify.GetMQTTByID("2") + if !reflect.DeepEqual(savedNotifyCfg7, mqttNotify{}) { + t.Errorf("Expecting Webhook config %#v found %#v", mqttNotify{}, savedNotifyCfg7) + } + consoleLogger := NewConsoleLogger() serverConfig.Logger.SetConsole(consoleLogger) consoleCfg := serverConfig.Logger.GetConsole() @@ -109,8 +117,8 @@ func TestServerConfig(t *testing.T) { serverConfig.Logger.SetFile(fileLogger) // Match version. - if serverConfig.GetVersion() != v18 { - t.Errorf("Expecting version %s found %s", serverConfig.GetVersion(), v18) + if serverConfig.GetVersion() != v19 { + t.Errorf("Expecting version %s found %s", serverConfig.GetVersion(), v19) } // Attempt to save. @@ -223,7 +231,7 @@ func TestValidateConfig(t *testing.T) { configPath := filepath.Join(rootPath, minioConfigFile) - v := v18 + v := v19 testCases := []struct { configData string @@ -309,6 +317,9 @@ func TestValidateConfig(t *testing.T) { // Test 28 - Test valid Format for Redis {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true}, + + // Test 29 - Test MQTT + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, false}, } for i, testCase := range testCases { diff --git a/cmd/endpoint.go b/cmd/endpoint.go index c42160efa..ccc2890c8 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -77,14 +77,14 @@ func (endpoint Endpoint) SetHTTP() { } // NewEndpoint - returns new endpoint based on given arguments. -func NewEndpoint(arg string) (Endpoint, error) { +func NewEndpoint(arg string) (ep Endpoint, e error) { // isEmptyPath - check whether given path is not empty. isEmptyPath := func(path string) bool { return path == "" || path == "/" || path == `\` } if isEmptyPath(arg) { - return Endpoint{}, fmt.Errorf("empty or root endpoint is not supported") + return ep, fmt.Errorf("empty or root endpoint is not supported") } var isLocal bool @@ -96,13 +96,13 @@ func NewEndpoint(arg string) (Endpoint, error) { // - All field should be empty except Host and Path. 
if !((u.Scheme == "http" || u.Scheme == "https") && u.User == nil && u.Opaque == "" && u.ForceQuery == false && u.RawQuery == "" && u.Fragment == "") { - return Endpoint{}, fmt.Errorf("invalid URL endpoint format") + return ep, fmt.Errorf("invalid URL endpoint format") } host, port, err := net.SplitHostPort(u.Host) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { - return Endpoint{}, fmt.Errorf("invalid URL endpoint format: %s", err) + return ep, fmt.Errorf("invalid URL endpoint format: %s", err) } host = u.Host @@ -110,26 +110,26 @@ func NewEndpoint(arg string) (Endpoint, error) { var p int p, err = strconv.Atoi(port) if err != nil { - return Endpoint{}, fmt.Errorf("invalid URL endpoint format: invalid port number") + return ep, fmt.Errorf("invalid URL endpoint format: invalid port number") } else if p < 1 || p > 65535 { - return Endpoint{}, fmt.Errorf("invalid URL endpoint format: port number must be between 1 to 65535") + return ep, fmt.Errorf("invalid URL endpoint format: port number must be between 1 to 65535") } } if host == "" { - return Endpoint{}, fmt.Errorf("invalid URL endpoint format: empty host name") + return ep, fmt.Errorf("invalid URL endpoint format: empty host name") } // As this is path in the URL, we should use path package, not filepath package. // On MS Windows, filepath.Clean() converts into Windows path style ie `/foo` becomes `\foo` u.Path = path.Clean(u.Path) if isEmptyPath(u.Path) { - return Endpoint{}, fmt.Errorf("empty or root path is not supported in URL endpoint") + return ep, fmt.Errorf("empty or root path is not supported in URL endpoint") } isLocal, err = isLocalHost(host) if err != nil { - return Endpoint{}, err + return ep, err } } else { u = &url.URL{Path: path.Clean(arg)} diff --git a/cmd/erasure-createfile.go b/cmd/erasure-createfile.go index db300cf39..b011fbb2b 100644 --- a/cmd/erasure-createfile.go +++ b/cmd/erasure-createfile.go @@ -29,7 +29,7 @@ import ( // all the disks, writes also calculate individual block's checksum // for future bit-rot protection. func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader, allowEmpty bool, blockSize int64, - dataBlocks, parityBlocks int, algo HashAlgo, writeQuorum int) (bytesWritten int64, checkSums []string, err error) { + dataBlocks, parityBlocks int, algo HashAlgo, writeQuorum int) (newDisks []StorageAPI, bytesWritten int64, checkSums []string, err error) { // Allocated blockSized buffer for reading from incoming stream. buf := make([]byte, blockSize) @@ -43,7 +43,7 @@ func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader // FIXME: this is a bug in Golang, n == 0 and err == // io.ErrUnexpectedEOF for io.ReadFull function. if n == 0 && rErr == io.ErrUnexpectedEOF { - return 0, nil, traceError(rErr) + return nil, 0, nil, traceError(rErr) } if rErr == io.EOF { // We have reached EOF on the first byte read, io.Reader @@ -51,28 +51,28 @@ func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader // data. Will create a 0byte file instead. if bytesWritten == 0 && allowEmpty { blocks = make([][]byte, len(disks)) - rErr = appendFile(disks, volume, path, blocks, hashWriters, writeQuorum) + newDisks, rErr = appendFile(disks, volume, path, blocks, hashWriters, writeQuorum) if rErr != nil { - return 0, nil, rErr + return nil, 0, nil, rErr } } // else we have reached EOF after few reads, no need to // add an additional 0bytes at the end. 
break } if rErr != nil && rErr != io.ErrUnexpectedEOF { - return 0, nil, traceError(rErr) + return nil, 0, nil, traceError(rErr) } if n > 0 { // Returns encoded blocks. var enErr error blocks, enErr = encodeData(buf[0:n], dataBlocks, parityBlocks) if enErr != nil { - return 0, nil, enErr + return nil, 0, nil, enErr } // Write to all disks. - if err = appendFile(disks, volume, path, blocks, hashWriters, writeQuorum); err != nil { - return 0, nil, err + if newDisks, err = appendFile(disks, volume, path, blocks, hashWriters, writeQuorum); err != nil { + return nil, 0, nil, err } bytesWritten += int64(n) } @@ -82,7 +82,7 @@ func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader for i := range checkSums { checkSums[i] = hex.EncodeToString(hashWriters[i].Sum(nil)) } - return bytesWritten, checkSums, nil + return newDisks, bytesWritten, checkSums, nil } // encodeData - encodes incoming data buffer into @@ -110,7 +110,7 @@ func encodeData(dataBuffer []byte, dataBlocks, parityBlocks int) ([][]byte, erro } // appendFile - append data buffer at path. -func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hashWriters []hash.Hash, writeQuorum int) (err error) { +func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hashWriters []hash.Hash, writeQuorum int) ([]StorageAPI, error) { var wg = &sync.WaitGroup{} var wErrs = make([]error, len(disks)) // Write encoded data to quorum disks in parallel. @@ -126,8 +126,6 @@ func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hash wErr := disk.AppendFile(volume, path, enBlocks[index]) if wErr != nil { wErrs[index] = traceError(wErr) - // Ignore disk which returned an error. - disks[index] = nil return } @@ -142,5 +140,5 @@ func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hash // Wait for all the appends to finish. wg.Wait() - return reduceWriteQuorumErrs(wErrs, objectOpIgnoredErrs, writeQuorum) + return evalDisks(disks, wErrs), reduceWriteQuorumErrs(wErrs, objectOpIgnoredErrs, writeQuorum) } diff --git a/cmd/erasure-createfile_test.go b/cmd/erasure-createfile_test.go index bcc9973eb..fcf0ce6c6 100644 --- a/cmd/erasure-createfile_test.go +++ b/cmd/erasure-createfile_test.go @@ -56,7 +56,7 @@ func TestErasureCreateFile(t *testing.T) { t.Fatal(err) } // Test when all disks are up. - size, _, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) + _, size, _, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) if err != nil { t.Fatal(err) } @@ -69,7 +69,7 @@ func TestErasureCreateFile(t *testing.T) { disks[5] = AppendDiskDown{disks[5].(*posix)} // Test when two disks are down. 
- size, _, err = erasureCreateFile(disks, "testbucket", "testobject2", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) + _, size, _, err = erasureCreateFile(disks, "testbucket", "testobject2", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) if err != nil { t.Fatal(err) } @@ -83,7 +83,7 @@ func TestErasureCreateFile(t *testing.T) { disks[8] = AppendDiskDown{disks[8].(*posix)} disks[9] = AppendDiskDown{disks[9].(*posix)} - size, _, err = erasureCreateFile(disks, "testbucket", "testobject3", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) + _, size, _, err = erasureCreateFile(disks, "testbucket", "testobject3", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) if err != nil { t.Fatal(err) } @@ -93,7 +93,7 @@ func TestErasureCreateFile(t *testing.T) { // 1 more disk down. 7 disk down in total. Should return quorum error. disks[10] = AppendDiskDown{disks[10].(*posix)} - _, _, err = erasureCreateFile(disks, "testbucket", "testobject4", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) + _, _, _, err = erasureCreateFile(disks, "testbucket", "testobject4", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) if errorCause(err) != errXLWriteQuorum { t.Errorf("erasureCreateFile return value: expected errXLWriteQuorum, got %s", err) } diff --git a/cmd/erasure-healfile_test.go b/cmd/erasure-healfile_test.go index 0f820738c..263da827e 100644 --- a/cmd/erasure-healfile_test.go +++ b/cmd/erasure-healfile_test.go @@ -48,7 +48,7 @@ func TestErasureHealFile(t *testing.T) { t.Fatal(err) } // Create a test file. - size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) + _, size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) if err != nil { t.Fatal(err) } diff --git a/cmd/erasure-readfile_test.go b/cmd/erasure-readfile_test.go index 6d7fef8ad..570593194 100644 --- a/cmd/erasure-readfile_test.go +++ b/cmd/erasure-readfile_test.go @@ -242,7 +242,7 @@ func TestErasureReadFileDiskFail(t *testing.T) { } // Create a test file to read from. - size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) + _, size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) if err != nil { t.Fatal(err) } @@ -325,7 +325,7 @@ func TestErasureReadFileOffsetLength(t *testing.T) { } // Create a test file to read from. - size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) + _, size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) if err != nil { t.Fatal(err) } @@ -404,7 +404,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) { iterations := 10000 // Create a test file to read from. 
- size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) + _, size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) if err != nil { t.Fatal(err) } diff --git a/cmd/event-notifier.go b/cmd/event-notifier.go index 9e5003973..5ae28fa0d 100644 --- a/cmd/event-notifier.go +++ b/cmd/event-notifier.go @@ -503,9 +503,8 @@ func removeNotificationConfig(bucket string, objAPI ObjectLayer) error { // Acquire a write lock on notification config before modifying. objLock := globalNSMutex.NewNSLock(minioMetaBucket, ncPath) objLock.Lock() - err := objAPI.DeleteObject(minioMetaBucket, ncPath) - objLock.Unlock() - return err + defer objLock.Unlock() + return objAPI.DeleteObject(minioMetaBucket, ncPath) } // Remove listener configuration from storage layer. Used when a bucket is deleted. @@ -516,9 +515,8 @@ func removeListenerConfig(bucket string, objAPI ObjectLayer) error { // Acquire a write lock on notification config before modifying. objLock := globalNSMutex.NewNSLock(minioMetaBucket, lcPath) objLock.Lock() - err := objAPI.DeleteObject(minioMetaBucket, lcPath) - objLock.Unlock() - return err + defer objLock.Unlock() + return objAPI.DeleteObject(minioMetaBucket, lcPath) } // Loads both notification and listener config. @@ -609,6 +607,25 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) { } } + // Load all mqtt targets, initialize their respective loggers. + for accountID, mqttN := range serverConfig.Notify.GetMQTT() { + if !mqttN.Enable { + continue + } + + if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeMQTT, newMQTTNotify); err != nil { + if _, ok := err.(net.Error); ok { + err = &net.OpError{ + Op: "Connecting to " + queueARN, + Net: "tcp", + Err: err, + } + } + + return nil, err + } + } + // Load all nats targets, initialize their respective loggers. for accountID, natsN := range serverConfig.Notify.GetNATS() { if !natsN.Enable { diff --git a/cmd/event-notifier_test.go b/cmd/event-notifier_test.go index 4b2087417..c8408c4b9 100644 --- a/cmd/event-notifier_test.go +++ b/cmd/event-notifier_test.go @@ -312,7 +312,7 @@ func TestInitEventNotifier(t *testing.T) { filterRules := []filterRule{ { Name: "prefix", - Value: globalMinioDefaultOwnerID, + Value: "minio", }, { Name: "suffix", @@ -535,7 +535,7 @@ func TestAddRemoveBucketListenerConfig(t *testing.T) { filterRules := []filterRule{ { Name: "prefix", - Value: globalMinioDefaultOwnerID, + Value: "minio", }, { Name: "suffix", diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go index 25d253714..84ab60c97 100644 --- a/cmd/fs-v1-multipart.go +++ b/cmd/fs-v1-multipart.go @@ -159,7 +159,7 @@ func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarke } // listMultipartUploads - lists all multipart uploads. 
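The new MQTT block in loadAllQueueTargets re-wraps network errors in a *net.OpError so the failure message names the unreachable queue target. A small, self-contained illustration of that wrapping pattern (the ARN string and the simulated inner error are made up):

```
package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	queueARN := "arn:minio:sqs::1:mqtt" // illustrative ARN, not from the diff

	// Pretend this came back from a notification target that failed to connect.
	var err error = &net.OpError{Op: "dial", Net: "tcp", Err: errors.New("connection refused")}

	// Same shape as the MQTT hunk above: network errors are re-wrapped so the
	// resulting message names the queue target that could not be reached.
	if _, ok := err.(net.Error); ok {
		err = &net.OpError{
			Op:  "Connecting to " + queueARN,
			Net: "tcp",
			Err: err,
		}
	}

	fmt.Println(err) // Connecting to arn:minio:sqs::1:mqtt tcp: dial tcp: connection refused
}
```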
-func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { +func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { result := ListMultipartsInfo{} recursive := true if delimiter == slashSeparator { @@ -191,7 +191,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark if uploadIDMarker != "" { uploads, _, err = fs.listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads) if err != nil { - return ListMultipartsInfo{}, err + return lmi, err } maxUploads = maxUploads - len(uploads) } @@ -232,7 +232,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark eof = true break } - return ListMultipartsInfo{}, walkResult.err + return lmi, walkResult.err } entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket)) @@ -256,7 +256,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark tmpUploads, end, err = fs.listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads) if err != nil { - return ListMultipartsInfo{}, err + return lmi, err } uploads = append(uploads, tmpUploads...) @@ -311,13 +311,13 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark // Implements S3 compatible ListMultipartUploads API. The resulting // ListMultipartsInfo structure is unmarshalled directly into XML and // replied back to the client. -func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { +func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { if err := checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter, fs); err != nil { - return ListMultipartsInfo{}, err + return lmi, err } if _, err := fs.statBucketDir(bucket); err != nil { - return ListMultipartsInfo{}, toObjectErr(err, bucket) + return lmi, toObjectErr(err, bucket) } return fs.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) @@ -412,9 +412,9 @@ func partToAppend(fsMeta fsMetaV1, fsAppendMeta fsMetaV1) (part objectPartInfo, // CopyObjectPart - similar to PutObjectPart but reads data from an existing // object. Internally incoming data is written to '.minio.sys/tmp' location // and safely renamed to '.minio.sys/multipart' for reach parts. -func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (PartInfo, error) { +func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (pi PartInfo, e error) { if err := checkNewMultipartArgs(srcBucket, srcObject, fs); err != nil { - return PartInfo{}, err + return pi, err } // Initialize pipe. @@ -431,7 +431,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, length, pipeReader, "", "") if err != nil { - return PartInfo{}, toObjectErr(err, dstBucket, dstObject) + return pi, toObjectErr(err, dstBucket, dstObject) } // Explicitly close the reader. @@ -444,13 +444,13 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u // an ongoing multipart transaction. 
Internally incoming data is // written to '.minio.sys/tmp' location and safely renamed to // '.minio.sys/multipart' for reach parts. -func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) { +func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) { if err := checkPutObjectPartArgs(bucket, object, fs); err != nil { - return PartInfo{}, err + return pi, err } if _, err := fs.statBucketDir(bucket); err != nil { - return PartInfo{}, toObjectErr(err, bucket) + return pi, toObjectErr(err, bucket) } // Hold the lock so that two parallel complete-multipart-uploads @@ -463,9 +463,9 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s uploadsPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadsJSONFile) if _, err := fs.rwPool.Open(uploadsPath); err != nil { if err == errFileNotFound || err == errFileAccessDenied { - return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + return pi, traceError(InvalidUploadID{UploadID: uploadID}) } - return PartInfo{}, toObjectErr(traceError(err), bucket, object) + return pi, toObjectErr(traceError(err), bucket, object) } defer fs.rwPool.Close(uploadsPath) @@ -476,16 +476,16 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s rwlk, err := fs.rwPool.Write(fsMetaPath) if err != nil { if err == errFileNotFound || err == errFileAccessDenied { - return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + return pi, traceError(InvalidUploadID{UploadID: uploadID}) } - return PartInfo{}, toObjectErr(traceError(err), bucket, object) + return pi, toObjectErr(traceError(err), bucket, object) } defer rwlk.Close() fsMeta := fsMetaV1{} _, err = fsMeta.ReadFrom(rwlk) if err != nil { - return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, fsMetaPath) + return pi, toObjectErr(err, minioMetaMultipartBucket, fsMetaPath) } partSuffix := fmt.Sprintf("object%d", partID) @@ -523,14 +523,14 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s bytesWritten, cErr := fsCreateFile(fsPartPath, teeReader, buf, size) if cErr != nil { fsRemoveFile(fsPartPath) - return PartInfo{}, toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath) + return pi, toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath) } // Should return IncompleteBody{} error when reader has fewer // bytes than specified in request header. if bytesWritten < size { fsRemoveFile(fsPartPath) - return PartInfo{}, traceError(IncompleteBody{}) + return pi, traceError(IncompleteBody{}) } // Delete temporary part in case of failure. 
If @@ -541,14 +541,14 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil)) if md5Hex != "" { if newMD5Hex != md5Hex { - return PartInfo{}, traceError(BadDigest{md5Hex, newMD5Hex}) + return pi, traceError(BadDigest{md5Hex, newMD5Hex}) } } if sha256sum != "" { newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil)) if newSHA256sum != sha256sum { - return PartInfo{}, traceError(SHA256Mismatch{}) + return pi, traceError(SHA256Mismatch{}) } } @@ -561,20 +561,20 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s fsNSPartPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, partPath) if err = fsRenameFile(fsPartPath, fsNSPartPath); err != nil { partLock.Unlock() - return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, partPath) + return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) } // Save the object part info in `fs.json`. fsMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size) if _, err = fsMeta.WriteTo(rwlk); err != nil { partLock.Unlock() - return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) + return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } partNamePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, partSuffix) fi, err := fsStatFile(partNamePath) if err != nil { - return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, partSuffix) + return pi, toObjectErr(err, minioMetaMultipartBucket, partSuffix) } // Append the part in background. @@ -599,7 +599,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s // listObjectParts - wrapper scanning through // '.minio.sys/multipart/bucket/object/UPLOADID'. Lists all the parts // saved inside '.minio.sys/multipart/bucket/object/UPLOADID'. -func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) { +func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) { result := ListPartsInfo{} uploadIDPath := pathJoin(bucket, object, uploadID) @@ -608,16 +608,16 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM if err != nil { if err == errFileNotFound || err == errFileAccessDenied { // On windows oddly this is returned. - return ListPartsInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + return lpi, traceError(InvalidUploadID{UploadID: uploadID}) } - return ListPartsInfo{}, toObjectErr(traceError(err), bucket, object) + return lpi, toObjectErr(traceError(err), bucket, object) } defer fs.rwPool.Close(fsMetaPath) fsMeta := fsMetaV1{} _, err = fsMeta.ReadFrom(metaFile.LockedFile) if err != nil { - return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, fsMetaPath) + return lpi, toObjectErr(err, minioMetaBucket, fsMetaPath) } // Only parts with higher part numbers will be listed. 
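PutObjectPart streams the part through MD5 and SHA-256 writers via a tee reader and rejects the upload when the client-supplied digests do not match. Below is a compact, self-contained sketch of that verification pattern; the helper name and error strings are illustrative, not the actual Minio types.

```
package main

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
)

// verifyPart drains the part data while feeding MD5 and SHA-256 writers, then
// compares the computed sums with what the client claimed. It mirrors the
// shape of the checks in PutObjectPart above, with simplified error values.
func verifyPart(data io.Reader, md5Hex, sha256Hex string) (int64, error) {
	md5Writer := md5.New()
	sha256Writer := sha256.New()
	tee := io.TeeReader(io.TeeReader(data, md5Writer), sha256Writer)

	n, err := io.Copy(ioutil.Discard, tee) // a real server writes to the part file here
	if err != nil {
		return 0, err
	}
	if md5Hex != "" && hex.EncodeToString(md5Writer.Sum(nil)) != md5Hex {
		return 0, errors.New("bad digest (md5 mismatch)")
	}
	if sha256Hex != "" && hex.EncodeToString(sha256Writer.Sum(nil)) != sha256Hex {
		return 0, errors.New("sha256 mismatch")
	}
	return n, nil
}

func main() {
	body := []byte("hello world")
	sum := md5.Sum(body)
	n, err := verifyPart(bytes.NewReader(body), hex.EncodeToString(sum[:]), "")
	fmt.Println(n, err) // 11 <nil>
}
```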
@@ -633,7 +633,7 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM partNamePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, part.Name) fi, err = fsStatFile(partNamePath) if err != nil { - return ListPartsInfo{}, toObjectErr(err, minioMetaMultipartBucket, partNamePath) + return lpi, toObjectErr(err, minioMetaMultipartBucket, partNamePath) } result.Parts = append(result.Parts, PartInfo{ PartNumber: part.Number, @@ -671,13 +671,13 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM // Implements S3 compatible ListObjectParts API. The resulting // ListPartsInfo structure is unmarshalled directly into XML and // replied back to the client. -func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) { +func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) { if err := checkListPartsArgs(bucket, object, fs); err != nil { - return ListPartsInfo{}, err + return lpi, err } if _, err := fs.statBucketDir(bucket); err != nil { - return ListPartsInfo{}, toObjectErr(err, bucket) + return lpi, toObjectErr(err, bucket) } // Hold the lock so that two parallel complete-multipart-uploads @@ -688,7 +688,7 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM listPartsInfo, err := fs.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) if err != nil { - return ListPartsInfo{}, toObjectErr(err, bucket, object) + return lpi, toObjectErr(err, bucket, object) } // Success. @@ -701,24 +701,24 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM // md5sums of all the parts. // // Implements S3 compatible Complete multipart API. -func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, error) { +func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (oi ObjectInfo, e error) { if err := checkCompleteMultipartArgs(bucket, object, fs); err != nil { - return ObjectInfo{}, err + return oi, err } // Check if an object is present as one of the parent dir. if fs.parentDirIsObject(bucket, pathutil.Dir(object)) { - return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object) + return oi, toObjectErr(traceError(errFileAccessDenied), bucket, object) } if _, err := fs.statBucketDir(bucket); err != nil { - return ObjectInfo{}, toObjectErr(err, bucket) + return oi, toObjectErr(err, bucket) } // Calculate s3 compatible md5sum for complete multipart. s3MD5, err := getCompleteMultipartMD5(parts) if err != nil { - return ObjectInfo{}, err + return oi, err } uploadIDPath := pathJoin(bucket, object, uploadID) @@ -733,9 +733,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload rlk, err := fs.rwPool.Open(fsMetaPathMultipart) if err != nil { if err == errFileNotFound || err == errFileAccessDenied { - return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + return oi, traceError(InvalidUploadID{UploadID: uploadID}) } - return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) + return oi, toObjectErr(traceError(err), bucket, object) } // Disallow any parallel abort or complete multipart operations. 
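CompleteMultipartUpload derives s3MD5 from getCompleteMultipartMD5(parts) before validating each part. That helper is not shown in this diff, but the conventional S3 multipart ETag is the MD5 of the concatenated binary part MD5s suffixed with the part count; the sketch below works under that assumption, with a local completedPart stand-in rather than the real completePart type.

```
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
)

// completedPart is a stand-in for the completePart type used above.
type completedPart struct {
	PartNumber int
	ETag       string
}

// multipartETag computes the conventional S3 multipart ETag: the MD5 of the
// concatenated binary MD5s of every part, followed by "-<part count>".
func multipartETag(parts []completedPart) (string, error) {
	var all []byte
	for _, p := range parts {
		b, err := hex.DecodeString(strings.Trim(p.ETag, `"`))
		if err != nil {
			return "", err
		}
		all = append(all, b...)
	}
	sum := md5.Sum(all)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(parts)), nil
}

func main() {
	p1 := md5.Sum([]byte("part one"))
	p2 := md5.Sum([]byte("part two"))
	etag, _ := multipartETag([]completedPart{
		{1, hex.EncodeToString(p1[:])},
		{2, hex.EncodeToString(p2[:])},
	})
	fmt.Println(etag) // prints "<32 hex chars>-2"
}
```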
@@ -743,9 +743,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload if err != nil { fs.rwPool.Close(fsMetaPathMultipart) if err == errFileNotFound || err == errFileAccessDenied { - return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + return oi, traceError(InvalidUploadID{UploadID: uploadID}) } - return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) + return oi, toObjectErr(traceError(err), bucket, object) } defer rwlk.Close() @@ -754,7 +754,31 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload _, err = fsMeta.ReadFrom(rlk.LockedFile) if err != nil { fs.rwPool.Close(fsMetaPathMultipart) - return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart) + return oi, toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart) + } + + // Validate all parts and then commit to disk. + for i, part := range parts { + partIdx := fsMeta.ObjectPartIndex(part.PartNumber) + if partIdx == -1 { + fs.rwPool.Close(fsMetaPathMultipart) + return oi, traceError(InvalidPart{}) + } + + if fsMeta.Parts[partIdx].ETag != part.ETag { + fs.rwPool.Close(fsMetaPathMultipart) + return oi, traceError(InvalidPart{}) + } + + // All parts except the last part has to be atleast 5MB. + if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) { + fs.rwPool.Close(fsMetaPathMultipart) + return oi, traceError(PartTooSmall{ + PartNumber: part.PartNumber, + PartSize: fsMeta.Parts[partIdx].Size, + PartETag: part.ETag, + }) + } } // Wait for any competing PutObject() operation on bucket/object, since same namespace @@ -763,7 +787,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload metaFile, err := fs.rwPool.Create(fsMetaPath) if err != nil { fs.rwPool.Close(fsMetaPathMultipart) - return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) + return oi, toObjectErr(traceError(err), bucket, object) } defer metaFile.Close() @@ -780,7 +804,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID) if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil { fs.rwPool.Close(fsMetaPathMultipart) - return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, uploadID) + return oi, toObjectErr(err, minioMetaTmpBucket, uploadID) } } } @@ -798,29 +822,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload // Allocate staging buffer. var buf = make([]byte, readSizeV1) - // Validate all parts and then commit to disk. - for i, part := range parts { - partIdx := fsMeta.ObjectPartIndex(part.PartNumber) - if partIdx == -1 { - fs.rwPool.Close(fsMetaPathMultipart) - return ObjectInfo{}, traceError(InvalidPart{}) - } - - if fsMeta.Parts[partIdx].ETag != part.ETag { - fs.rwPool.Close(fsMetaPathMultipart) - return ObjectInfo{}, traceError(BadDigest{}) - } - - // All parts except the last part has to be atleast 5MB. - if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) { - fs.rwPool.Close(fsMetaPathMultipart) - return ObjectInfo{}, traceError(PartTooSmall{ - PartNumber: part.PartNumber, - PartSize: fsMeta.Parts[partIdx].Size, - PartETag: part.ETag, - }) - } - + for _, part := range parts { // Construct part suffix. 
partSuffix := fmt.Sprintf("object%d", part.PartNumber) multipartPartFile := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, partSuffix) @@ -831,9 +833,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload if err != nil { fs.rwPool.Close(fsMetaPathMultipart) if err == errFileNotFound { - return ObjectInfo{}, traceError(InvalidPart{}) + return oi, traceError(InvalidPart{}) } - return ObjectInfo{}, toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix) + return oi, toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix) } // No need to hold a lock, this is a unique file and will be only written @@ -843,7 +845,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload if err != nil { reader.Close() fs.rwPool.Close(fsMetaPathMultipart) - return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) + return oi, toObjectErr(traceError(err), bucket, object) } _, err = io.CopyBuffer(wfile, reader, buf) @@ -851,7 +853,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload wfile.Close() reader.Close() fs.rwPool.Close(fsMetaPathMultipart) - return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) + return oi, toObjectErr(traceError(err), bucket, object) } wfile.Close() @@ -860,7 +862,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil { fs.rwPool.Close(fsMetaPathMultipart) - return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, uploadID) + return oi, toObjectErr(err, minioMetaTmpBucket, uploadID) } } @@ -876,7 +878,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload // Write all the set metadata. if _, err = fsMeta.WriteTo(metaFile); err != nil { fs.rwPool.Close(fsMetaPathMultipart) - return ObjectInfo{}, toObjectErr(err, bucket, object) + return oi, toObjectErr(err, bucket, object) } // Close lock held on bucket/object/uploadid/fs.json, @@ -888,17 +890,17 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload multipartObjectDir := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object) multipartUploadIDDir := pathJoin(multipartObjectDir, uploadID) if err = fsRemoveUploadIDPath(multipartObjectDir, multipartUploadIDDir); err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) + return oi, toObjectErr(err, bucket, object) } // Remove entry from `uploads.json`. if err = fs.removeUploadID(bucket, object, uploadID, rwlk); err != nil { - return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, pathutil.Join(bucket, object)) + return oi, toObjectErr(err, minioMetaMultipartBucket, pathutil.Join(bucket, object)) } fi, err := fsStatFile(fsNSObjPath) if err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) + return oi, toObjectErr(err, bucket, object) } // Return object info. diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index c8d0db1d6..960a905ca 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -103,6 +103,16 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) { } } + di, err := getDiskInfo(preparePath(fsPath)) + if err != nil { + return nil, err + } + + // Check if disk has minimum required total space. + if err = checkDiskMinTotal(di); err != nil { + return nil, err + } + // Assign a new UUID for FS minio mode. Each server instance // gets its own UUID for temporary file transaction. 
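newFSObjectLayer now refuses to start on a filesystem that fails checkDiskMinTotal. Neither getDiskInfo nor the threshold it enforces appears in this diff, so the following is only a sketch of the idea with invented names and an assumed 1 GiB limit.

```
package main

import (
	"errors"
	"fmt"
)

// diskInfo is a stand-in for the structure getDiskInfo presumably returns.
type diskInfo struct {
	Total uint64 // total size of the filesystem, in bytes
	Free  uint64 // free space, in bytes
}

// minTotalSpace is an illustrative threshold; the real checkDiskMinTotal uses
// whatever minimum the Minio release defines, which this diff does not show.
const minTotalSpace = 1 << 30 // 1 GiB

var errMinDiskSize = errors.New("disk path is smaller than the minimum required size")

// checkMinTotal mirrors the intent of checkDiskMinTotal: refuse to start the
// FS backend on a filesystem that is too small to be useful.
func checkMinTotal(di diskInfo) error {
	if di.Total < minTotalSpace {
		return errMinDiskSize
	}
	return nil
}

func main() {
	fmt.Println(checkMinTotal(diskInfo{Total: 512 << 20, Free: 100 << 20})) // error: too small
	fmt.Println(checkMinTotal(diskInfo{Total: 4 << 30, Free: 2 << 30}))     // <nil>
}
```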
fsUUID := mustGetUUID() @@ -138,14 +148,12 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) { fs.fsFormatRlk = rlk // Initialize and load bucket policies. - err = initBucketPolicies(fs) - if err != nil { + if err = initBucketPolicies(fs); err != nil { return nil, fmt.Errorf("Unable to load all bucket policies. %s", err) } // Initialize a new event notifier. - err = initEventNotifier(fs) - if err != nil { + if err = initEventNotifier(fs); err != nil { return nil, fmt.Errorf("Unable to initialize event notification. %s", err) } @@ -214,10 +222,10 @@ func (fs fsObjects) MakeBucketWithLocation(bucket, location string) error { } // GetBucketInfo - fetch bucket metadata info. -func (fs fsObjects) GetBucketInfo(bucket string) (BucketInfo, error) { +func (fs fsObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) { st, err := fs.statBucketDir(bucket) if err != nil { - return BucketInfo{}, toObjectErr(err, bucket) + return bi, toObjectErr(err, bucket) } // As osStat() doesn't carry other than ModTime(), use ModTime() as CreatedTime. @@ -304,15 +312,15 @@ func (fs fsObjects) DeleteBucket(bucket string) error { // CopyObject - copy object source object to destination object. // if source object and destination object are same we only // update metadata. -func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (ObjectInfo, error) { +func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (oi ObjectInfo, e error) { if _, err := fs.statBucketDir(srcBucket); err != nil { - return ObjectInfo{}, toObjectErr(err, srcBucket) + return oi, toObjectErr(err, srcBucket) } // Stat the file to get file size. fi, err := fsStatFile(pathJoin(fs.fsPath, srcBucket, srcObject)) if err != nil { - return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) + return oi, toObjectErr(err, srcBucket, srcObject) } // Check if this request is only metadata update. @@ -322,7 +330,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string var wlk *lock.LockedFile wlk, err = fs.rwPool.Write(fsMetaPath) if err != nil { - return ObjectInfo{}, toObjectErr(traceError(err), srcBucket, srcObject) + return oi, toObjectErr(traceError(err), srcBucket, srcObject) } // This close will allow for locks to be synchronized on `fs.json`. defer wlk.Close() @@ -331,7 +339,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string fsMeta := newFSMetaV1() fsMeta.Meta = metadata if _, err = fsMeta.WriteTo(wlk); err != nil { - return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) + return oi, toObjectErr(err, srcBucket, srcObject) } // Return the new object info. @@ -356,7 +364,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string objInfo, err := fs.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "") if err != nil { - return ObjectInfo{}, toObjectErr(err, dstBucket, dstObject) + return oi, toObjectErr(err, dstBucket, dstObject) } // Explicitly close the reader. @@ -431,7 +439,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64, } // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo. 
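Most of the signature changes in fs-v1.go and fs-v1-multipart.go swap unnamed results for named ones such as (oi ObjectInfo, e error), purely so error paths can return the zero value tersely. A tiny illustration of why that works; the types here are local stand-ins, not Minio's.

```
package main

import (
	"errors"
	"fmt"
)

type objectInfo struct {
	Bucket string
	Name   string
	Size   int64
}

// Named results start out as their zero values, so every error path can simply
// write `return oi, err` instead of spelling out `return objectInfo{}, err`,
// which is the same rewrite applied to the fsObjects methods above.
func statObject(exists bool) (oi objectInfo, e error) {
	if !exists {
		return oi, errors.New("object not found")
	}
	oi = objectInfo{Bucket: "testbucket", Name: "testobject", Size: 11}
	return oi, nil
}

func main() {
	fmt.Println(statObject(false)) // zero objectInfo and "object not found"
	fmt.Println(statObject(true))  // {testbucket testobject 11} <nil>
}
```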
-func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) { +func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error) { fsMeta := fsMetaV1{} fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile) @@ -446,33 +454,33 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) { // PutObject() transaction, if we arrive at such // a situation we just ignore and continue. if errorCause(rerr) != io.EOF { - return ObjectInfo{}, toObjectErr(rerr, bucket, object) + return oi, toObjectErr(rerr, bucket, object) } } } // Ignore if `fs.json` is not available, this is true for pre-existing data. if err != nil && err != errFileNotFound { - return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) + return oi, toObjectErr(traceError(err), bucket, object) } // Stat the file to get file size. fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object)) if err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) + return oi, toObjectErr(err, bucket, object) } return fsMeta.ToObjectInfo(bucket, object, fi), nil } // GetObjectInfo - reads object metadata and replies back ObjectInfo. -func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) { +func (fs fsObjects) GetObjectInfo(bucket, object string) (oi ObjectInfo, e error) { if err := checkGetObjArgs(bucket, object); err != nil { - return ObjectInfo{}, err + return oi, err } if _, err := fs.statBucketDir(bucket); err != nil { - return ObjectInfo{}, toObjectErr(err, bucket) + return oi, toObjectErr(err, bucket) } return fs.getObjectInfo(bucket, object) @@ -759,18 +767,18 @@ func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) { // ListObjects - list all objects at prefix upto maxKeys., optionally delimited by '/'. Maintains the list pool // state for future re-entrant list requests. -func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { if err := checkListObjsArgs(bucket, prefix, marker, delimiter, fs); err != nil { - return ListObjectsInfo{}, err + return loi, err } if _, err := fs.statBucketDir(bucket); err != nil { - return ListObjectsInfo{}, err + return loi, err } // With max keys of zero we have reached eof, return right here. if maxKeys == 0 { - return ListObjectsInfo{}, nil + return loi, nil } // For delimiter and prefix as '/' we do not list anything at all @@ -779,7 +787,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey // as '/' we don't have any entries, since all the keys are // of form 'keyName/...' if delimiter == slashSeparator && prefix == slashSeparator { - return ListObjectsInfo{}, nil + return loi, nil } // Over flowing count - reset to maxObjectList. @@ -860,13 +868,13 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey if walkResult.err != nil { // File not found is a valid case. 
if errorCause(walkResult.err) == errFileNotFound { - return ListObjectsInfo{}, nil + return loi, nil } - return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix) + return loi, toObjectErr(walkResult.err, bucket, prefix) } objInfo, err := entryToObjectInfo(walkResult.entry) if err != nil { - return ListObjectsInfo{}, nil + return loi, nil } nextMarker = objInfo.Name objInfos = append(objInfos, objInfo) @@ -908,8 +916,8 @@ func (fs fsObjects) HealBucket(bucket string) error { } // ListObjectsHeal - list all objects to be healed. Valid only for XL -func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { - return ListObjectsInfo{}, traceError(NotImplemented{}) +func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { + return loi, traceError(NotImplemented{}) } // ListBucketsHeal - list all buckets to be healed. Valid only for XL @@ -918,6 +926,6 @@ func (fs fsObjects) ListBucketsHeal() ([]BucketInfo, error) { } func (fs fsObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker, - delimiter string, maxUploads int) (ListMultipartsInfo, error) { - return ListMultipartsInfo{}, traceError(NotImplemented{}) + delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { + return lmi, traceError(NotImplemented{}) } diff --git a/cmd/gateway-anonymous.go b/cmd/gateway-anonymous.go new file mode 100644 index 000000000..fd940fd94 --- /dev/null +++ b/cmd/gateway-anonymous.go @@ -0,0 +1,49 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import "net/http" + +func anonErrToObjectErr(statusCode int, params ...string) error { + bucket := "" + object := "" + if len(params) >= 1 { + bucket = params[0] + } + if len(params) == 2 { + object = params[1] + } + + switch statusCode { + case http.StatusNotFound: + if object != "" { + return ObjectNotFound{bucket, object} + } + return BucketNotFound{Bucket: bucket} + case http.StatusBadRequest: + if object != "" { + return ObjectNameInvalid{bucket, object} + } + return BucketNameInvalid{Bucket: bucket} + case http.StatusForbidden: + fallthrough + case http.StatusUnauthorized: + return AllAccessDisabled{bucket, object} + } + + return errUnexpected +} diff --git a/cmd/gateway-azure-anonymous.go b/cmd/gateway-azure-anonymous.go index c8f35e265..47e39224b 100644 --- a/cmd/gateway-azure-anonymous.go +++ b/cmd/gateway-azure-anonymous.go @@ -29,6 +29,48 @@ import ( "github.com/Azure/azure-sdk-for-go/storage" ) +// Make anonymous HTTP request to azure endpoint. +func azureAnonRequest(verb, urlStr string, header http.Header) (*http.Response, error) { + req, err := http.NewRequest(verb, urlStr, nil) + if err != nil { + return nil, err + } + if header != nil { + req.Header = header + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + + // 4XX and 5XX are error HTTP codes. 
+ if resp.StatusCode >= 400 && resp.StatusCode <= 511 { + defer resp.Body.Close() + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + // no error in response body, might happen in HEAD requests + return nil, storage.AzureStorageServiceError{ + StatusCode: resp.StatusCode, + Code: resp.Status, + Message: "no response body was available for error status code", + } + } + // Response contains Azure storage service error object. + var storageErr storage.AzureStorageServiceError + if err := xml.Unmarshal(respBody, &storageErr); err != nil { + return nil, err + } + storageErr.StatusCode = resp.StatusCode + return nil, storageErr + } + + return resp, nil +} + // AnonGetBucketInfo - Get bucket metadata from azure anonymously. func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) { url, err := url.Parse(a.client.GetBlobURL(bucket, "")) @@ -36,11 +78,11 @@ func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, return bucketInfo, azureToObjectError(traceError(err)) } url.RawQuery = "restype=container" - resp, err := http.Head(url.String()) + resp, err := azureAnonRequest(httpHEAD, url.String(), nil) if err != nil { return bucketInfo, azureToObjectError(traceError(err), bucket) } - resp.Body.Close() + defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return bucketInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket) @@ -67,19 +109,14 @@ func (a *azureObjects) AnonPutObject(bucket, object string, size int64, data io. // AnonGetObject - SendGET request without authentication. // This is needed when clients send GET requests on objects that can be downloaded without auth. func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) { - u := a.client.GetBlobURL(bucket, object) - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return azureToObjectError(traceError(err), bucket, object) - } - + h := make(http.Header) if length > 0 && startOffset > 0 { - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) + h.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) } else if startOffset > 0 { - req.Header.Add("Range", fmt.Sprintf("bytes=%d-", startOffset)) + h.Add("Range", fmt.Sprintf("bytes=%d-", startOffset)) } - resp, err := http.DefaultClient.Do(req) + resp, err := azureAnonRequest(httpGET, a.client.GetBlobURL(bucket, object), h) if err != nil { return azureToObjectError(traceError(err), bucket, object) } @@ -96,11 +133,11 @@ func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, l // AnonGetObjectInfo - Send HEAD request without authentication and convert the // result to ObjectInfo. 
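Both the Azure and GCS anonymous readers build a Range header from startOffset and length and accept only 200 or 206 responses. A self-contained sketch of that request shape; the URL in main is only a placeholder, and the error handling is simplified compared to anonErrToObjectErr.

```
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// anonGet issues an unauthenticated GET, optionally with a Range header built
// the same way as AnonGetObject above, and copies the body to w. Real gateway
// code funnels the status code through anonErrToObjectErr to produce typed
// object-layer errors; here we return a plain error instead.
func anonGet(url string, startOffset, length int64, w io.Writer) error {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	if length > 0 && startOffset > 0 {
		req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
	} else if startOffset > 0 {
		req.Header.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	_, err = io.Copy(w, resp.Body)
	return err
}

func main() {
	// Placeholder URL; any server reachable anonymously will do.
	err := anonGet("https://example.com/", 0, 0, ioutil.Discard)
	fmt.Println(err)
}
```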
func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { - resp, err := http.Head(a.client.GetBlobURL(bucket, object)) + resp, err := azureAnonRequest(httpHEAD, a.client.GetBlobURL(bucket, object), nil) if err != nil { return objInfo, azureToObjectError(traceError(err), bucket, object) } - resp.Body.Close() + defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return objInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) @@ -153,7 +190,7 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, } url.RawQuery = q.Encode() - resp, err := http.Get(url.String()) + resp, err := azureAnonRequest(httpGET, url.String(), nil) if err != nil { return result, azureToObjectError(traceError(err)) } @@ -190,3 +227,63 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, result.Prefixes = listResp.BlobPrefixes return result, nil } + +// AnonListObjectsV2 - List objects in V2 mode, anonymously +func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (result ListObjectsV2Info, err error) { + params := storage.ListBlobsParameters{ + Prefix: prefix, + Marker: continuationToken, + Delimiter: delimiter, + MaxResults: uint(maxKeys), + } + + q := azureListBlobsGetParameters(params) + q.Set("restype", "container") + q.Set("comp", "list") + + url, err := url.Parse(a.client.GetBlobURL(bucket, "")) + if err != nil { + return result, azureToObjectError(traceError(err)) + } + url.RawQuery = q.Encode() + + resp, err := http.Get(url.String()) + if err != nil { + return result, azureToObjectError(traceError(err)) + } + defer resp.Body.Close() + + var listResp storage.BlobListResponse + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return result, azureToObjectError(traceError(err)) + } + err = xml.Unmarshal(data, &listResp) + if err != nil { + return result, azureToObjectError(traceError(err)) + } + + // If NextMarker is not empty, this means response is truncated and NextContinuationToken should be set + if listResp.NextMarker != "" { + result.IsTruncated = true + result.NextContinuationToken = listResp.NextMarker + } + for _, object := range listResp.Blobs { + t, e := time.Parse(time.RFC1123, object.Properties.LastModified) + if e != nil { + continue + } + result.Objects = append(result.Objects, ObjectInfo{ + Bucket: bucket, + Name: object.Name, + ModTime: t, + Size: object.Properties.ContentLength, + ETag: canonicalizeETag(object.Properties.Etag), + ContentType: object.Properties.ContentType, + ContentEncoding: object.Properties.ContentEncoding, + }) + } + result.Prefixes = listResp.BlobPrefixes + return result, nil +} diff --git a/cmd/gateway-azure-unsupported.go b/cmd/gateway-azure-unsupported.go index c292275cb..e2aeeecdd 100644 --- a/cmd/gateway-azure-unsupported.go +++ b/cmd/gateway-azure-unsupported.go @@ -32,12 +32,12 @@ func (a *azureObjects) HealObject(bucket, object string) (int, int, error) { } // ListObjectsHeal - Not relevant. -func (a *azureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { - return ListObjectsInfo{}, traceError(NotImplemented{}) +func (a *azureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { + return loi, traceError(NotImplemented{}) } // ListUploadsHeal - Not relevant. 
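The new ListObjectsV2 paths reuse Azure's V1 listing: a non-empty NextMarker becomes the V2 continuation token, and each blob's RFC 1123 LastModified string is parsed and silently skipped on failure. A small demonstration of both details; the marker value is made up.

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// Azure returns blob Last-Modified values in RFC 1123 form; the listing
	// code above parses them with time.RFC1123 and skips entries that fail.
	lastModified := "Fri, 19 May 2017 10:00:00 GMT"
	t, err := time.Parse(time.RFC1123, lastModified)
	if err != nil {
		fmt.Println("skipping blob with unparseable timestamp:", err)
		return
	}
	fmt.Println(t.UTC()) // 2017-05-19 10:00:00 +0000 UTC

	// V1 -> V2 listing translation: a non-empty NextMarker means the result is
	// truncated and the marker doubles as the V2 continuation token.
	nextMarker := "photos/2017/05/19/img_0100.jpg" // illustrative marker value
	isTruncated := nextMarker != ""
	fmt.Println(isTruncated, nextMarker)
}
```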
func (a *azureObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker, - delimiter string, maxUploads int) (ListMultipartsInfo, error) { - return ListMultipartsInfo{}, traceError(NotImplemented{}) + delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { + return lmi, traceError(NotImplemented{}) } diff --git a/cmd/gateway-azure.go b/cmd/gateway-azure.go index 16f762f52..162154e7b 100644 --- a/cmd/gateway-azure.go +++ b/cmd/gateway-azure.go @@ -67,6 +67,11 @@ func azureToS3Metadata(meta map[string]string) (newMeta map[string]string) { return newMeta } +// Append "-1" to etag so that clients do not interpret it as MD5. +func azureToS3ETag(etag string) string { + return canonicalizeETag(etag) + "-1" +} + // To store metadata during NewMultipartUpload which will be used after // CompleteMultipartUpload to call SetBlobMetadata. type azureMultipartMetaInfo struct { @@ -137,6 +142,8 @@ func azureToObjectError(err error, params ...string) error { err = BucketExists{Bucket: bucket} case "InvalidResourceName": err = BucketNameInvalid{Bucket: bucket} + case "RequestBodyTooLarge": + err = PartTooBig{} default: switch azureErr.StatusCode { case http.StatusNotFound: @@ -153,15 +160,26 @@ func azureToObjectError(err error, params ...string) error { return e } -// Inits azure blob storage client and returns azureObjects. -func newAzureLayer(endPoint string, account, key string, secure bool) (GatewayLayer, error) { - if endPoint == "" { - endPoint = storage.DefaultBaseURL +// Inits azure blob storage client and returns AzureObjects. +func newAzureLayer(host string) (GatewayLayer, error) { + var err error + var endpoint = storage.DefaultBaseURL + var secure = true + + // If user provided some parameters + if host != "" { + endpoint, secure, err = parseGatewayEndpoint(host) + if err != nil { + return nil, err + } } - c, err := storage.NewClient(account, key, endPoint, globalAzureAPIVersion, secure) + + creds := serverConfig.GetCredential() + c, err := storage.NewClient(creds.AccessKey, creds.SecretKey, endpoint, globalAzureAPIVersion, secure) if err != nil { return &azureObjects{}, err } + return &azureObjects{ client: c.GetBlobService(), metaInfo: azureMultipartMetaInfo{ @@ -174,13 +192,12 @@ func newAzureLayer(endPoint string, account, key string, secure bool) (GatewayLa // Shutdown - save any gateway metadata to disk // if necessary and reload upon next restart. func (a *azureObjects) Shutdown() error { - // TODO return nil } // StorageInfo - Not relevant to Azure backend. -func (a *azureObjects) StorageInfo() StorageInfo { - return StorageInfo{} +func (a *azureObjects) StorageInfo() (si StorageInfo) { + return si } // MakeBucketWithLocation - Create a new container on azure backend. @@ -190,13 +207,13 @@ func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error { } // GetBucketInfo - Get bucket metadata.. -func (a *azureObjects) GetBucketInfo(bucket string) (BucketInfo, error) { +func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) { // Azure does not have an equivalent call, hence use ListContainers. 
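azureToS3ETag tags every Azure ETag with a "-1" suffix so S3 clients do not mistake it for a plain content MD5; the tests later in this diff expect `"etag"` to become `etag-1`. A stand-alone sketch, with canonicalizeETag (not shown in this diff) approximated as a quote trim:

```
package main

import (
	"fmt"
	"strings"
)

// canonicalize strips the surrounding quotes Azure puts on ETags; this stands
// in for Minio's canonicalizeETag, whose implementation is not in this diff.
func canonicalize(etag string) string {
	return strings.Trim(etag, `"`)
}

// toS3ETag mirrors azureToS3ETag above: the "-1" suffix makes the value look
// like a multipart ETag, so S3 clients will not treat it as the content MD5.
func toS3ETag(etag string) string {
	return canonicalize(etag) + "-1"
}

func main() {
	fmt.Println(toS3ETag(`"0x8D4A5D5C2E1B3F0"`)) // 0x8D4A5D5C2E1B3F0-1 (made-up Azure-style ETag)
	fmt.Println(toS3ETag("etag"))                // etag-1
}
```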
resp, err := a.client.ListContainers(storage.ListContainersParameters{ Prefix: bucket, }) if err != nil { - return BucketInfo{}, azureToObjectError(traceError(err), bucket) + return bi, azureToObjectError(traceError(err), bucket) } for _, container := range resp.Containers { if container.Name == bucket { @@ -209,7 +226,7 @@ func (a *azureObjects) GetBucketInfo(bucket string) (BucketInfo, error) { } // else continue } } - return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket}) + return bi, traceError(BucketNotFound{Bucket: bucket}) } // ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers. @@ -260,7 +277,42 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max Name: object.Name, ModTime: t, Size: object.Properties.ContentLength, - ETag: canonicalizeETag(object.Properties.Etag), + ETag: azureToS3ETag(object.Properties.Etag), + ContentType: object.Properties.ContentType, + ContentEncoding: object.Properties.ContentEncoding, + }) + } + result.Prefixes = resp.BlobPrefixes + return result, nil +} + +// ListObjectsV2 - list all blobs in Azure bucket filtered by prefix +func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (result ListObjectsV2Info, err error) { + resp, err := a.client.ListBlobs(bucket, storage.ListBlobsParameters{ + Prefix: prefix, + Marker: continuationToken, + Delimiter: delimiter, + MaxResults: uint(maxKeys), + }) + if err != nil { + return result, azureToObjectError(traceError(err), bucket, prefix) + } + // If NextMarker is not empty, this means response is truncated and NextContinuationToken should be set + if resp.NextMarker != "" { + result.IsTruncated = true + result.NextContinuationToken = resp.NextMarker + } + for _, object := range resp.Blobs { + t, e := time.Parse(time.RFC1123, object.Properties.LastModified) + if e != nil { + continue + } + result.Objects = append(result.Objects, ObjectInfo{ + Bucket: bucket, + Name: object.Name, + ModTime: t, + Size: object.Properties.ContentLength, + ETag: azureToS3ETag(object.Properties.Etag), ContentType: object.Properties.ContentType, ContentEncoding: object.Properties.ContentEncoding, }) @@ -323,7 +375,7 @@ func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, objInfo = ObjectInfo{ Bucket: bucket, UserDefined: meta, - ETag: canonicalizeETag(prop.Etag), + ETag: azureToS3ETag(prop.Etag), ModTime: t, Name: object, Size: prop.ContentLength, @@ -618,31 +670,6 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, return a.GetObjectInfo(bucket, object) } -func anonErrToObjectErr(statusCode int, params ...string) error { - bucket := "" - object := "" - if len(params) >= 1 { - bucket = params[0] - } - if len(params) == 2 { - object = params[1] - } - - switch statusCode { - case http.StatusNotFound: - if object != "" { - return ObjectNotFound{bucket, object} - } - return BucketNotFound{Bucket: bucket} - case http.StatusBadRequest: - if object != "" { - return ObjectNameInvalid{bucket, object} - } - return BucketNameInvalid{Bucket: bucket} - } - return errUnexpected -} - // Copied from github.com/Azure/azure-sdk-for-go/storage/blob.go func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values { out := url.Values{} diff --git a/cmd/gateway-azure_test.go b/cmd/gateway-azure_test.go index 7e5639b7f..29b70889d 100644 --- a/cmd/gateway-azure_test.go +++ b/cmd/gateway-azure_test.go @@ -18,21 +18,41 @@ package cmd import ( "net/http" + 
"net/url" "reflect" "testing" "github.com/Azure/azure-sdk-for-go/storage" ) +// Test azureToS3ETag. +func TestAzureToS3ETag(t *testing.T) { + tests := []struct { + etag string + expected string + }{ + {`"etag"`, `etag-1`}, + {"etag", "etag-1"}, + } + for i, test := range tests { + got := azureToS3ETag(test.etag) + if got != test.expected { + t.Errorf("test %d: got:%s expected:%s", i+1, got, test.expected) + } + } +} + // Test canonical metadata. func TestS3ToAzureHeaders(t *testing.T) { headers := map[string]string{ "accept-encoding": "gzip", "content-encoding": "gzip", + "X-Amz-Meta-Hdr": "value", } expectedHeaders := map[string]string{ "Accept-Encoding": "gzip", "Content-Encoding": "gzip", + "X-Ms-Meta-Hdr": "value", } actualHeaders := s3ToAzureHeaders(headers) if !reflect.DeepEqual(actualHeaders, expectedHeaders) { @@ -155,3 +175,81 @@ func TestAzureParseBlockID(t *testing.T) { t.Fatal("Expected azureParseBlockID() to return error") } } + +// Test azureListBlobsGetParameters() +func TestAzureListBlobsGetParameters(t *testing.T) { + + // Test values set 1 + expectedURLValues := url.Values{} + expectedURLValues.Set("prefix", "test") + expectedURLValues.Set("delimiter", "_") + expectedURLValues.Set("marker", "marker") + expectedURLValues.Set("include", "hello") + expectedURLValues.Set("maxresults", "20") + expectedURLValues.Set("timeout", "10") + + setBlobParameters := storage.ListBlobsParameters{"test", "_", "marker", "hello", 20, 10} + + // Test values set 2 + expectedURLValues1 := url.Values{} + + setBlobParameters1 := storage.ListBlobsParameters{"", "", "", "", 0, 0} + + testCases := []struct { + name string + args storage.ListBlobsParameters + want url.Values + }{ + {"TestIfValuesSet", setBlobParameters, expectedURLValues}, + {"TestIfValuesNotSet", setBlobParameters1, expectedURLValues1}, + } + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + if got := azureListBlobsGetParameters(test.args); !reflect.DeepEqual(got, test.want) { + t.Errorf("azureListBlobsGetParameters() = %v, want %v", got, test.want) + } + }) + } +} + +func TestAnonErrToObjectErr(t *testing.T) { + testCases := []struct { + name string + statusCode int + params []string + wantErr error + }{ + {"ObjectNotFound", + http.StatusNotFound, + []string{"testBucket", "testObject"}, + ObjectNotFound{Bucket: "testBucket", Object: "testObject"}, + }, + {"BucketNotFound", + http.StatusNotFound, + []string{"testBucket", ""}, + BucketNotFound{Bucket: "testBucket"}, + }, + {"ObjectNameInvalid", + http.StatusBadRequest, + []string{"testBucket", "testObject"}, + ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"}, + }, + {"BucketNameInvalid", + http.StatusBadRequest, + []string{"testBucket", ""}, + BucketNameInvalid{Bucket: "testBucket"}, + }, + {"UnexpectedError", + http.StatusBadGateway, + []string{"testBucket", "testObject"}, + errUnexpected, + }, + } + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + if err := anonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) { + t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr) + } + }) + } +} diff --git a/cmd/gateway-gcs-anonymous.go b/cmd/gateway-gcs-anonymous.go new file mode 100644 index 000000000..095d37178 --- /dev/null +++ b/cmd/gateway-gcs-anonymous.go @@ -0,0 +1,142 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "fmt" + "io" + "net/http" + "strconv" + "time" +) + +func toGCSPublicURL(bucket, object string) string { + return fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucket, object) +} + +// AnonPutObject creates a new object anonymously with the incoming data, +func (l *gcsGateway) AnonPutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) { + + return ObjectInfo{}, NotImplemented{} +} + +// AnonGetObject - Get object anonymously +func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error { + req, err := http.NewRequest("GET", toGCSPublicURL(bucket, object), nil) + if err != nil { + return gcsToObjectError(traceError(err), bucket, object) + } + + if length > 0 && startOffset > 0 { + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) + } else if startOffset > 0 { + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", startOffset)) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return gcsToObjectError(traceError(err), bucket, object) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK { + return gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) + } + + _, err = io.Copy(writer, resp.Body) + return gcsToObjectError(traceError(err), bucket, object) +} + +// AnonGetObjectInfo - Get object info anonymously +func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { + resp, err := http.Head(toGCSPublicURL(bucket, object)) + if err != nil { + return objInfo, gcsToObjectError(traceError(err), bucket, object) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return objInfo, gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) + } + + var contentLength int64 + contentLengthStr := resp.Header.Get("Content-Length") + if contentLengthStr != "" { + contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) + if err != nil { + return objInfo, gcsToObjectError(traceError(errUnexpected), bucket, object) + } + } + + t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified")) + if err != nil { + return objInfo, traceError(err) + } + + objInfo.ModTime = t + objInfo.Bucket = bucket + objInfo.UserDefined = make(map[string]string) + if resp.Header.Get("Content-Encoding") != "" { + objInfo.UserDefined["Content-Encoding"] = resp.Header.Get("Content-Encoding") + } + objInfo.UserDefined["Content-Type"] = resp.Header.Get("Content-Type") + objInfo.ETag = resp.Header.Get("Etag") + objInfo.ModTime = t + objInfo.Name = object + objInfo.Size = contentLength + return +} + +// AnonListObjects - List objects anonymously +func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { + result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys) + if err != 
nil { + return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket) + } + + return fromMinioClientListBucketResult(bucket, result), nil +} + +// AnonListObjectsV2 - List objects in V2 mode, anonymously +func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) { + // Request V1 List Object to the backend + result, err := l.anonClient.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys) + if err != nil { + return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket) + } + // translate V1 Result to V2Info + return fromMinioClientListBucketResultToV2Info(bucket, result), nil +} + +// AnonGetBucketInfo - Get bucket metadata anonymously. +func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) { + resp, err := http.Head(toGCSPublicURL(bucket, "")) + if err != nil { + return bucketInfo, gcsToObjectError(traceError(err)) + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return bucketInfo, gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket) + } + + // Last-Modified date being returned by GCS + return BucketInfo{ + Name: bucket, + }, nil +} diff --git a/cmd/gateway-gcs-errors.go b/cmd/gateway-gcs-errors.go new file mode 100644 index 000000000..a4cb06b89 --- /dev/null +++ b/cmd/gateway-gcs-errors.go @@ -0,0 +1,27 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import "errors" + +var ( + // Project ID format is not valid. + errGCSInvalidProjectID = errors.New("GCS project id is either empty or invalid") + + // Project ID not found + errGCSProjectIDNotFound = errors.New("unknown project id") +) diff --git a/cmd/gateway-gcs-unsupported.go b/cmd/gateway-gcs-unsupported.go new file mode 100644 index 000000000..5c1b6fda8 --- /dev/null +++ b/cmd/gateway-gcs-unsupported.go @@ -0,0 +1,42 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +// HealBucket - Not relevant. +func (l *gcsGateway) HealBucket(bucket string) error { + return traceError(NotImplemented{}) +} + +// ListBucketsHeal - Not relevant. +func (l *gcsGateway) ListBucketsHeal() (buckets []BucketInfo, err error) { + return []BucketInfo{}, traceError(NotImplemented{}) +} + +// HealObject - Not relevant. 
+func (l *gcsGateway) HealObject(bucket string, object string) (int, int, error) { + return 0, 0, traceError(NotImplemented{}) +} + +// ListObjectsHeal - Not relevant. +func (l *gcsGateway) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { + return ListObjectsInfo{}, traceError(NotImplemented{}) +} + +// ListUploadsHeal - Not relevant. +func (l *gcsGateway) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) { + return ListMultipartsInfo{}, traceError(NotImplemented{}) +} diff --git a/cmd/gateway-gcs.go b/cmd/gateway-gcs.go new file mode 100644 index 000000000..7ebae3a9d --- /dev/null +++ b/cmd/gateway-gcs.go @@ -0,0 +1,1123 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "io" + "math" + "regexp" + "strings" + "time" + + "golang.org/x/oauth2/google" + + "cloud.google.com/go/storage" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + + minio "github.com/minio/minio-go" + "github.com/minio/minio-go/pkg/policy" +) + +const ( + // gcsMinioSysTmp is used for multiparts. We have "minio.sys.tmp" prefix so that + // listing on the GCS lists this entry in the end. Also in the gateway + // ListObjects we filter out this entry. + gcsMinioSysTmp = "minio.sys.tmp/" + + // Path where multipart objects are saved. + // If we change the backend format we will use a different url path like /multipart/v2 + // but we will not migrate old data. + gcsMinioMultipartPathV1 = gcsMinioSysTmp + "multipart/v1" + + // Multipart meta file. + gcsMinioMultipartMeta = "gcs.json" + + // gcs.json version number + gcsMinioMultipartMetaCurrentVersion = "1" + + // token prefixed with GCS returned marker to differentiate + // from user supplied marker. + gcsTokenPrefix = "{minio}" + + // Maximum component object count to create a composite object. + // Refer https://cloud.google.com/storage/docs/composite-objects + gcsMaxComponents = 32 + + // gcsMaxPartCount - maximum multipart parts GCS supports which is 32 x 32 = 1024. + gcsMaxPartCount = 1024 + + // Every 24 hours we scan minio.sys.tmp to delete expired multiparts in minio.sys.tmp + gcsCleanupInterval = time.Hour * 24 + + // The cleanup routine deletes files older than 2 weeks in minio.sys.tmp + gcsMultipartExpiry = time.Hour * 24 * 14 +) + +// Stored in gcs.json - Contents of this file is not used anywhere. It can be +// used for debugging purposes. +type gcsMultipartMetaV1 struct { + Version string `json:"version"` // Version number + Bucket string `json:"bucket"` // Bucket name + Object string `json:"object"` // Object name +} + +// Returns name of the multipart meta object. 
+func gcsMultipartMetaName(uploadID string) string { + return fmt.Sprintf("%s/%s/%s", gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta) +} + +// Returns name of the part object. +func gcsMultipartDataName(uploadID string, partNumber int, etag string) string { + return fmt.Sprintf("%s/%s/%05d.%s", gcsMinioMultipartPathV1, uploadID, partNumber, etag) +} + +// Convert Minio errors to minio object layer errors. +func gcsToObjectError(err error, params ...string) error { + if err == nil { + return nil + } + + e, ok := err.(*Error) + if !ok { + // Code should be fixed if this function is called without doing traceError() + // Else handling different situations in this function makes this function complicated. + errorIf(err, "Expected type *Error") + return err + } + + err = e.e + + bucket := "" + object := "" + if len(params) >= 1 { + bucket = params[0] + } + if len(params) == 2 { + object = params[1] + } + + // in some cases just a plain error is being returned + switch err.Error() { + case "storage: bucket doesn't exist": + err = BucketNotFound{ + Bucket: bucket, + } + e.e = err + return e + case "storage: object doesn't exist": + err = ObjectNotFound{ + Bucket: bucket, + Object: object, + } + e.e = err + return e + } + + googleAPIErr, ok := err.(*googleapi.Error) + if !ok { + // We don't interpret non Minio errors. As minio errors will + // have StatusCode to help to convert to object errors. + e.e = err + return e + } + + if len(googleAPIErr.Errors) == 0 { + e.e = err + return e + } + + reason := googleAPIErr.Errors[0].Reason + message := googleAPIErr.Errors[0].Message + + switch reason { + case "required": + // Anonymous users does not have storage.xyz access to project 123. + fallthrough + case "keyInvalid": + fallthrough + case "forbidden": + err = PrefixAccessDenied{ + Bucket: bucket, + Object: object, + } + case "invalid": + err = BucketNameInvalid{ + Bucket: bucket, + } + case "notFound": + if object != "" { + err = ObjectNotFound{ + Bucket: bucket, + Object: object, + } + break + } + err = BucketNotFound{Bucket: bucket} + case "conflict": + if message == "You already own this bucket. Please select another name." { + err = BucketAlreadyOwnedByYou{Bucket: bucket} + break + } + if message == "Sorry, that name is not available. Please try a different one." { + err = BucketAlreadyExists{Bucket: bucket} + break + } + err = BucketNotEmpty{Bucket: bucket} + default: + err = fmt.Errorf("Unsupported error reason: %s", reason) + } + + e.e = err + return e +} + +// gcsProjectIDRegex defines a valid gcs project id format +var gcsProjectIDRegex = regexp.MustCompile("^[a-z][a-z0-9-]{5,29}$") + +// isValidGCSProjectIDFormat - checks if a given project id format is valid or not. +// Project IDs must start with a lowercase letter and can have lowercase ASCII letters, +// digits or hyphens. Project IDs must be between 6 and 30 characters. +// Ref: https://cloud.google.com/resource-manager/reference/rest/v1/projects#Project (projectId section) +func isValidGCSProjectIDFormat(projectID string) bool { + // Checking projectID format + return gcsProjectIDRegex.MatchString(projectID) +} + +// checkGCSProjectID - checks if the project ID does really exist using resource manager API. 
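+// It uses Google Application Default Credentials (google.DefaultClient with a
+// read-only cloud-platform scope) and pages through the projects visible to those
+// credentials, returning errGCSProjectIDNotFound if the ID never shows up.
+// A typical call, with a hypothetical project ID, would be:
+//
+//	if err := checkGCSProjectID(context.Background(), "my-gcs-project"); err != nil {
+//		// the project id is unknown or could not be listed
+//	}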
+func checkGCSProjectID(ctx context.Context, projectID string) error { + // Check if a project id associated to the current account does really exist + resourceManagerClient, err := google.DefaultClient(ctx, cloudresourcemanager.CloudPlatformReadOnlyScope) + if err != nil { + return err + } + + baseSvc, err := cloudresourcemanager.New(resourceManagerClient) + if err != nil { + return err + } + + projectSvc := cloudresourcemanager.NewProjectsService(baseSvc) + + curPageToken := "" + + // Iterate over projects list result pages and immediately return nil when + // the project ID is found. + for { + resp, err := projectSvc.List().PageToken(curPageToken).Context(ctx).Do() + if err != nil { + return fmt.Errorf("Error getting projects list: %s", err.Error()) + } + + for _, p := range resp.Projects { + if p.ProjectId == projectID { + return nil + } + } + + if resp.NextPageToken != "" { + curPageToken = resp.NextPageToken + } else { + break + } + } + + return errGCSProjectIDNotFound +} + +// gcsGateway - Implements gateway for Minio and GCS compatible object storage servers. +type gcsGateway struct { + client *storage.Client + anonClient *minio.Core + projectID string + ctx context.Context +} + +const googleStorageEndpoint = "storage.googleapis.com" + +// newGCSGateway returns gcs gatewaylayer +func newGCSGateway(projectID string) (GatewayLayer, error) { + ctx := context.Background() + + err := checkGCSProjectID(ctx, projectID) + if err != nil { + return nil, err + } + + // Initialize a GCS client. + client, err := storage.NewClient(ctx) + if err != nil { + return nil, err + } + + // Initialize a anonymous client with minio core APIs. + anonClient, err := minio.NewCore(googleStorageEndpoint, "", "", true) + if err != nil { + return nil, err + } + + gateway := &gcsGateway{ + client: client, + projectID: projectID, + ctx: ctx, + anonClient: anonClient, + } + // Start background process to cleanup old files in minio.sys.tmp + go gateway.CleanupGCSMinioSysTmp() + return gateway, nil +} + +// Cleanup old files in minio.sys.tmp of the given bucket. +func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) { + it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Prefix: gcsMinioSysTmp, Versions: false}) + for { + attrs, err := it.Next() + if err != nil { + if err != iterator.Done { + errorIf(err, "Object listing error on bucket %s during purging of old files in minio.sys.tmp", bucket) + } + return + } + if time.Since(attrs.Updated) > gcsMultipartExpiry { + // Delete files older than 2 weeks. + err := l.client.Bucket(bucket).Object(attrs.Name).Delete(l.ctx) + if err != nil { + errorIf(err, "Unable to delete %s/%s during purging of old files in minio.sys.tmp", bucket, attrs.Name) + return + } + } + } +} + +// Cleanup old files in minio.sys.tmp of all buckets. +func (l *gcsGateway) CleanupGCSMinioSysTmp() { + for { + it := l.client.Buckets(l.ctx, l.projectID) + for { + attrs, err := it.Next() + if err != nil { + if err != iterator.Done { + errorIf(err, "Bucket listing error during purging of old files in minio.sys.tmp") + } + break + } + l.CleanupGCSMinioSysTmpBucket(attrs.Name) + } + // Run the cleanup loop every 1 day. + time.Sleep(gcsCleanupInterval) + } +} + +// Shutdown - save any gateway metadata to disk +// if necessary and reload upon next restart. +func (l *gcsGateway) Shutdown() error { + return nil +} + +// StorageInfo - Not relevant to GCS backend. 
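+// Capacity and usage accounting belong to GCS itself, so the gateway just
+// reports an empty StorageInfo.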
+func (l *gcsGateway) StorageInfo() StorageInfo { + return StorageInfo{} +} + +// MakeBucketWithLocation - Create a new container on GCS backend. +func (l *gcsGateway) MakeBucketWithLocation(bucket, location string) error { + bkt := l.client.Bucket(bucket) + + // we'll default to the us multi-region in case of us-east-1 + if location == "us-east-1" { + location = "us" + } + + err := bkt.Create(l.ctx, l.projectID, &storage.BucketAttrs{ + Location: location, + }) + + return gcsToObjectError(traceError(err), bucket) +} + +// GetBucketInfo - Get bucket metadata.. +func (l *gcsGateway) GetBucketInfo(bucket string) (BucketInfo, error) { + attrs, err := l.client.Bucket(bucket).Attrs(l.ctx) + if err != nil { + return BucketInfo{}, gcsToObjectError(traceError(err), bucket) + } + + return BucketInfo{ + Name: attrs.Name, + Created: attrs.Created, + }, nil +} + +// ListBuckets lists all buckets under your project-id on GCS. +func (l *gcsGateway) ListBuckets() (buckets []BucketInfo, err error) { + it := l.client.Buckets(l.ctx, l.projectID) + + // Iterate and capture all the buckets. + for { + attrs, ierr := it.Next() + if ierr == iterator.Done { + break + } + + if ierr != nil { + return buckets, gcsToObjectError(traceError(ierr)) + } + + buckets = append(buckets, BucketInfo{ + Name: attrs.Name, + Created: attrs.Created, + }) + } + + return buckets, nil +} + +// DeleteBucket delete a bucket on GCS. +func (l *gcsGateway) DeleteBucket(bucket string) error { + itObject := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Delimiter: slashSeparator, Versions: false}) + // We list the bucket and if we find any objects we return BucketNotEmpty error. If we + // find only "minio.sys.tmp/" then we remove it before deleting the bucket. + gcsMinioPathFound := false + nonGCSMinioPathFound := false + for { + objAttrs, err := itObject.Next() + if err == iterator.Done { + break + } + if err != nil { + return gcsToObjectError(traceError(err)) + } + if objAttrs.Prefix == gcsMinioSysTmp { + gcsMinioPathFound = true + continue + } + nonGCSMinioPathFound = true + break + } + if nonGCSMinioPathFound { + return gcsToObjectError(traceError(BucketNotEmpty{})) + } + if gcsMinioPathFound { + // Remove minio.sys.tmp before deleting the bucket. + itObject = l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Versions: false, Prefix: gcsMinioSysTmp}) + for { + objAttrs, err := itObject.Next() + if err == iterator.Done { + break + } + if err != nil { + return gcsToObjectError(traceError(err)) + } + err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx) + if err != nil { + return gcsToObjectError(traceError(err)) + } + } + } + err := l.client.Bucket(bucket).Delete(l.ctx) + return gcsToObjectError(traceError(err), bucket) +} + +func toGCSPageToken(name string) string { + length := uint16(len(name)) + + b := []byte{ + 0xa, + byte(length & 0xFF), + } + + length = length >> 7 + if length > 0 { + b = append(b, byte(length&0xFF)) + } + + b = append(b, []byte(name)...) + + return base64.StdEncoding.EncodeToString(b) +} + +// Returns true if marker was returned by GCS, i.e prefixed with +// ##minio by minio gcs gateway. 
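+// Concretely, toGCSPageToken("obj3") yields "CgRvYmoz", so the NextMarker handed
+// back to clients is gcsTokenPrefix + "CgRvYmoz", i.e. "{minio}CgRvYmoz"; on the
+// next request isGCSMarker recognises the prefix and it is stripped before the
+// token is handed back to GCS.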
+func isGCSMarker(marker string) bool { + return strings.HasPrefix(marker, gcsTokenPrefix) +} + +// ListObjects - lists all blobs in GCS bucket filtered by prefix +func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { + it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{ + Delimiter: delimiter, + Prefix: prefix, + Versions: false, + }) + + isTruncated := false + nextMarker := "" + prefixes := []string{} + + // To accommodate S3-compatible applications using + // ListObjectsV1 to use object keys as markers to control the + // listing of objects, we use the following encoding scheme to + // distinguish between GCS continuation tokens and application + // supplied markers. + // + // - NextMarker in ListObjectsV1 response is constructed by + // prefixing "##minio" to the GCS continuation token, + // e.g, "##minioCgRvYmoz" + // + // - Application supplied markers are used as-is to list + // object keys that appear after it in the lexicographical order. + + // If application is using GCS continuation token we should + // strip the gcsTokenPrefix we added. + gcsMarker := isGCSMarker(marker) + if gcsMarker { + it.PageInfo().Token = strings.TrimPrefix(marker, gcsTokenPrefix) + } + + it.PageInfo().MaxSize = maxKeys + + objects := []ObjectInfo{} + for { + if len(objects) >= maxKeys { + // check if there is one next object and + // if that one next object is our hidden + // metadata folder, then just break + // otherwise we've truncated the output + attrs, _ := it.Next() + if attrs != nil && attrs.Prefix == gcsMinioSysTmp { + break + } + + isTruncated = true + break + } + + attrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return ListObjectsInfo{}, gcsToObjectError(traceError(err), bucket, prefix) + } + + nextMarker = toGCSPageToken(attrs.Name) + + if attrs.Prefix == gcsMinioSysTmp { + // We don't return our metadata prefix. + continue + } + if !strings.HasPrefix(prefix, gcsMinioSysTmp) { + // If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries. + // But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/ + // which will be helpful to observe the "directory structure" for debugging purposes. + if strings.HasPrefix(attrs.Prefix, gcsMinioSysTmp) || + strings.HasPrefix(attrs.Name, gcsMinioSysTmp) { + continue + } + } + if attrs.Prefix != "" { + prefixes = append(prefixes, attrs.Prefix) + continue + } + if !gcsMarker && attrs.Name <= marker { + // if user supplied a marker don't append + // objects until we reach marker (and skip it). 
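+			// Plain object-key markers are compared lexically, so anything that sorts at
+			// or before the supplied key is dropped and listing resumes right after it.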
+ continue + } + + objects = append(objects, ObjectInfo{ + Name: attrs.Name, + Bucket: attrs.Bucket, + ModTime: attrs.Updated, + Size: attrs.Size, + ETag: fmt.Sprintf("%d", attrs.CRC32C), + UserDefined: attrs.Metadata, + ContentType: attrs.ContentType, + ContentEncoding: attrs.ContentEncoding, + }) + } + + return ListObjectsInfo{ + IsTruncated: isTruncated, + NextMarker: gcsTokenPrefix + nextMarker, + Prefixes: prefixes, + Objects: objects, + }, nil +} + +// ListObjectsV2 - lists all blobs in GCS bucket filtered by prefix +func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, + delimiter string, maxKeys int) (ListObjectsV2Info, error) { + + it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{ + Delimiter: delimiter, + Prefix: prefix, + Versions: false, + }) + + isTruncated := false + it.PageInfo().MaxSize = maxKeys + + if continuationToken != "" { + // If client sends continuationToken, set it + it.PageInfo().Token = continuationToken + } else { + // else set the continuationToken to return + continuationToken = it.PageInfo().Token + if continuationToken != "" { + // If GCS SDK sets continuationToken, it means there are more than maxKeys in the current page + // and the response will be truncated + isTruncated = true + } + } + + prefixes := []string{} + objects := []ObjectInfo{} + + for { + attrs, err := it.Next() + if err == iterator.Done { + break + } + + if err != nil { + return ListObjectsV2Info{}, gcsToObjectError(traceError(err), bucket, prefix) + } + + if attrs.Prefix == gcsMinioSysTmp { + // We don't return our metadata prefix. + continue + } + if !strings.HasPrefix(prefix, gcsMinioSysTmp) { + // If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries. + // But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/ + // which will be helpful to observe the "directory structure" for debugging purposes. + if strings.HasPrefix(attrs.Prefix, gcsMinioSysTmp) || + strings.HasPrefix(attrs.Name, gcsMinioSysTmp) { + continue + } + } + + if attrs.Prefix != "" { + prefixes = append(prefixes, attrs.Prefix) + continue + } + + objects = append(objects, fromGCSAttrsToObjectInfo(attrs)) + } + + return ListObjectsV2Info{ + IsTruncated: isTruncated, + ContinuationToken: continuationToken, + NextContinuationToken: continuationToken, + Prefixes: prefixes, + Objects: objects, + }, nil +} + +// GetObject - reads an object from GCS. Supports additional +// parameters like offset and length which are synonymous with +// HTTP Range requests. +// +// startOffset indicates the starting read location of the object. +// length indicates the total length of the object. 
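+// For a full read startOffset is 0 and length is the object size; for a ranged
+// read length is the number of bytes to return, for example:
+//
+//	GetObject(bucket, key, 100, 50, w) // streams bytes 100 through 149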
+func (l *gcsGateway) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error { + // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, + // otherwise gcs will just return object not exist in case of non-existing bucket + if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { + return gcsToObjectError(traceError(err), bucket) + } + + object := l.client.Bucket(bucket).Object(key) + r, err := object.NewRangeReader(l.ctx, startOffset, length) + if err != nil { + return gcsToObjectError(traceError(err), bucket, key) + } + defer r.Close() + + if _, err := io.Copy(writer, r); err != nil { + return gcsToObjectError(traceError(err), bucket, key) + } + + return nil +} + +// fromMinioClientListBucketResultToV2Info converts minio ListBucketResult to ListObjectsV2Info +func fromMinioClientListBucketResultToV2Info(bucket string, result minio.ListBucketResult) ListObjectsV2Info { + objects := make([]ObjectInfo, len(result.Contents)) + + for i, oi := range result.Contents { + objects[i] = fromMinioClientObjectInfo(bucket, oi) + } + + prefixes := make([]string, len(result.CommonPrefixes)) + for i, p := range result.CommonPrefixes { + prefixes[i] = p.Prefix + } + + return ListObjectsV2Info{ + IsTruncated: result.IsTruncated, + Prefixes: prefixes, + Objects: objects, + ContinuationToken: result.Marker, + NextContinuationToken: result.NextMarker, + } +} + +// fromGCSAttrsToObjectInfo converts GCS BucketAttrs to gateway ObjectInfo +func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) ObjectInfo { + // All google cloud storage objects have a CRC32c hash, whereas composite objects may not have a MD5 hash + // Refer https://cloud.google.com/storage/docs/hashes-etags. Use CRC32C for ETag + return ObjectInfo{ + Name: attrs.Name, + Bucket: attrs.Bucket, + ModTime: attrs.Updated, + Size: attrs.Size, + ETag: fmt.Sprintf("%d", attrs.CRC32C), + UserDefined: attrs.Metadata, + ContentType: attrs.ContentType, + ContentEncoding: attrs.ContentEncoding, + } +} + +// GetObjectInfo - reads object info and replies back ObjectInfo +func (l *gcsGateway) GetObjectInfo(bucket string, object string) (ObjectInfo, error) { + // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, + // otherwise gcs will just return object not exist in case of non-existing bucket + if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket) + } + + attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx) + if err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, object) + } + + return fromGCSAttrsToObjectInfo(attrs), nil +} + +// PutObject - Create a new object with the incoming data, +func (l *gcsGateway) PutObject(bucket string, key string, size int64, data io.Reader, + metadata map[string]string, sha256sum string) (ObjectInfo, error) { + + // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, + // otherwise gcs will just return object not exist in case of non-existing bucket + if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket) + } + + reader := data + + var sha256Writer hash.Hash + if sha256sum != "" { + sha256Writer = sha256.New() + reader = io.TeeReader(data, sha256Writer) + } + + md5sum := metadata["etag"] + delete(metadata, "etag") + + object := l.client.Bucket(bucket).Object(key) + + w := object.NewWriter(l.ctx) + + 
w.ContentType = metadata["content-type"] + w.ContentEncoding = metadata["content-encoding"] + w.Metadata = metadata + if md5sum != "" { + var err error + w.MD5, err = hex.DecodeString(md5sum) + if err != nil { + // Close the object writer upon error. + w.Close() + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + } + + if _, err := io.CopyN(w, reader, size); err != nil { + // Close the object writer upon error. + w.Close() + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + // Close the object writer upon success. + w.Close() + + // Verify sha256sum after close. + if sha256sum != "" { + if hex.EncodeToString(sha256Writer.Sum(nil)) != sha256sum { + object.Delete(l.ctx) + return ObjectInfo{}, traceError(SHA256Mismatch{}) + } + } + + attrs, err := object.Attrs(l.ctx) + if err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + + return fromGCSAttrsToObjectInfo(attrs), nil +} + +// CopyObject - Copies a blob from source container to destination container. +func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, + metadata map[string]string) (ObjectInfo, error) { + + src := l.client.Bucket(srcBucket).Object(srcObject) + dst := l.client.Bucket(destBucket).Object(destObject) + + attrs, err := dst.CopierFrom(src).Run(l.ctx) + if err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), destBucket, destObject) + } + + return fromGCSAttrsToObjectInfo(attrs), nil +} + +// DeleteObject - Deletes a blob in bucket +func (l *gcsGateway) DeleteObject(bucket string, object string) error { + err := l.client.Bucket(bucket).Object(object).Delete(l.ctx) + if err != nil { + return gcsToObjectError(traceError(err), bucket, object) + } + + return nil +} + +// NewMultipartUpload - upload object in multiple parts +func (l *gcsGateway) NewMultipartUpload(bucket string, key string, metadata map[string]string) (uploadID string, err error) { + // generate new uploadid + uploadID = mustGetUUID() + + // generate name for part zero + meta := gcsMultipartMetaName(uploadID) + + w := l.client.Bucket(bucket).Object(meta).NewWriter(l.ctx) + defer w.Close() + + w.ContentType = metadata["content-type"] + w.ContentEncoding = metadata["content-encoding"] + w.Metadata = metadata + + if err = json.NewEncoder(w).Encode(gcsMultipartMetaV1{ + gcsMinioMultipartMetaCurrentVersion, + bucket, + key, + }); err != nil { + return "", gcsToObjectError(traceError(err), bucket, key) + } + return uploadID, nil +} + +// ListMultipartUploads - lists all multipart uploads. +func (l *gcsGateway) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) { + return ListMultipartsInfo{ + KeyMarker: keyMarker, + UploadIDMarker: uploadIDMarker, + MaxUploads: maxUploads, + Prefix: prefix, + Delimiter: delimiter, + }, nil +} + +// CopyObjectPart - copy part of object to other bucket and object +func (l *gcsGateway) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) { + return PartInfo{}, traceError(NotSupported{}) +} + +// Checks if minio.sys.tmp/multipart/v1//gcs.json exists, returns +// an object layer compatible error upon any error. 
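+// That is, it stats gcsMultipartMetaName(uploadID), e.g.
+// minio.sys.tmp/multipart/v1/<uploadID>/gcs.json, and maps any failure (typically
+// storage's "object doesn't exist" error) through gcsToObjectError.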
+func (l *gcsGateway) checkUploadIDExists(bucket string, key string, uploadID string) error { + _, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx) + return gcsToObjectError(traceError(err), bucket, key) +} + +// PutObjectPart puts a part of object in bucket +func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) { + if err := l.checkUploadIDExists(bucket, key, uploadID); err != nil { + return PartInfo{}, err + } + + var sha256Writer hash.Hash + + var etag string + // Honor etag if client did send md5Hex. + if md5Hex != "" { + etag = md5Hex + } else { + // Generate random ETag. + etag = getMD5Hash([]byte(mustGetUUID())) + } + + reader := data + if sha256sum != "" { + sha256Writer = sha256.New() + reader = io.TeeReader(data, sha256Writer) + } + + object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag)) + w := object.NewWriter(l.ctx) + // Disable "chunked" uploading in GCS client. If enabled, it can cause a corner case + // where it tries to upload 0 bytes in the last chunk and get error from server. + w.ChunkSize = 0 + if md5Hex != "" { + var err error + w.MD5, err = hex.DecodeString(md5Hex) + if err != nil { + // Make sure to close object writer upon error. + w.Close() + return PartInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + } + + if _, err := io.CopyN(w, reader, size); err != nil { + // Make sure to close object writer upon error. + w.Close() + return PartInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + + // Make sure to close the object writer upon success. + w.Close() + + // Verify sha256sum after Close(). + if sha256sum != "" { + if hex.EncodeToString(sha256Writer.Sum(nil)) != sha256sum { + object.Delete(l.ctx) + return PartInfo{}, traceError(SHA256Mismatch{}) + } + } + + return PartInfo{ + PartNumber: partNumber, + ETag: etag, + LastModified: UTCNow(), + Size: size, + }, nil + +} + +// ListObjectParts returns all object parts for specified object in specified bucket +func (l *gcsGateway) ListObjectParts(bucket string, key string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) { + return ListPartsInfo{}, l.checkUploadIDExists(bucket, key, uploadID) +} + +// Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up. +func (l *gcsGateway) cleanupMultipartUpload(bucket, key, uploadID string) error { + prefix := fmt.Sprintf("%s/%s/", gcsMinioMultipartPathV1, uploadID) + + // iterate through all parts and delete them + it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Prefix: prefix, Versions: false}) + + for { + attrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return gcsToObjectError(traceError(err), bucket, key) + } + + object := l.client.Bucket(bucket).Object(attrs.Name) + // Ignore the error as parallel AbortMultipartUpload might have deleted it. 
+ object.Delete(l.ctx) + } + + return nil +} + +// AbortMultipartUpload aborts a ongoing multipart upload +func (l *gcsGateway) AbortMultipartUpload(bucket string, key string, uploadID string) error { + if err := l.checkUploadIDExists(bucket, key, uploadID); err != nil { + return err + } + return l.cleanupMultipartUpload(bucket, key, uploadID) +} + +// CompleteMultipartUpload completes ongoing multipart upload and finalizes object +// Note that there is a limit (currently 32) to the number of components that can +// be composed in a single operation. There is a limit (currently 1024) to the total +// number of components for a given composite object. This means you can append to +// each object at most 1023 times. There is a per-project rate limit (currently 200) +// to the number of components you can compose per second. This rate counts both the +// components being appended to a composite object as well as the components being +// copied when the composite object of which they are a part is copied. +func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID string, uploadedParts []completePart) (ObjectInfo, error) { + meta := gcsMultipartMetaName(uploadID) + object := l.client.Bucket(bucket).Object(meta) + + partZeroAttrs, err := object.Attrs(l.ctx) + if err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + + r, err := object.NewReader(l.ctx) + if err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + defer r.Close() + + // Check version compatibility of the meta file before compose() + multipartMeta := gcsMultipartMetaV1{} + if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + + if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion { + return ObjectInfo{}, gcsToObjectError(traceError(errFormatNotSupported), bucket, key) + } + + // Validate if the gcs.json stores valid entries for the bucket and key. + if multipartMeta.Bucket != bucket || multipartMeta.Object != key { + return ObjectInfo{}, gcsToObjectError(InvalidUploadID{ + UploadID: uploadID, + }, bucket, key) + } + + var parts []*storage.ObjectHandle + for _, uploadedPart := range uploadedParts { + parts = append(parts, l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, + uploadedPart.PartNumber, uploadedPart.ETag))) + } + + // Returns name of the composed object. + gcsMultipartComposeName := func(uploadID string, composeNumber int) string { + return fmt.Sprintf("%s/tmp/%s/composed-object-%05d", gcsMinioSysTmp, uploadID, composeNumber) + } + + composeCount := int(math.Ceil(float64(len(parts)) / float64(gcsMaxComponents))) + if composeCount > 1 { + // Create composes of every 32 parts. + composeParts := make([]*storage.ObjectHandle, composeCount) + for i := 0; i < composeCount; i++ { + // Create 'composed-object-N' using next 32 parts. + composeParts[i] = l.client.Bucket(bucket).Object(gcsMultipartComposeName(uploadID, i)) + start := i * gcsMaxComponents + end := start + gcsMaxComponents + if end > len(parts) { + end = len(parts) + } + + composer := composeParts[i].ComposerFrom(parts[start:end]...) + composer.ContentType = partZeroAttrs.ContentType + composer.Metadata = partZeroAttrs.Metadata + + if _, err = composer.Run(l.ctx); err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + } + + // As composes are successfully created, final object needs to be created using composes. 
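+		// For example, with 100 uploaded parts composeCount = ceil(100/32) = 4, so four
+		// intermediate objects composed-object-00000 through composed-object-00003 are
+		// built from parts [0:32], [32:64], [64:96] and [96:100], and the final object is
+		// then composed from those four. Since gcsMaxPartCount is 1024 (32 * 32),
+		// composeCount itself never exceeds gcsMaxComponents, keeping the final compose
+		// within the 32-component limit.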
+ parts = composeParts + } + + composer := l.client.Bucket(bucket).Object(key).ComposerFrom(parts...) + composer.ContentType = partZeroAttrs.ContentType + composer.Metadata = partZeroAttrs.Metadata + attrs, err := composer.Run(l.ctx) + if err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + if err = l.cleanupMultipartUpload(bucket, key, uploadID); err != nil { + return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) + } + return fromGCSAttrsToObjectInfo(attrs), nil +} + +// SetBucketPolicies - Set policy on bucket +func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { + var policies []BucketAccessPolicy + + for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) { + policies = append(policies, BucketAccessPolicy{ + Prefix: prefix, + Policy: policy, + }) + } + + prefix := bucket + "/*" // For all objects inside the bucket. + + if len(policies) != 1 { + return traceError(NotImplemented{}) + } + if policies[0].Prefix != prefix { + return traceError(NotImplemented{}) + } + + acl := l.client.Bucket(bucket).ACL() + if policies[0].Policy == policy.BucketPolicyNone { + if err := acl.Delete(l.ctx, storage.AllUsers); err != nil { + return gcsToObjectError(traceError(err), bucket) + } + return nil + } + + var role storage.ACLRole + switch policies[0].Policy { + case policy.BucketPolicyReadOnly: + role = storage.RoleReader + case policy.BucketPolicyWriteOnly: + role = storage.RoleWriter + default: + return traceError(NotImplemented{}) + } + + if err := acl.Set(l.ctx, storage.AllUsers, role); err != nil { + return gcsToObjectError(traceError(err), bucket) + } + + return nil +} + +// GetBucketPolicies - Get policy on bucket +func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) { + rules, err := l.client.Bucket(bucket).ACL().List(l.ctx) + if err != nil { + return policy.BucketAccessPolicy{}, gcsToObjectError(traceError(err), bucket) + } + + policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"} + for _, r := range rules { + if r.Entity != storage.AllUsers || r.Role == storage.RoleOwner { + continue + } + switch r.Role { + case storage.RoleReader: + policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "") + case storage.RoleWriter: + policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyWriteOnly, bucket, "") + } + } + + return policyInfo, nil +} + +// DeleteBucketPolicies - Delete all policies on bucket +func (l *gcsGateway) DeleteBucketPolicies(bucket string) error { + // This only removes the storage.AllUsers policies + if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil { + return gcsToObjectError(traceError(err), bucket) + } + + return nil +} diff --git a/cmd/gateway-gcs_test.go b/cmd/gateway-gcs_test.go new file mode 100644 index 000000000..9a865973e --- /dev/null +++ b/cmd/gateway-gcs_test.go @@ -0,0 +1,181 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "fmt" + "reflect" + "testing" + + minio "github.com/minio/minio-go" +) + +func TestToGCSPageToken(t *testing.T) { + testCases := []struct { + Name string + Token string + }{ + { + Name: "A", + Token: "CgFB", + }, + { + Name: "AAAAAAAAAA", + Token: "CgpBQUFBQUFBQUFB", + }, + { + Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + Token: "CmRBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB", + }, + { + Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + Token: "CpEDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUE=", + }, + { + Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + Token: "CpIDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB", + }, + { + Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + Token: 
"CpMDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQQ==", + }, + { + Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + Token: "CvQDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUE=", + }, + } + + for i, testCase := range testCases { + if toGCSPageToken(testCase.Name) != testCase.Token { + t.Errorf("Test %d: Expected %s, got %s", i+1, toGCSPageToken(testCase.Name), testCase.Token) + } + } + +} + +// TestIsValidGCSProjectIDFormat tests isValidGCSProjectIDFormat +func TestValidGCSProjectIDFormat(t *testing.T) { + testCases := []struct { + ProjectID string + Valid bool + }{ + {"", false}, + {"a", false}, + {"Abc", false}, + {"1bcd", false}, + // 5 chars + {"abcdb", false}, + // 6 chars + {"abcdbz", true}, + // 30 chars + {"project-id-1-project-id-more-1", true}, + // 31 chars + {"project-id-1-project-id-more-11", false}, + {"storage.googleapis.com", false}, + {"http://storage.googleapis.com", false}, + {"http://localhost:9000", false}, + {"project-id-1", true}, + {"project-id-1988832", true}, + {"projectid1414", true}, + } + + for i, testCase := range testCases { + valid := isValidGCSProjectIDFormat(testCase.ProjectID) + if valid != testCase.Valid { + t.Errorf("Test %d: Expected %v, got %v", i+1, valid, testCase.Valid) + } + } +} + +// Test for isGCSMarker. +func TestIsGCSMarker(t *testing.T) { + testCases := []struct { + marker string + expected bool + }{ + { + marker: "{minio}gcs123", + expected: true, + }, + { + marker: "{mini_no}tgcs123", + expected: false, + }, + { + marker: "{minioagainnotgcs123", + expected: false, + }, + { + marker: "obj1", + expected: false, + }, + } + + for i, tc := range testCases { + if actual := isGCSMarker(tc.marker); actual != tc.expected { + t.Errorf("Test %d: marker is %s, expected %v but got %v", + i+1, tc.marker, tc.expected, actual) + } + } +} + +// Test for gcsMultipartMetaName. 
+func TestGCSMultipartMetaName(t *testing.T) { + uploadID := "a" + expected := pathJoin(gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta) + got := gcsMultipartMetaName(uploadID) + if expected != got { + t.Errorf("expected: %s, got: %s", expected, got) + } +} + +// Test for gcsMultipartDataName. +func TestGCSMultipartDataName(t *testing.T) { + var ( + uploadID = "a" + etag = "b" + partNumber = 1 + ) + expected := pathJoin(gcsMinioMultipartPathV1, uploadID, fmt.Sprintf("%05d.%s", partNumber, etag)) + got := gcsMultipartDataName(uploadID, partNumber, etag) + if expected != got { + t.Errorf("expected: %s, got: %s", expected, got) + } +} + +func TestFromMinioClientListBucketResultToV2Info(t *testing.T) { + + listBucketResult := minio.ListBucketResult{ + IsTruncated: false, + Marker: "testMarker", + NextMarker: "testMarker2", + CommonPrefixes: []minio.CommonPrefix{{Prefix: "one"}, {Prefix: "two"}}, + Contents: []minio.ObjectInfo{{Key: "testobj", ContentType: ""}}, + } + + listBucketV2Info := ListObjectsV2Info{ + Prefixes: []string{"one", "two"}, + Objects: []ObjectInfo{{Name: "testobj", Bucket: "testbucket", UserDefined: map[string]string{"Content-Type": ""}}}, + IsTruncated: false, + ContinuationToken: "testMarker", + NextContinuationToken: "testMarker2", + } + + if got := fromMinioClientListBucketResultToV2Info("testbucket", listBucketResult); !reflect.DeepEqual(got, listBucketV2Info) { + t.Errorf("fromMinioClientListBucketResultToV2Info() = %v, want %v", got, listBucketV2Info) + } +} diff --git a/cmd/gateway-handlers.go b/cmd/gateway-handlers.go index 4ce311f72..f15ef10cd 100644 --- a/cmd/gateway-handlers.go +++ b/cmd/gateway-handlers.go @@ -19,12 +19,12 @@ package cmd import ( "io" "io/ioutil" + "net" "net/http" "strconv" "encoding/hex" "encoding/json" - "encoding/xml" router "github.com/gorilla/mux" "github.com/minio/minio-go/pkg/policy" @@ -140,7 +140,7 @@ func (api gatewayAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Re } // Reads the object at startOffset and writes to mw. - if err := getObject(bucket, object, startOffset, length, writer); err != nil { + if err = getObject(bucket, object, startOffset, length, writer); err != nil { errorIf(err, "Unable to write to client.") if !dataWritten { // Error response only if no data has been written to client yet. i.e if @@ -157,6 +157,23 @@ func (api gatewayAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Re // call wrter.Write(nil) to set appropriate headers. writer.Write(nil) } + + // Get host and port from Request.RemoteAddr. + host, port, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + host, port = "", "" + } + + // Notify object accessed via a GET request. + eventNotify(eventData{ + Type: ObjectAccessedGet, + Bucket: bucket, + ObjInfo: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: host, + Port: port, + }) } // PutObjectHandler - PUT Object @@ -180,6 +197,8 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re bucket = vars["bucket"] object = vars["object"] + // TODO: we should validate the object name here + // Get Content-Md5 sent by client and verify if valid md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5")) if err != nil { @@ -212,7 +231,12 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re } // Extract metadata to be saved from incoming HTTP header. 
- metadata := extractMetadataFromHeader(r.Header) + metadata, err := extractMetadataFromHeader(r.Header) + if err != nil { + errorIf(err, "found invalid http request header") + writeErrorResponse(w, ErrInternalError, r.URL) + return + } if reqAuthType == authTypeStreamingSigned { if contentEncoding, ok := metadata["content-encoding"]; ok { contentEncoding = trimAwsChunkedContentEncoding(contentEncoding) @@ -236,8 +260,6 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re // Make sure we hex encode md5sum here. metadata["etag"] = hex.EncodeToString(md5Bytes) - sha256sum := "" - // Lock the object. objectLock := globalNSMutex.NewNSLock(bucket, object) objectLock.Lock() @@ -247,7 +269,7 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re switch reqAuthType { case authTypeAnonymous: // Create anonymous object. - objInfo, err = objectAPI.AnonPutObject(bucket, object, size, r.Body, metadata, sha256sum) + objInfo, err = objectAPI.AnonPutObject(bucket, object, size, r.Body, metadata, "") case authTypeStreamingSigned: // Initialize stream signature verifier. reader, s3Error := newSignV4ChunkedReader(r) @@ -256,7 +278,7 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re writeErrorResponse(w, s3Error, r.URL) return } - objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata, sha256sum) + objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata, "") case authTypeSignedV2, authTypePresignedV2: s3Error := isReqAuthenticatedV2(r) if s3Error != ErrNone { @@ -264,16 +286,19 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re writeErrorResponse(w, s3Error, r.URL) return } - objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum) + objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, "") case authTypePresigned, authTypeSigned: if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone { errorIf(errSignatureMismatch, "%s", dumpRequest(r)) writeErrorResponse(w, s3Error, r.URL) return } + + sha256sum := "" if !skipContentSha256Cksum(r) { - sha256sum = r.Header.Get("X-Amz-Content-Sha256") + sha256sum = getContentSha256Cksum(r) } + // Create object. objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum) default: @@ -283,12 +308,30 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re } if err != nil { + errorIf(err, "Unable to save an object %s", r.URL.Path) writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } w.Header().Set("ETag", "\""+objInfo.ETag+"\"") writeSuccessResponseHeadersOnly(w) + + // Get host and port from Request.RemoteAddr. + host, port, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + host, port = "", "" + } + + // Notify object created event. + eventNotify(eventData{ + Type: ObjectCreatedPut, + Bucket: bucket, + ObjInfo: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: host, + Port: port, + }) } // HeadObjectHandler - HEAD Object @@ -357,97 +400,23 @@ func (api gatewayAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.R // Successful response. w.WriteHeader(http.StatusOK) -} -// DeleteMultipleObjectsHandler - deletes multiple objects. 
-func (api gatewayAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) { - vars := router.Vars(r) - bucket := vars["bucket"] - - objectAPI := api.ObjectAPI() - if objectAPI == nil { - writeErrorResponse(w, ErrServerNotInitialized, r.URL) - return + // Get host and port from Request.RemoteAddr. + host, port, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + host, port = "", "" } - if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", serverConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, s3Error, r.URL) - return - } - - // Content-Length is required and should be non-zero - // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html - if r.ContentLength <= 0 { - writeErrorResponse(w, ErrMissingContentLength, r.URL) - return - } - - // Content-Md5 is requied should be set - // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html - if _, ok := r.Header["Content-Md5"]; !ok { - writeErrorResponse(w, ErrMissingContentMD5, r.URL) - return - } - - // Allocate incoming content length bytes. - deleteXMLBytes := make([]byte, r.ContentLength) - - // Read incoming body XML bytes. - if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil { - errorIf(err, "Unable to read HTTP body.") - writeErrorResponse(w, ErrInternalError, r.URL) - return - } - - // Unmarshal list of keys to be deleted. - deleteObjects := &DeleteObjectsRequest{} - if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil { - errorIf(err, "Unable to unmarshal delete objects request XML.") - writeErrorResponse(w, ErrMalformedXML, r.URL) - return - } - - var dErrs = make([]error, len(deleteObjects.Objects)) - - // Delete all requested objects in parallel. - for index, object := range deleteObjects.Objects { - dErr := objectAPI.DeleteObject(bucket, object.ObjectName) - if dErr != nil { - dErrs[index] = dErr - } - } - - // Collect deleted objects and errors if any. - var deletedObjects []ObjectIdentifier - var deleteErrors []DeleteError - for index, err := range dErrs { - object := deleteObjects.Objects[index] - // Success deleted objects are collected separately. - if err == nil { - deletedObjects = append(deletedObjects, object) - continue - } - if _, ok := errorCause(err).(ObjectNotFound); ok { - // If the object is not found it should be - // accounted as deleted as per S3 spec. - deletedObjects = append(deletedObjects, object) - continue - } - errorIf(err, "Unable to delete object. %s", object.ObjectName) - // Error during delete should be collected separately. - deleteErrors = append(deleteErrors, DeleteError{ - Code: errorCodeResponse[toAPIErrorCode(err)].Code, - Message: errorCodeResponse[toAPIErrorCode(err)].Description, - Key: object.ObjectName, - }) - } - - // Generate response - response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors) - encodedSuccessResponse := encodeResponse(response) - - // Write success response. - writeSuccessResponseXML(w, encodedSuccessResponse) + // Notify object accessed via a HEAD request. + eventNotify(eventData{ + Type: ObjectAccessedHead, + Bucket: bucket, + ObjInfo: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: host, + Port: port, + }) } // PutBucketPolicyHandler - PUT Bucket policy @@ -649,15 +618,6 @@ func (api gatewayAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Re return } - // validating region here, because isValidLocationConstraint - // reads body which has been read already. 
So only validating - // region here. - serverRegion := serverConfig.GetRegion() - if serverRegion != location { - writeErrorResponse(w, ErrInvalidRegion, r.URL) - return - } - bucketLock := globalNSMutex.NewNSLock(bucket, "") bucketLock.Lock() defer bucketLock.Unlock() @@ -746,15 +706,11 @@ func (api gatewayAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *htt return } - // Extract all the litsObjectsV1 query params to their native values. + // Extract all the listObjectsV1 query params to their native + // values. N B We delegate validation of params to respective + // gateway backends. prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query()) - // Validate all the query params before beginning to serve the request. - if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone { - writeErrorResponse(w, s3Error, r.URL) - return - } - listObjects := objectAPI.ListObjects if reqAuthType == authTypeAnonymous { listObjects = objectAPI.AnonListObjects @@ -769,6 +725,87 @@ func (api gatewayAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *htt return } response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo) + // Write success response. + writeSuccessResponseXML(w, encodeResponse(response)) +} + +// ListObjectsV2Handler - GET Bucket (List Objects) Version 2. +// -------------------------- +// This implementation of the GET operation returns some or all (up to 1000) +// of the objects in a bucket. You can use the request parameters as selection +// criteria to return a subset of the objects in a bucket. +// +// NOTE: It is recommended that this API to be used for application development. +// Minio continues to support ListObjectsV1 for supporting legacy tools. +func (api gatewayAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { + vars := router.Vars(r) + bucket := vars["bucket"] + + objectAPI := api.ObjectAPI() + if objectAPI == nil { + writeErrorResponse(w, ErrServerNotInitialized, r.URL) + return + } + + reqAuthType := getRequestAuthType(r) + + switch reqAuthType { + case authTypePresignedV2, authTypeSignedV2: + // Signature V2 validation. + s3Error := isReqAuthenticatedV2(r) + if s3Error != ErrNone { + errorIf(errSignatureMismatch, dumpRequest(r)) + writeErrorResponse(w, s3Error, r.URL) + return + } + case authTypeSigned, authTypePresigned: + s3Error := isReqAuthenticated(r, serverConfig.GetRegion()) + if s3Error != ErrNone { + errorIf(errSignatureMismatch, dumpRequest(r)) + writeErrorResponse(w, s3Error, r.URL) + return + } + case authTypeAnonymous: + // No verification needed for anonymous requests. + default: + // For all unknown auth types return error. + writeErrorResponse(w, ErrAccessDenied, r.URL) + return + } + + // Extract all the listObjectsV2 query params to their native values. + prefix, token, startAfter, delimiter, fetchOwner, maxKeys, _ := getListObjectsV2Args(r.URL.Query()) + + // In ListObjectsV2 'continuation-token' is the marker. + marker := token + // Check if 'continuation-token' is empty. + if token == "" { + // Then we need to use 'start-after' as marker instead. + marker = startAfter + } + + listObjectsV2 := objectAPI.ListObjectsV2 + if reqAuthType == authTypeAnonymous { + listObjectsV2 = objectAPI.AnonListObjectsV2 + } + + // Validate the query params before beginning to serve the request. 
+ // fetch-owner is not validated since it is a boolean + if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone { + writeErrorResponse(w, s3Error, r.URL) + return + } + // Inititate a list objects operation based on the input params. + // On success would return back ListObjectsV2Info object to be + // serialized as XML and sent as S3 compatible response body. + listObjectsV2Info, err := listObjectsV2(bucket, prefix, token, fetchOwner, delimiter, maxKeys) + if err != nil { + errorIf(err, "Unable to list objects. Args to listObjectsV2 are bucket=%s, prefix=%s, token=%s, delimiter=%s", bucket, prefix, token, delimiter) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.ContinuationToken, startAfter, delimiter, fetchOwner, listObjectsV2Info.IsTruncated, maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes) // Write success response. writeSuccessResponseXML(w, encodeResponse(response)) diff --git a/cmd/gateway-main.go b/cmd/gateway-main.go index adb2bf9d6..43bdd71e9 100644 --- a/cmd/gateway-main.go +++ b/cmd/gateway-main.go @@ -21,11 +21,14 @@ import ( "fmt" "net/url" "os" + "os/signal" "runtime" "strings" + "syscall" "github.com/gorilla/mux" "github.com/minio/cli" + miniohttp "github.com/minio/minio/pkg/http" ) const azureGatewayTemplate = `NAME: @@ -53,26 +56,13 @@ EXAMPLES: $ export MINIO_ACCESS_KEY=azureaccountname $ export MINIO_SECRET_KEY=azureaccountkey $ {{.HelpName}} + 2. Start minio gateway server for Azure Blob Storage backend on custom endpoint. $ export MINIO_ACCESS_KEY=azureaccountname $ export MINIO_SECRET_KEY=azureaccountkey $ {{.HelpName}} https://azure.example.com ` -var azureBackendCmd = cli.Command{ - Name: "azure", - Usage: "Microsoft Azure Blob Storage.", - Action: azureGatewayMain, - CustomHelpTemplate: azureGatewayTemplate, - Flags: append(serverFlags, - cli.BoolFlag{ - Name: "quiet", - Usage: "Disable startup banner.", - }, - ), - HideHelpCommand: true, -} - const s3GatewayTemplate = `NAME: {{.HelpName}} - {{.Usage}} @@ -105,26 +95,71 @@ EXAMPLES: $ {{.HelpName}} https://play.minio.io:9000 ` -var s3BackendCmd = cli.Command{ - Name: "s3", - Usage: "Amazon Simple Storage Service (S3).", - Action: s3GatewayMain, - CustomHelpTemplate: s3GatewayTemplate, - Flags: append(serverFlags, - cli.BoolFlag{ - Name: "quiet", - Usage: "Disable startup banner.", - }, - ), - HideHelpCommand: true, -} +const gcsGatewayTemplate = `NAME: + {{.HelpName}} - {{.Usage}} -var gatewayCmd = cli.Command{ - Name: "gateway", - Usage: "Start object storage gateway.", - HideHelpCommand: true, - Subcommands: []cli.Command{azureBackendCmd, s3BackendCmd}, -} +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} PROJECTID +{{if .VisibleFlags}} +FLAGS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +PROJECTID: + GCS project id, there are no defaults this is mandatory. + +ENVIRONMENT VARIABLES: + ACCESS: + MINIO_ACCESS_KEY: Username or access key of GCS. + MINIO_SECRET_KEY: Password or secret key of GCS. + + BROWSER: + MINIO_BROWSER: To disable web browser access, set this value to "off". + +EXAMPLES: + 1. Start minio gateway server for GCS backend. 
+ $ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json + (Instructions to generate credentials : https://developers.google.com/identity/protocols/application-default-credentials) + $ export MINIO_ACCESS_KEY=accesskey + $ export MINIO_SECRET_KEY=secretkey + $ {{.HelpName}} mygcsprojectid + +` + +var ( + azureBackendCmd = cli.Command{ + Name: "azure", + Usage: "Microsoft Azure Blob Storage.", + Action: azureGatewayMain, + CustomHelpTemplate: azureGatewayTemplate, + Flags: append(serverFlags, globalFlags...), + HideHelpCommand: true, + } + + s3BackendCmd = cli.Command{ + Name: "s3", + Usage: "Amazon Simple Storage Service (S3).", + Action: s3GatewayMain, + CustomHelpTemplate: s3GatewayTemplate, + Flags: append(serverFlags, globalFlags...), + HideHelpCommand: true, + } + gcsBackendCmd = cli.Command{ + Name: "gcs", + Usage: "Google Cloud Storage.", + Action: gcsGatewayMain, + CustomHelpTemplate: gcsGatewayTemplate, + Flags: append(serverFlags, globalFlags...), + HideHelpCommand: true, + } + + gatewayCmd = cli.Command{ + Name: "gateway", + Usage: "Start object storage gateway.", + Flags: append(serverFlags, globalFlags...), + HideHelpCommand: true, + Subcommands: []cli.Command{azureBackendCmd, s3BackendCmd, gcsBackendCmd}, + } +) // Represents the type of the gateway backend. type gatewayBackend string @@ -132,79 +167,33 @@ type gatewayBackend string const ( azureBackend gatewayBackend = "azure" s3Backend gatewayBackend = "s3" + gcsBackend gatewayBackend = "gcs" // Add more backends here. ) -// Returns access and secretkey set from environment variables. -func mustGetGatewayCredsFromEnv() (accessKey, secretKey string) { - // Fetch access keys from environment variables. - accessKey = os.Getenv("MINIO_ACCESS_KEY") - secretKey = os.Getenv("MINIO_SECRET_KEY") - if accessKey == "" || secretKey == "" { - fatalIf(errors.New("Missing credentials"), "Access and secret keys are mandatory to run Minio gateway server.") - } - return accessKey, secretKey -} - -// Set browser setting from environment variables -func mustSetBrowserSettingFromEnv() { - if browser := os.Getenv("MINIO_BROWSER"); browser != "" { - browserFlag, err := ParseBrowserFlag(browser) - if err != nil { - fatalIf(errors.New("invalid value"), "Unknown value ‘%s’ in MINIO_BROWSER environment variable.", browser) - } - - // browser Envs are set globally, this does not represent - // if browser is turned off or on. - globalIsEnvBrowser = true - globalIsBrowserEnabled = bool(browserFlag) - } -} - // Initialize gateway layer depending on the backend type. // Supported backend types are // // - Azure Blob Storage. +// - AWS S3. +// - Google Cloud Storage. // - Add your favorite backend here. -func newGatewayLayer(backendType gatewayBackend, endpoint, accessKey, secretKey string, secure bool) (GatewayLayer, error) { - - switch gatewayBackend(backendType) { +func newGatewayLayer(backendType gatewayBackend, arg string) (GatewayLayer, error) { + switch backendType { case azureBackend: - return newAzureLayer(endpoint, accessKey, secretKey, secure) + return newAzureLayer(arg) case s3Backend: - return newS3Gateway(endpoint, accessKey, secretKey, secure) + return newS3Gateway(arg) + case gcsBackend: + // FIXME: The following print command is temporary and + // will be removed when gcs is ready for production use. + log.Println(colorYellow("\n *** Warning: Not Ready for Production ***")) + return newGCSGateway(arg) } return nil, fmt.Errorf("Unrecognized backend type %s", backendType) } -// Initialize a new gateway config. 
-// -// DO NOT save this config, this is meant to be -// only used in memory. -func newGatewayConfig(accessKey, secretKey, region string) error { - // Initialize server config. - srvCfg := newServerConfigV18() - - // If env is set for a fresh start, save them to config file. - srvCfg.SetCredential(credential{ - AccessKey: accessKey, - SecretKey: secretKey, - }) - - // Set custom region. - srvCfg.SetRegion(region) - - // hold the mutex lock before a new config is assigned. - // Save the new config globally. - // unlock the mutex. - serverConfigMu.Lock() - serverConfig = srvCfg - serverConfigMu.Unlock() - - return nil -} - // Return endpoint. func parseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) { schemeSpecified := len(strings.Split(arg, "://")) > 1 @@ -264,6 +253,9 @@ func azureGatewayMain(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "azure", 1) } + // Validate gateway arguments. + fatalIf(validateGatewayArguments(ctx.GlobalString("address"), ctx.Args().First()), "Invalid argument") + gatewayMain(ctx, azureBackend) } @@ -273,54 +265,78 @@ func s3GatewayMain(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "s3", 1) } + // Validate gateway arguments. + fatalIf(validateGatewayArguments(ctx.GlobalString("address"), ctx.Args().First()), "Invalid argument") + gatewayMain(ctx, s3Backend) } +// Handler for 'minio gateway gcs' command line +func gcsGatewayMain(ctx *cli.Context) { + if ctx.Args().Present() && ctx.Args().First() == "help" { + cli.ShowCommandHelpAndExit(ctx, "gcs", 1) + } + + if !isValidGCSProjectIDFormat(ctx.Args().First()) { + errorIf(errGCSInvalidProjectID, "Unable to start GCS gateway with %s", ctx.Args().First()) + cli.ShowCommandHelpAndExit(ctx, "gcs", 1) + } + + gatewayMain(ctx, gcsBackend) +} + // Handler for 'minio gateway'. func gatewayMain(ctx *cli.Context, backendType gatewayBackend) { - // Fetch access and secret key from env. - accessKey, secretKey := mustGetGatewayCredsFromEnv() - - // Fetch browser env setting - mustSetBrowserSettingFromEnv() - - // Initialize new gateway config. - - newGatewayConfig(accessKey, secretKey, globalMinioDefaultRegion) - // Get quiet flag from command line argument. quietFlag := ctx.Bool("quiet") || ctx.GlobalBool("quiet") if quietFlag { log.EnableQuiet() } - serverAddr := ctx.String("address") - endpointAddr := ctx.Args().Get(0) - err := validateGatewayArguments(serverAddr, endpointAddr) - fatalIf(err, "Invalid argument") + // Fetch address option + gatewayAddr := ctx.GlobalString("address") + if gatewayAddr == ":"+globalMinioPort { + gatewayAddr = ctx.String("address") + } - // Second argument is endpoint. If no endpoint is specified then the - // gateway implementation should use a default setting. - endPoint, secure, err := parseGatewayEndpoint(endpointAddr) - fatalIf(err, "Unable to parse endpoint") + // Handle common command args. + handleCommonCmdArgs(ctx) - // Create certs path for SSL configuration. - fatalIf(createConfigDir(), "Unable to create configuration directory") + // Handle common env vars. + handleCommonEnvVars() - newObject, err := newGatewayLayer(backendType, endPoint, accessKey, secretKey, secure) - fatalIf(err, "Unable to initialize gateway layer") + // Validate if we have access, secret set through environment. + if !globalIsEnvCreds { + fatalIf(fmt.Errorf("Access and Secret keys should be set through ENVs for backend [%s]", backendType), "") + } + + // Create certs path. + fatalIf(createConfigDir(), "Unable to create configuration directories.") + + // Initialize gateway config. 
+ initConfig() + + // Enable loggers as per configuration file. + enableLoggers() + + // Init the error tracing module. + initError() + + // Check and load SSL certificates. + var err error + globalPublicCerts, globalRootCAs, globalTLSCertificate, globalIsSSL, err = getSSLConfig() + fatalIf(err, "Invalid SSL certificate file") initNSLock(false) // Enable local namespace lock. - router := mux.NewRouter().SkipClean(true) + newObject, err := newGatewayLayer(backendType, ctx.Args().First()) + fatalIf(err, "Unable to initialize gateway layer") - // credentials Envs are set globally. - globalIsEnvCreds = true + router := mux.NewRouter().SkipClean(true) // Register web router when its enabled. if globalIsBrowserEnabled { - aerr := registerWebRouter(router) - fatalIf(aerr, "Unable to configure web browser") + fatalIf(registerWebRouter(router), "Unable to configure web browser") } registerGatewayAPIRouter(router, newObject) @@ -353,22 +369,15 @@ func gatewayMain(ctx *cli.Context, backendType gatewayBackend) { } - apiServer := NewServerMux(serverAddr, registerHandlers(router, handlerFns...)) - - _, _, globalIsSSL, err = getSSLConfig() - fatalIf(err, "Invalid SSL key file") + globalHTTPServer = miniohttp.NewServer([]string{gatewayAddr}, registerHandlers(router, handlerFns...), globalTLSCertificate) // Start server, automatically configures TLS if certs are available. go func() { - cert, key := "", "" - if globalIsSSL { - cert, key = getPublicCertFile(), getPrivateKeyFile() - } - - aerr := apiServer.ListenAndServe(cert, key) - fatalIf(aerr, "Failed to start minio server") + globalHTTPServerErrorCh <- globalHTTPServer.Start() }() + signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM) + // Once endpoints are finalized, initialize the new object api. globalObjLayerMutex.Lock() globalObjectAPI = newObject @@ -377,15 +386,21 @@ func gatewayMain(ctx *cli.Context, backendType gatewayBackend) { // Prints the formatted startup message once object layer is initialized. if !quietFlag { mode := "" - if gatewayBackend(backendType) == azureBackend { + switch gatewayBackend(backendType) { + case azureBackend: mode = globalMinioModeGatewayAzure - } else if gatewayBackend(backendType) == s3Backend { + case gcsBackend: + mode = globalMinioModeGatewayGCS + case s3Backend: mode = globalMinioModeGatewayS3 } + + // Check update mode. checkUpdate(mode) - apiEndpoints := getAPIEndpoints(apiServer.Addr) - printGatewayStartupMessage(apiEndpoints, accessKey, secretKey, backendType) + + // Print gateway startup message. 
+ printGatewayStartupMessage(getAPIEndpoints(gatewayAddr), backendType) } - <-globalServiceDoneCh + handleSignals() } diff --git a/cmd/gateway-main_test.go b/cmd/gateway-main_test.go index b80d0292b..fdab9580f 100644 --- a/cmd/gateway-main_test.go +++ b/cmd/gateway-main_test.go @@ -17,7 +17,6 @@ package cmd import ( - "os" "strings" "testing" ) @@ -53,28 +52,6 @@ func TestParseGatewayEndpoint(t *testing.T) { } } -func TestSetBrowserFromEnv(t *testing.T) { - browser := os.Getenv("MINIO_BROWSER") - - os.Setenv("MINIO_BROWSER", "on") - mustSetBrowserSettingFromEnv() - if globalIsBrowserEnabled != true { - t.Errorf("Expected the response status to be `%t`, but instead found `%t`", globalIsBrowserEnabled, false) - } - - os.Setenv("MINIO_BROWSER", "off") - mustSetBrowserSettingFromEnv() - if globalIsBrowserEnabled != false { - t.Errorf("Expected the response status to be `%t`, but instead found `%t`", globalIsBrowserEnabled, true) - } - os.Setenv("MINIO_BROWSER", "") - mustSetBrowserSettingFromEnv() - if globalIsBrowserEnabled != false { - t.Errorf("Expected the response status to be `%t`, but instead found `%t`", globalIsBrowserEnabled, true) - } - os.Setenv("MINIO_BROWSER", browser) -} - // Test validateGatewayArguments func TestValidateGatewayArguments(t *testing.T) { nonLoopBackIPs := localIP4.FuncMatch(func(ip string, matchString string) bool { diff --git a/cmd/gateway-router.go b/cmd/gateway-router.go index b286ed6ed..02c32d01a 100644 --- a/cmd/gateway-router.go +++ b/cmd/gateway-router.go @@ -36,6 +36,8 @@ type GatewayLayer interface { GetBucketPolicies(string) (policy.BucketAccessPolicy, error) DeleteBucketPolicies(string) error AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) + AnonListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (result ListObjectsV2Info, err error) + ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (result ListObjectsV2Info, err error) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) } diff --git a/cmd/gateway-s3-anonymous.go b/cmd/gateway-s3-anonymous.go index 147e8ea43..b5f45fbbc 100644 --- a/cmd/gateway-s3-anonymous.go +++ b/cmd/gateway-s3-anonymous.go @@ -24,14 +24,14 @@ import ( ) // AnonPutObject creates a new object anonymously with the incoming data, -func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) { +func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, e error) { var sha256sumBytes []byte var err error if sha256sum != "" { sha256sumBytes, err = hex.DecodeString(sha256sum) if err != nil { - return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object) + return objInfo, s3ToObjectError(traceError(err), bucket, object) } } @@ -40,14 +40,14 @@ func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data if md5sum != "" { md5sumBytes, err = hex.DecodeString(md5sum) if err != nil { - return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object) + return objInfo, s3ToObjectError(traceError(err), bucket, object) } delete(metadata, "etag") } oi, err := l.anonClient.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata)) if err != nil { - return ObjectInfo{}, s3ToObjectError(traceError(err), 
bucket, object) + return objInfo, s3ToObjectError(traceError(err), bucket, object) } return fromMinioClientObjectInfo(bucket, oi), nil @@ -74,37 +74,47 @@ func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, } // AnonGetObjectInfo - Get object info anonymously -func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (ObjectInfo, error) { +func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, e error) { r := minio.NewHeadReqHeaders() oi, err := l.anonClient.StatObject(bucket, object, r) if err != nil { - return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object) + return objInfo, s3ToObjectError(traceError(err), bucket, object) } return fromMinioClientObjectInfo(bucket, oi), nil } // AnonListObjects - List objects anonymously -func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys) if err != nil { - return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket) + return loi, s3ToObjectError(traceError(err), bucket) } return fromMinioClientListBucketResult(bucket, result), nil } +// AnonListObjectsV2 - List objects in V2 mode, anonymously +func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (loi ListObjectsV2Info, e error) { + result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys) + if err != nil { + return loi, s3ToObjectError(traceError(err), bucket) + } + + return fromMinioClientListBucketV2Result(bucket, result), nil +} + // AnonGetBucketInfo - Get bucket metadata anonymously. -func (l *s3Objects) AnonGetBucketInfo(bucket string) (BucketInfo, error) { +func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) { if exists, err := l.anonClient.BucketExists(bucket); err != nil { - return BucketInfo{}, s3ToObjectError(traceError(err), bucket) + return bi, s3ToObjectError(traceError(err), bucket) } else if !exists { - return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket}) + return bi, traceError(BucketNotFound{Bucket: bucket}) } buckets, err := l.anonClient.ListBuckets() if err != nil { - return BucketInfo{}, s3ToObjectError(traceError(err), bucket) + return bi, s3ToObjectError(traceError(err), bucket) } for _, bi := range buckets { @@ -118,5 +128,5 @@ func (l *s3Objects) AnonGetBucketInfo(bucket string) (BucketInfo, error) { }, nil } - return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket}) + return bi, traceError(BucketNotFound{Bucket: bucket}) } diff --git a/cmd/gateway-s3-unsupported.go b/cmd/gateway-s3-unsupported.go index 13d167b8a..4b9526565 100644 --- a/cmd/gateway-s3-unsupported.go +++ b/cmd/gateway-s3-unsupported.go @@ -32,11 +32,11 @@ func (l *s3Objects) HealObject(bucket string, object string) (int, int, error) { } // ListObjectsHeal - Not relevant. 
-func (l *s3Objects) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { - return ListObjectsInfo{}, traceError(NotImplemented{}) +func (l *s3Objects) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { + return loi, traceError(NotImplemented{}) } // ListUploadsHeal - Not relevant. -func (l *s3Objects) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) { - return ListMultipartsInfo{}, traceError(NotImplemented{}) +func (l *s3Objects) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { + return lmi, traceError(NotImplemented{}) } diff --git a/cmd/gateway-s3.go b/cmd/gateway-s3.go index 709153e1f..dbace5295 100644 --- a/cmd/gateway-s3.go +++ b/cmd/gateway-s3.go @@ -17,11 +17,12 @@ package cmd import ( - "encoding/hex" "io" "net/http" "path" + "encoding/hex" + minio "github.com/minio/minio-go" "github.com/minio/minio-go/pkg/policy" ) @@ -97,14 +98,30 @@ type s3Objects struct { } // newS3Gateway returns s3 gatewaylayer -func newS3Gateway(endpoint string, accessKey, secretKey string, secure bool) (GatewayLayer, error) { - if endpoint == "" { - endpoint = "s3.amazonaws.com" - secure = true +func newS3Gateway(host string) (GatewayLayer, error) { + + var err error + var endpoint string + var secure = true + + // Validate host parameters. + if host != "" { + // Override default params if the host is provided + endpoint, secure, err = parseGatewayEndpoint(host) + if err != nil { + return nil, err + } } + // Default endpoint parameters + if endpoint == "" { + endpoint = "s3.amazonaws.com" + } + + creds := serverConfig.GetCredential() + // Initialize minio client object. - client, err := minio.NewCore(endpoint, accessKey, secretKey, secure) + client, err := minio.NewCore(endpoint, creds.AccessKey, creds.SecretKey, secure) if err != nil { return nil, err } @@ -128,8 +145,8 @@ func (l *s3Objects) Shutdown() error { } // StorageInfo is not relevant to S3 backend. -func (l *s3Objects) StorageInfo() StorageInfo { - return StorageInfo{} +func (l *s3Objects) StorageInfo() (si StorageInfo) { + return si } // MakeBucket creates a new container on S3 backend. @@ -142,10 +159,10 @@ func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error { } // GetBucketInfo gets bucket metadata.. 
-func (l *s3Objects) GetBucketInfo(bucket string) (BucketInfo, error) { +func (l *s3Objects) GetBucketInfo(bucket string) (bi BucketInfo, e error) { buckets, err := l.Client.ListBuckets() if err != nil { - return BucketInfo{}, s3ToObjectError(traceError(err), bucket) + return bi, s3ToObjectError(traceError(err), bucket) } for _, bi := range buckets { @@ -159,7 +176,7 @@ func (l *s3Objects) GetBucketInfo(bucket string) (BucketInfo, error) { }, nil } - return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket}) + return bi, traceError(BucketNotFound{Bucket: bucket}) } // ListBuckets lists all S3 buckets @@ -190,20 +207,20 @@ func (l *s3Objects) DeleteBucket(bucket string) error { } // ListObjects lists all blobs in S3 bucket filtered by prefix -func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys) if err != nil { - return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket) + return loi, s3ToObjectError(traceError(err), bucket) } return fromMinioClientListBucketResult(bucket, result), nil } // ListObjectsV2 lists all blobs in S3 bucket filtered by prefix -func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) { +func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (loi ListObjectsV2Info, e error) { result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys) if err != nil { - return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket) + return loi, s3ToObjectError(traceError(err), bucket) } return fromMinioClientListBucketV2Result(bucket, result), nil @@ -294,7 +311,7 @@ func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo { Name: oi.Key, ModTime: oi.LastModified, Size: oi.Size, - ETag: oi.ETag, + ETag: canonicalizeETag(oi.ETag), UserDefined: userDefined, ContentType: oi.ContentType, ContentEncoding: oi.Metadata.Get("Content-Encoding"), @@ -313,14 +330,14 @@ func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI } // PutObject creates a new object with the incoming data, -func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) { +func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, e error) { var sha256sumBytes []byte var err error if sha256sum != "" { sha256sumBytes, err = hex.DecodeString(sha256sum) if err != nil { - return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object) + return objInfo, s3ToObjectError(traceError(err), bucket, object) } } @@ -329,29 +346,29 @@ func (l *s3Objects) PutObject(bucket string, object string, size int64, data io. 
if md5sum != "" { md5sumBytes, err = hex.DecodeString(md5sum) if err != nil { - return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object) + return objInfo, s3ToObjectError(traceError(err), bucket, object) } delete(metadata, "etag") } oi, err := l.Client.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata)) if err != nil { - return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object) + return objInfo, s3ToObjectError(traceError(err), bucket, object) } return fromMinioClientObjectInfo(bucket, oi), nil } // CopyObject copies a blob from source container to destination container. -func (l *s3Objects) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (ObjectInfo, error) { +func (l *s3Objects) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (objInfo ObjectInfo, e error) { err := l.Client.CopyObject(destBucket, destObject, path.Join(srcBucket, srcObject), minio.CopyConditions{}) if err != nil { - return ObjectInfo{}, s3ToObjectError(traceError(err), srcBucket, srcObject) + return objInfo, s3ToObjectError(traceError(err), srcBucket, srcObject) } oi, err := l.GetObjectInfo(destBucket, destObject) if err != nil { - return ObjectInfo{}, s3ToObjectError(traceError(err), destBucket, destObject) + return objInfo, s3ToObjectError(traceError(err), destBucket, destObject) } return oi, nil @@ -406,10 +423,10 @@ func fromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) Li } // ListMultipartUploads lists all multipart uploads. -func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) { +func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) if err != nil { - return ListMultipartsInfo{}, err + return lmi, err } return fromMinioClientListMultipartsInfo(result), nil @@ -455,20 +472,20 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo { } // PutObjectPart puts a part of object in bucket -func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) { +func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) { md5HexBytes, err := hex.DecodeString(md5Hex) if err != nil { - return PartInfo{}, err + return pi, err } sha256sumBytes, err := hex.DecodeString(sha256sum) if err != nil { - return PartInfo{}, err + return pi, err } info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, size, data, md5HexBytes, sha256sumBytes) if err != nil { - return PartInfo{}, err + return pi, err } return fromMinioClientObjectPart(info), nil @@ -500,10 +517,10 @@ func fromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInf } // ListObjectParts returns all object parts for specified object in specified bucket -func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) { +func (l *s3Objects) ListObjectParts(bucket string, 
object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, e error) { result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) if err != nil { - return ListPartsInfo{}, err + return lpi, err } return fromMinioClientListPartsInfo(result), nil @@ -532,10 +549,10 @@ func toMinioClientCompleteParts(parts []completePart) []minio.CompletePart { } // CompleteMultipartUpload completes ongoing multipart upload and finalizes object -func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (ObjectInfo, error) { +func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (oi ObjectInfo, e error) { err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts)) if err != nil { - return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object) + return oi, s3ToObjectError(traceError(err), bucket, object) } return l.GetObjectInfo(bucket, object) diff --git a/cmd/gateway-startup-msg.go b/cmd/gateway-startup-msg.go index b89a753f3..e17a3e4d5 100644 --- a/cmd/gateway-startup-msg.go +++ b/cmd/gateway-startup-msg.go @@ -18,28 +18,19 @@ package cmd import ( "fmt" - "runtime" "strings" ) // Prints the formatted startup message. -func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey string, backendType gatewayBackend) { +func printGatewayStartupMessage(apiEndPoints []string, backendType gatewayBackend) { + strippedAPIEndpoints := stripStandardPorts(apiEndPoints) + // Prints credential. - printGatewayCommonMsg(apiEndPoints, accessKey, secretKey) + printGatewayCommonMsg(strippedAPIEndpoints) // Prints `mc` cli configuration message chooses // first endpoint as default. - endPoint := apiEndPoints[0] - - // Configure 'mc', following block prints platform specific information for minio client. - log.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide) - if runtime.GOOS == globalWindowsOSName { - mcMessage := fmt.Sprintf("$ mc.exe config host add my%s %s %s %s", backendType, endPoint, accessKey, secretKey) - log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) - } else { - mcMessage := fmt.Sprintf("$ mc config host add my%s %s %s %s", backendType, endPoint, accessKey, secretKey) - log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) - } + printCLIAccessMsg(strippedAPIEndpoints[0], fmt.Sprintf("my%s", backendType)) // Prints documentation message. printObjectAPIMsg() @@ -52,10 +43,16 @@ func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey stri } // Prints common server startup message. Prints credential, region and browser access. -func printGatewayCommonMsg(apiEndpoints []string, accessKey, secretKey string) { +func printGatewayCommonMsg(apiEndpoints []string) { + // Get saved credentials. + cred := serverConfig.GetCredential() + apiEndpointStr := strings.Join(apiEndpoints, " ") // Colorize the message and print. 
log.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr))) - log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", accessKey))) - log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", secretKey))) + log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey))) + log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey))) + + log.Println(colorBlue("\nBrowser Access:")) + log.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr)) } diff --git a/cmd/gateway-startup-msg_test.go b/cmd/gateway-startup-msg_test.go index c3bf2e6cb..bda258fca 100644 --- a/cmd/gateway-startup-msg_test.go +++ b/cmd/gateway-startup-msg_test.go @@ -20,12 +20,24 @@ import "testing" // Test printing Gateway common message. func TestPrintGatewayCommonMessage(t *testing.T) { - apiEndpoints := []string{"127.0.0.1:9000"} - printGatewayCommonMsg(apiEndpoints, "abcd1", "abcd123") + root, err := newTestConfig(globalMinioDefaultRegion) + if err != nil { + t.Fatal(err) + } + defer removeAll(root) + + apiEndpoints := []string{"http://127.0.0.1:9000"} + printGatewayCommonMsg(apiEndpoints) } // Test print gateway startup message. func TestPrintGatewayStartupMessage(t *testing.T) { - apiEndpoints := []string{"127.0.0.1:9000"} - printGatewayStartupMessage(apiEndpoints, "abcd1", "abcd123", "azure") + root, err := newTestConfig(globalMinioDefaultRegion) + if err != nil { + t.Fatal(err) + } + defer removeAll(root) + + apiEndpoints := []string{"http://127.0.0.1:9000"} + printGatewayStartupMessage(apiEndpoints, "azure") } diff --git a/cmd/globals.go b/cmd/globals.go index b57457b9c..e894133e1 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -17,20 +17,32 @@ package cmd import ( + "crypto/tls" "crypto/x509" + "os" "runtime" "time" humanize "github.com/dustin/go-humanize" "github.com/fatih/color" + miniohttp "github.com/minio/minio/pkg/http" ) // minio configuration related constants. const ( globalMinioCertExpireWarnDays = time.Hour * 24 * 30 // 30 days. - globalMinioDefaultRegion = "" - globalMinioDefaultOwnerID = "minio" + globalMinioDefaultRegion = "" + // This is a sha256 output of ``arn:aws:iam::minio:user/admin``, + // this is kept in present form to be compatible with S3 owner ID + // requirements - + // + // ``` + // The canonical user ID is the Amazon S3–only concept. + // It is 64-character obfuscated version of the account ID. + // ``` + // http://docs.aws.amazon.com/AmazonS3/latest/dev/example-walkthroughs-managing-access-example4.html + globalMinioDefaultOwnerID = "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4" globalMinioDefaultStorageClass = "STANDARD" globalWindowsOSName = "windows" globalNetBSDOSName = "netbsd" @@ -40,6 +52,7 @@ const ( globalMinioModeDistXL = "mode-server-distributed-xl" globalMinioModeGatewayAzure = "mode-gateway-azure" globalMinioModeGatewayS3 = "mode-gateway-s3" + globalMinioModeGatewayGCS = "mode-gateway-gcs" // Add new global values here. ) @@ -96,6 +109,12 @@ var ( // IsSSL indicates if the server is configured with SSL. globalIsSSL bool + globalTLSCertificate *tls.Certificate + + globalHTTPServer *miniohttp.Server + globalHTTPServerErrorCh = make(chan error) + globalOSSignalCh = make(chan os.Signal, 1) + // List of admin peers. globalAdminPeers = adminPeers{} @@ -127,8 +146,9 @@ var ( // global colors. 
var ( - colorBold = color.New(color.Bold).SprintFunc() - colorBlue = color.New(color.FgBlue).SprintfFunc() + colorBold = color.New(color.Bold).SprintFunc() + colorBlue = color.New(color.FgBlue).SprintfFunc() + colorYellow = color.New(color.FgYellow).SprintfFunc() ) // Returns minio global information, as a key value map. diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index 7a1d80f13..e8b60f7e1 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -98,9 +98,9 @@ func path2BucketAndObject(path string) (bucket, object string) { } // extractMetadataFromHeader extracts metadata from HTTP header. -func extractMetadataFromHeader(header http.Header) map[string]string { +func extractMetadataFromHeader(header http.Header) (map[string]string, error) { if header == nil { - return nil + return nil, traceError(errInvalidArgument) } metadata := make(map[string]string) // Save standard supported headers. @@ -116,16 +116,17 @@ func extractMetadataFromHeader(header http.Header) map[string]string { } // Go through all other headers for any additional headers that needs to be saved. for key := range header { - cKey := http.CanonicalHeaderKey(key) - if strings.HasPrefix(cKey, "X-Amz-Meta-") { - metadata[cKey] = header.Get(key) - } else if strings.HasPrefix(key, "X-Minio-Meta-") { - metadata[cKey] = header.Get(key) + if key != http.CanonicalHeaderKey(key) { + return nil, traceError(errInvalidArgument) + } + if strings.HasPrefix(key, "X-Amz-Meta-") { + metadata[key] = header.Get(key) + } + if strings.HasPrefix(key, "X-Minio-Meta-") { + metadata[key] = header.Get(key) } } - - // Success. - return metadata + return metadata, nil } // The Query string for the redirect URL the client is @@ -168,11 +169,6 @@ func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) return strings.Join(newEncs, ",") } -// extractMetadataFromForm extracts metadata from Post Form. -func extractMetadataFromForm(formValues http.Header) map[string]string { - return extractMetadataFromHeader(formValues) -} - // Validate form field size for s3 specification requirement. func validateFormFieldSize(formValues http.Header) error { // Iterate over form values diff --git a/cmd/handler-utils_test.go b/cmd/handler-utils_test.go index 51c45adf6..40b11e733 100644 --- a/cmd/handler-utils_test.go +++ b/cmd/handler-utils_test.go @@ -123,8 +123,9 @@ func TestValidateFormFieldSize(t *testing.T) { // Tests validate metadata extraction from http headers. func TestExtractMetadataHeaders(t *testing.T) { testCases := []struct { - header http.Header - metadata map[string]string + header http.Header + metadata map[string]string + shouldFail bool }{ // Validate if there a known 'content-type'. { @@ -134,15 +135,17 @@ func TestExtractMetadataHeaders(t *testing.T) { metadata: map[string]string{ "content-type": "image/png", }, + shouldFail: false, }, // Validate if there are no keys to extract. { header: http.Header{ - "test-1": []string{"123"}, + "Test-1": []string{"123"}, }, - metadata: map[string]string{}, + metadata: map[string]string{}, + shouldFail: false, }, - // Validate if there are no keys to extract. 
+ // Validate that there are all headers extracted { header: http.Header{ "X-Amz-Meta-Appid": []string{"amz-meta"}, @@ -150,19 +153,38 @@ func TestExtractMetadataHeaders(t *testing.T) { }, metadata: map[string]string{ "X-Amz-Meta-Appid": "amz-meta", - "X-Minio-Meta-Appid": "minio-meta"}, + "X-Minio-Meta-Appid": "minio-meta", + }, + shouldFail: false, + }, + // Fail if header key is not in canonicalized form + { + header: http.Header{ + "x-amz-meta-appid": []string{"amz-meta"}, + }, + metadata: map[string]string{ + "X-Amz-Meta-Appid": "amz-meta", + }, + shouldFail: true, }, // Empty header input returns empty metadata. { - header: nil, - metadata: nil, + header: nil, + metadata: nil, + shouldFail: true, }, } // Validate if the extracting headers. for i, testCase := range testCases { - metadata := extractMetadataFromHeader(testCase.header) - if !reflect.DeepEqual(metadata, testCase.metadata) { + metadata, err := extractMetadataFromHeader(testCase.header) + if err != nil && !testCase.shouldFail { + t.Fatalf("Test %d failed to extract metadata: %v", i+1, err) + } + if err == nil && testCase.shouldFail { + t.Fatalf("Test %d should fail, but it passed", i+1) + } + if err == nil && !reflect.DeepEqual(metadata, testCase.metadata) { t.Fatalf("Test %d failed: Expected \"%#v\", got \"%#v\"", i+1, testCase.metadata, metadata) } } diff --git a/cmd/lock-rpc-server.go b/cmd/lock-rpc-server.go index 6de6a965d..7d14d0eee 100644 --- a/cmd/lock-rpc-server.go +++ b/cmd/lock-rpc-server.go @@ -19,7 +19,6 @@ package cmd import ( "fmt" "math/rand" - "net/rpc" "path" "sync" "time" @@ -64,7 +63,7 @@ type lockServer struct { } // Start lock maintenance from all lock servers. -func startLockMaintainence(lockServers []*lockServer) { +func startLockMaintenance(lockServers []*lockServer) { for _, locker := range lockServers { // Start loop for stale lock maintenance go func(lk *lockServer) { @@ -90,7 +89,7 @@ func startLockMaintainence(lockServers []*lockServer) { // Register distributed NS lock handlers. func registerDistNSLockRouter(mux *router.Router, endpoints EndpointList) error { // Start lock maintenance from all lock servers. - startLockMaintainence(globalLockServers) + startLockMaintenance(globalLockServers) // Register initialized lock servers to their respective rpc endpoints. return registerStorageLockers(mux, globalLockServers) @@ -99,7 +98,7 @@ func registerDistNSLockRouter(mux *router.Router, endpoints EndpointList) error // registerStorageLockers - register locker rpc handlers for net/rpc library clients func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error { for _, lockServer := range lockServers { - lockRPCServer := rpc.NewServer() + lockRPCServer := newRPCServer() if err := lockRPCServer.RegisterName(lockServiceName, lockServer); err != nil { return traceError(err) } diff --git a/cmd/notifier-config.go b/cmd/notifier-config.go index ccb112502..044033274 100644 --- a/cmd/notifier-config.go +++ b/cmd/notifier-config.go @@ -32,6 +32,7 @@ type notifier struct { Kafka kafkaConfigs `json:"kafka"` Webhook webhookConfigs `json:"webhook"` MySQL mySQLConfigs `json:"mysql"` + MQTT mqttConfigs `json:"mqtt"` // Add new notification queues. 
} @@ -54,6 +55,25 @@ func (a amqpConfigs) Validate() error { return nil } +type mqttConfigs map[string]mqttNotify + +func (a mqttConfigs) Clone() mqttConfigs { + a2 := make(mqttConfigs, len(a)) + for k, v := range a { + a2[k] = v + } + return a2 +} + +func (a mqttConfigs) Validate() error { + for k, v := range a { + if err := v.Validate(); err != nil { + return fmt.Errorf("MQTT [%s] configuration invalid: %s", k, err.Error()) + } + } + return nil +} + type natsConfigs map[string]natsNotify func (a natsConfigs) Clone() natsConfigs { @@ -215,6 +235,9 @@ func (n *notifier) Validate() error { if err := n.MySQL.Validate(); err != nil { return err } + if err := n.MQTT.Validate(); err != nil { + return err + } return nil } @@ -236,6 +259,24 @@ func (n *notifier) GetAMQPByID(accountID string) amqpNotify { return n.AMQP[accountID] } +func (n *notifier) SetMQTTByID(accountID string, mqttn mqttNotify) { + n.Lock() + defer n.Unlock() + n.MQTT[accountID] = mqttn +} + +func (n *notifier) GetMQTT() map[string]mqttNotify { + n.RLock() + defer n.RUnlock() + return n.MQTT.Clone() +} + +func (n *notifier) GetMQTTByID(accountID string) mqttNotify { + n.RLock() + defer n.RUnlock() + return n.MQTT[accountID] +} + func (n *notifier) SetNATSByID(accountID string, natsn natsNotify) { n.Lock() defer n.Unlock() diff --git a/cmd/notifiers.go b/cmd/notifiers.go index 9a016f7d8..db140124e 100644 --- a/cmd/notifiers.go +++ b/cmd/notifiers.go @@ -30,6 +30,8 @@ const ( // Static string indicating queue type 'amqp'. queueTypeAMQP = "amqp" + // Static string indicating queue type 'mqtt'. + queueTypeMQTT = "mqtt" // Static string indicating queue type 'nats'. queueTypeNATS = "nats" // Static string indicating queue type 'elasticsearch'. @@ -80,6 +82,25 @@ func isAMQPQueue(sqsArn arnSQS) bool { return true } +// Returns true if mqttARN is for an MQTT queue. +func isMQTTQueue(sqsArn arnSQS) bool { + if sqsArn.Type != queueTypeMQTT { + return false + } + mqttL := serverConfig.Notify.GetMQTTByID(sqsArn.AccountID) + if !mqttL.Enable { + return false + } + // Connect to mqtt server to validate. + mqttC, err := dialMQTT(mqttL) + if err != nil { + errorIf(err, "Unable to connect to mqtt service. %#v", mqttL) + return false + } + defer mqttC.Client.Disconnect(250) + return true +} + // Returns true if natsArn is for an NATS queue. func isNATSQueue(sqsArn arnSQS) bool { if sqsArn.Type != queueTypeNATS { diff --git a/cmd/notify-amqp.go b/cmd/notify-amqp.go index ed02366be..d946e4bf8 100644 --- a/cmd/notify-amqp.go +++ b/cmd/notify-amqp.go @@ -59,13 +59,13 @@ type amqpConn struct { // dialAMQP - dials and returns an amqpConnection instance, // for sending notifications. Returns error if amqp logger // is not enabled. 
-func dialAMQP(amqpL amqpNotify) (amqpConn, error) { +func dialAMQP(amqpL amqpNotify) (ac amqpConn, e error) { if !amqpL.Enable { - return amqpConn{}, errNotifyNotEnabled + return ac, errNotifyNotEnabled } conn, err := amqp.Dial(amqpL.URL) if err != nil { - return amqpConn{}, err + return ac, err } return amqpConn{Connection: conn, params: amqpL}, nil } diff --git a/cmd/notify-kafka.go b/cmd/notify-kafka.go index acd4ec80b..02f6384aa 100644 --- a/cmd/notify-kafka.go +++ b/cmd/notify-kafka.go @@ -66,13 +66,13 @@ type kafkaConn struct { topic string } -func dialKafka(kn kafkaNotify) (kafkaConn, error) { +func dialKafka(kn kafkaNotify) (kc kafkaConn, e error) { if !kn.Enable { - return kafkaConn{}, errNotifyNotEnabled + return kc, errNotifyNotEnabled } if kn.Topic == "" { - return kafkaConn{}, kkErrFunc( + return kc, kkErrFunc( "Topic was not specified in configuration") } @@ -85,7 +85,7 @@ func dialKafka(kn kafkaNotify) (kafkaConn, error) { p, err := sarama.NewSyncProducer(kn.Brokers, config) if err != nil { - return kafkaConn{}, kkErrFunc("Failed to start producer: %v", err) + return kc, kkErrFunc("Failed to start producer: %v", err) } return kafkaConn{p, kn.Topic}, nil diff --git a/cmd/notify-mqtt.go b/cmd/notify-mqtt.go new file mode 100644 index 000000000..cfc132705 --- /dev/null +++ b/cmd/notify-mqtt.go @@ -0,0 +1,123 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "crypto/tls" + "io/ioutil" + "time" + + "github.com/Sirupsen/logrus" + MQTT "github.com/eclipse/paho.mqtt.golang" +) + +type mqttNotify struct { + Enable bool `json:"enable"` + Broker string `json:"broker"` + Topic string `json:"topic"` + QoS int `json:"qos"` + ClientID string `json:"clientId"` + User string `json:"username"` + Password string `json:"password"` +} + +func (m *mqttNotify) Validate() error { + if !m.Enable { + return nil + } + if _, err := checkURL(m.Broker); err != nil { + return err + } + return nil +} + +type mqttConn struct { + params mqttNotify + Client MQTT.Client +} + +func dialMQTT(mqttL mqttNotify) (mc mqttConn, e error) { + if !mqttL.Enable { + return mc, errNotifyNotEnabled + } + connOpts := &MQTT.ClientOptions{ + ClientID: mqttL.ClientID, + CleanSession: true, + Username: mqttL.User, + Password: mqttL.Password, + MaxReconnectInterval: 1 * time.Second, + KeepAlive: 30 * time.Second, + TLSConfig: tls.Config{RootCAs: globalRootCAs}, + } + connOpts.AddBroker(mqttL.Broker) + client := MQTT.NewClient(connOpts) + if token := client.Connect(); token.Wait() && token.Error() != nil { + return mc, token.Error() + } + return mqttConn{Client: client, params: mqttL}, nil +} + +func newMQTTNotify(accountID string) (*logrus.Logger, error) { + mqttL := serverConfig.Notify.GetMQTTByID(accountID) + + //connect to MQTT Server + mqttC, err := dialMQTT(mqttL) + if err != nil { + return nil, err + } + + mqttLog := logrus.New() + + // Disable writing to console. + mqttLog.Out = ioutil.Discard + + // Add a mqtt hook. 
+ mqttLog.Hooks.Add(mqttC) + + // Set default JSON formatter + mqttLog.Formatter = new(logrus.JSONFormatter) + + // successfully enabled all MQTTs + return mqttLog, nil +} + +// Fire if called when an event should be sent to the message broker. +func (q mqttConn) Fire(entry *logrus.Entry) error { + body, err := entry.String() + if err != nil { + return err + } + + if !q.Client.IsConnected() { + if token := q.Client.Connect(); token.Wait() && token.Error() != nil { + return token.Error() + } + } + token := q.Client.Publish(q.params.Topic, byte(q.params.QoS), false, body) + if token.Wait() && token.Error() != nil { + return token.Error() + } + + return nil +} + +// Levels is available logging levels. +func (q mqttConn) Levels() []logrus.Level { + return []logrus.Level{ + logrus.InfoLevel, + } +} diff --git a/cmd/notify-mysql.go b/cmd/notify-mysql.go index 0bf5d143d..2d55f7d95 100644 --- a/cmd/notify-mysql.go +++ b/cmd/notify-mysql.go @@ -145,9 +145,9 @@ type mySQLConn struct { *sql.DB } -func dialMySQL(msql mySQLNotify) (mySQLConn, error) { +func dialMySQL(msql mySQLNotify) (mc mySQLConn, e error) { if !msql.Enable { - return mySQLConn{}, errNotifyNotEnabled + return mc, errNotifyNotEnabled } dsnStr := msql.DsnString @@ -166,7 +166,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) { db, err := sql.Open("mysql", dsnStr) if err != nil { - return mySQLConn{}, mysqlErrFunc( + return mc, mysqlErrFunc( "Connection opening failure (dsnStr=%s): %v", dsnStr, err) } @@ -174,7 +174,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) { // ping to check that server is actually reachable. err = db.Ping() if err != nil { - return mySQLConn{}, mysqlErrFunc( + return mc, mysqlErrFunc( "Ping to server failed with: %v", err) } @@ -190,7 +190,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) { _, errCreate := db.Exec(fmt.Sprintf(createStmt, msql.Table)) if errCreate != nil { // failed to create the table. error out. - return mySQLConn{}, mysqlErrFunc( + return mc, mysqlErrFunc( "'Select' failed with %v, then 'Create Table' failed with %v", err, errCreate, ) @@ -205,22 +205,20 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) { stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNSMySQL, msql.Table)) if err != nil { - return mySQLConn{}, - mysqlErrFunc("create UPSERT prepared statement failed with: %v", err) + return mc, mysqlErrFunc("create UPSERT prepared statement failed with: %v", err) } // delete statement stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNSMySQL, msql.Table)) if err != nil { - return mySQLConn{}, - mysqlErrFunc("create DELETE prepared statement failed with: %v", err) + return mc, mysqlErrFunc("create DELETE prepared statement failed with: %v", err) } case formatAccess: // insert statement stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccessMySQL, msql.Table)) if err != nil { - return mySQLConn{}, mysqlErrFunc( + return mc, mysqlErrFunc( "create INSERT prepared statement failed with: %v", err) } diff --git a/cmd/notify-nats.go b/cmd/notify-nats.go index 7826e3d62..18de99379 100644 --- a/cmd/notify-nats.go +++ b/cmd/notify-nats.go @@ -69,9 +69,9 @@ type natsIOConn struct { // dialNATS - dials and returns an natsIOConn instance, // for sending notifications. Returns error if nats logger // is not enabled. 
-func dialNATS(natsL natsNotify, testDial bool) (natsIOConn, error) { +func dialNATS(natsL natsNotify, testDial bool) (nioc natsIOConn, e error) { if !natsL.Enable { - return natsIOConn{}, errNotifyNotEnabled + return nioc, errNotifyNotEnabled } // Construct natsIOConn which holds all NATS connection information @@ -105,7 +105,7 @@ func dialNATS(natsL natsNotify, testDial bool) (natsIOConn, error) { // Do the real connection to the NATS server sc, err := stan.Connect(natsL.Streaming.ClusterID, clientID, connOpts...) if err != nil { - return natsIOConn{}, err + return nioc, err } // Save the created connection conn.stanConn = sc @@ -120,7 +120,7 @@ func dialNATS(natsL natsNotify, testDial bool) (natsIOConn, error) { // Do the real connection nc, err := natsC.Connect() if err != nil { - return natsIOConn{}, err + return nioc, err } // Save the created connection conn.natsConn = nc diff --git a/cmd/notify-postgresql.go b/cmd/notify-postgresql.go index f0176f2bf..d6739681c 100644 --- a/cmd/notify-postgresql.go +++ b/cmd/notify-postgresql.go @@ -153,9 +153,9 @@ type pgConn struct { *sql.DB } -func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) { +func dialPostgreSQL(pgN postgreSQLNotify) (pc pgConn, e error) { if !pgN.Enable { - return pgConn{}, errNotifyNotEnabled + return pc, errNotifyNotEnabled } // collect connection params @@ -179,7 +179,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) { db, err := sql.Open("postgres", connStr) if err != nil { - return pgConn{}, pgErrFunc( + return pc, pgErrFunc( "Connection opening failure (connectionString=%s): %v", connStr, err) } @@ -187,7 +187,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) { // ping to check that server is actually reachable. err = db.Ping() if err != nil { - return pgConn{}, pgErrFunc("Ping to server failed with: %v", + return pc, pgErrFunc("Ping to server failed with: %v", err) } @@ -203,7 +203,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) { _, errCreate := db.Exec(fmt.Sprintf(createStmt, pgN.Table)) if errCreate != nil { // failed to create the table. error out. - return pgConn{}, pgErrFunc( + return pc, pgErrFunc( "'Select' failed with %v, then 'Create Table' failed with %v", err, errCreate, ) @@ -218,14 +218,14 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) { stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNS, pgN.Table)) if err != nil { - return pgConn{}, pgErrFunc( + return pc, pgErrFunc( "create UPSERT prepared statement failed with: %v", err) } // delete statement stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNS, pgN.Table)) if err != nil { - return pgConn{}, pgErrFunc( + return pc, pgErrFunc( "create DELETE prepared statement failed with: %v", err) } case formatAccess: @@ -233,7 +233,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) { stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccess, pgN.Table)) if err != nil { - return pgConn{}, pgErrFunc( + return pc, pgErrFunc( "create INSERT prepared statement failed with: %v", err) } } diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go index d77d85470..8af37362a 100644 --- a/cmd/object-api-errors.go +++ b/cmd/object-api-errors.go @@ -110,6 +110,13 @@ func (e SHA256Mismatch) Error() string { return "sha256 computed does not match with what is expected" } +// SignatureDoesNotMatch - when content md5 does not match with what was sent from client. 
+type SignatureDoesNotMatch struct{} + +func (e SignatureDoesNotMatch) Error() string { + return "The request signature we calculated does not match the signature you provided. Check your key and signing method." +} + // StorageFull storage ran out of space. type StorageFull struct{} @@ -144,6 +151,13 @@ func (e BucketNotFound) Error() string { return "Bucket not found: " + e.Bucket } +// BucketAlreadyExists the requested bucket name is not available. +type BucketAlreadyExists GenericError + +func (e BucketAlreadyExists) Error() string { + return "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again." +} + // BucketAlreadyOwnedByYou already owned by you. type BucketAlreadyOwnedByYou GenericError @@ -250,6 +264,14 @@ func (e ObjectNameInvalid) Error() string { return "Object name invalid: " + e.Bucket + "#" + e.Object } +// AllAccessDisabled All access to this object has been disabled +type AllAccessDisabled GenericError + +// Return string an error formatted as the given text. +func (e AllAccessDisabled) Error() string { + return "All access to this object has been disabled" +} + // IncompleteBody You did not provide the number of bytes specified by the Content-Length HTTP header. type IncompleteBody GenericError @@ -307,7 +329,7 @@ func (e InvalidUploadID) Error() string { type InvalidPart struct{} func (e InvalidPart) Error() string { - return "One or more of the specified parts could not be found" + return "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag." } // PartTooSmall - error if part size is less than 5MB. @@ -321,6 +343,13 @@ func (e PartTooSmall) Error() string { return fmt.Sprintf("Part size for %d should be atleast 5MB", e.PartNumber) } +// PartTooBig returned if size of part is bigger than the allowed limit. +type PartTooBig struct{} + +func (e PartTooBig) Error() string { + return "Part size bigger than the allowed limit" +} + // NotImplemented If a feature is not implemented type NotImplemented struct{} diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index 36be3f8a2..044c06428 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -1061,7 +1061,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan { MaxUploads: 10, IsTruncated: false, - Prefix: globalMinioDefaultOwnerID, + Prefix: "minio", UploadIDMarker: uploadIDs[4], Uploads: []uploadMetadata{ { @@ -1895,7 +1895,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T // Part number 0 doesn't exist, expecting InvalidPart error (Test number 12). {bucketNames[0], objectNames[0], uploadIDs[0], []completePart{{ETag: "abcd", PartNumber: 0}}, "", InvalidPart{}, false}, // // Upload and PartNumber exists, But a deliberate ETag mismatch is introduced (Test number 13). - {bucketNames[0], objectNames[0], uploadIDs[0], inputParts[0].parts, "", BadDigest{}, false}, + {bucketNames[0], objectNames[0], uploadIDs[0], inputParts[0].parts, "", InvalidPart{}, false}, // Test case with non existent object name (Test number 14). {bucketNames[0], "my-object", uploadIDs[0], []completePart{{ETag: "abcd", PartNumber: 1}}, "", InvalidUploadID{UploadID: uploadIDs[0]}, false}, // Testing for Part being too small (Test number 15). 
diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 3a1352fdf..1da5b0dd7 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -260,7 +260,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re // Extract metadata relevant for an CopyObject operation based on conditional // header values specified in X-Amz-Metadata-Directive. -func getCpObjMetadataFromHeader(header http.Header, defaultMeta map[string]string) map[string]string { +func getCpObjMetadataFromHeader(header http.Header, defaultMeta map[string]string) (map[string]string, error) { // if x-amz-metadata-directive says REPLACE then // we extract metadata from the input headers. if isMetadataReplace(header) { @@ -270,11 +270,11 @@ func getCpObjMetadataFromHeader(header http.Header, defaultMeta map[string]strin // if x-amz-metadata-directive says COPY then we // return the default metadata. if isMetadataCopy(header) { - return defaultMeta + return defaultMeta, nil } // Copy is default behavior if not x-amz-metadata-directive is set. - return defaultMeta + return defaultMeta, nil } // CopyObjectHandler - Copy Object @@ -363,7 +363,11 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re // Make sure to remove saved etag, CopyObject calculates a new one. delete(defaultMeta, "etag") - newMetadata := getCpObjMetadataFromHeader(r.Header, defaultMeta) + newMetadata, err := getCpObjMetadataFromHeader(r.Header, defaultMeta) + if err != nil { + errorIf(err, "found invalid http request header") + writeErrorResponse(w, ErrInternalError, r.URL) + } // Check if x-amz-metadata-directive was not set to REPLACE and source, // desination are same objects. if !isMetadataReplace(r.Header) && cpSrcDstSame { @@ -457,7 +461,12 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req } // Extract metadata to be saved from incoming HTTP header. - metadata := extractMetadataFromHeader(r.Header) + metadata, err := extractMetadataFromHeader(r.Header) + if err != nil { + errorIf(err, "found invalid http request header") + writeErrorResponse(w, ErrInternalError, r.URL) + return + } if rAuthType == authTypeStreamingSigned { if contentEncoding, ok := metadata["content-encoding"]; ok { contentEncoding = trimAwsChunkedContentEncoding(contentEncoding) @@ -579,7 +588,12 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r } // Extract metadata that needs to be saved. 
- metadata := extractMetadataFromHeader(r.Header) + metadata, err := extractMetadataFromHeader(r.Header) + if err != nil { + errorIf(err, "found invalid http request header") + writeErrorResponse(w, ErrInternalError, r.URL) + return + } uploadID, err := objectAPI.NewMultipartUpload(bucket, object, metadata) if err != nil { diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index 4c48f303e..bfe9b676a 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -2252,7 +2252,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(toAPIErrorCode(BadDigest{})), + expectedContent: encodeResponse(getAPIErrorResponse(getAPIError(toAPIErrorCode(InvalidPart{})), getGetObjectURL("", bucketName, objectName))), expectedRespStatus: http.StatusBadRequest, }, diff --git a/cmd/posix.go b/cmd/posix.go index ac0c2c57d..d0d99e08d 100644 --- a/cmd/posix.go +++ b/cmd/posix.go @@ -35,9 +35,11 @@ import ( ) const ( - fsMinFreeSpace = 1 * humanize.GiByte // Min 1GiB free space. - fsMinFreeInodes = 10000 // Min 10000. - maxAllowedIOError = 5 + diskMinFreeSpace = 1 * humanize.GiByte // Min 1GiB free space. + diskMinTotalSpace = diskMinFreeSpace // Min 1GiB total space. + diskMinFreeInodes = 10000 // Min 10000 free inodes. + diskMinTotalInodes = diskMinFreeInodes // Min 10000 total inodes. + maxAllowedIOError = 5 ) // posix - implements StorageAPI interface. @@ -108,7 +110,7 @@ func newPosix(path string) (StorageAPI, error) { if err != nil { return nil, err } - fs := &posix{ + st := &posix{ diskPath: diskPath, // 1MiB buffer pool for posix internal operations. pool: sync.Pool{ @@ -131,7 +133,19 @@ func newPosix(path string) (StorageAPI, error) { return nil, err } } - return fs, nil + + di, err := getDiskInfo(preparePath(diskPath)) + if err != nil { + return nil, err + } + + // Check if disk has minimum required total space. + if err = checkDiskMinTotal(di); err != nil { + return nil, err + } + + // Success. + return st, nil } // getDiskInfo returns given disk information. @@ -155,11 +169,58 @@ var ignoreDiskFreeOS = []string{ globalSolarisOSName, } +// check if disk total has minimum required size. +func checkDiskMinTotal(di disk.Info) (err error) { + // Remove 5% from total space for cumulative disk space + // used for journalling, inodes etc. + totalDiskSpace := float64(di.Total) * 0.95 + if int64(totalDiskSpace) <= diskMinTotalSpace { + return errDiskFull + } + + // Some filesystems do not implement a way to provide total inodes available, instead + // inodes are allocated based on available disk space. For example CephDISK, StoreNext CVDISK, + // AzureFile driver. Allow for the available disk to be separately validated and we will + // validate inodes only if total inodes are provided by the underlying filesystem. + if di.Files != 0 && di.FSType != "NFS" { + totalFiles := int64(di.Files) + if totalFiles <= diskMinTotalInodes { + return errDiskFull + } + } + + return nil +} + +// check if disk free has minimum required size. +func checkDiskMinFree(di disk.Info) error { + // Remove 5% from free space for cumulative disk space used for journalling, inodes etc. 
+ availableDiskSpace := float64(di.Free) * 0.95 + if int64(availableDiskSpace) <= diskMinFreeSpace { + return errDiskFull + } + + // Some filesystems do not implement a way to provide total inodes available, instead inodes + // are allocated based on available disk space. For example CephDISK, StoreNext CVDISK, AzureFile driver. + // Allow for the available disk to be separately validate and we will validate inodes only if + // total inodes are provided by the underlying filesystem. + if di.Files != 0 && di.FSType != "NFS" { + availableFiles := int64(di.Ffree) + if availableFiles <= diskMinFreeInodes { + return errDiskFull + } + } + + // Success. + return nil +} + // checkDiskFree verifies if disk path has sufficient minimum free disk space and files. func checkDiskFree(diskPath string, neededSpace int64) (err error) { // We don't validate disk space or inode utilization on windows. - // Each windows calls to 'GetVolumeInformationW' takes around 3-5seconds. - // And StatFS is not supported by Go for solaris and netbsd. + // Each windows call to 'GetVolumeInformationW' takes around + // 3-5seconds. And StatDISK is not supported by Go for solaris + // and netbsd. if contains(ignoreDiskFreeOS, runtime.GOOS) { return nil } @@ -170,29 +231,15 @@ func checkDiskFree(diskPath string, neededSpace int64) (err error) { return err } - // Remove 5% from free space for cumulative disk space used for journalling, inodes etc. - availableDiskSpace := float64(di.Free) * 0.95 - if int64(availableDiskSpace) <= fsMinFreeSpace { - return errDiskFull - } - - // Some filesystems do not implement a way to provide total inodes available, instead inodes - // are allocated based on available disk space. For example CephFS, StoreNext CVFS, AzureFile driver. - // Allow for the available disk to be separately validate and we will validate inodes only if - // total inodes are provided by the underlying filesystem. - if di.Files != 0 && di.FSType != "NFS" { - availableFiles := int64(di.Ffree) - if availableFiles <= fsMinFreeInodes { - return errDiskFull - } + if err = checkDiskMinFree(di); err != nil { + return err } // Check if we have enough space to store data - if neededSpace > int64(availableDiskSpace) { + if neededSpace > int64(float64(di.Free)*0.95) { return errDiskFull } - // Success. return nil } diff --git a/cmd/posix_test.go b/cmd/posix_test.go index 3ef44dc04..11023be96 100644 --- a/cmd/posix_test.go +++ b/cmd/posix_test.go @@ -29,6 +29,8 @@ import ( "syscall" "testing" + "github.com/minio/minio/pkg/disk" + "golang.org/x/crypto/blake2b" ) @@ -1669,3 +1671,106 @@ func TestPosixStatFile(t *testing.T) { } } } + +// Checks for restrictions for min total disk space and inodes. +func TestCheckDiskTotalMin(t *testing.T) { + testCases := []struct { + diskInfo disk.Info + err error + }{ + // Test 1 - when fstype is nfs. + { + diskInfo: disk.Info{ + Total: diskMinTotalSpace * 3, + FSType: "NFS", + }, + err: nil, + }, + // Test 2 - when fstype is xfs and total inodes are small. + { + diskInfo: disk.Info{ + Total: diskMinTotalSpace * 3, + FSType: "XFS", + Files: 9999, + }, + err: errDiskFull, + }, + // Test 3 - when fstype is btrfs and total inodes is empty. + { + diskInfo: disk.Info{ + Total: diskMinTotalSpace * 3, + FSType: "BTRFS", + Files: 0, + }, + err: nil, + }, + // Test 4 - when fstype is xfs and total disk space is really small. + { + diskInfo: disk.Info{ + Total: diskMinTotalSpace - diskMinTotalSpace/1024, + FSType: "XFS", + Files: 9999, + }, + err: errDiskFull, + }, + } + + // Validate all cases. 
+ for i, test := range testCases { + if err := checkDiskMinTotal(test.diskInfo); test.err != err { + t.Errorf("Test %d: Expected error %s, got %s", i+1, test.err, err) + } + } +} + +// Checks for restrictions for min free disk space and inodes. +func TestCheckDiskFreeMin(t *testing.T) { + testCases := []struct { + diskInfo disk.Info + err error + }{ + // Test 1 - when fstype is nfs. + { + diskInfo: disk.Info{ + Free: diskMinTotalSpace * 3, + FSType: "NFS", + }, + err: nil, + }, + // Test 2 - when fstype is xfs and total inodes are small. + { + diskInfo: disk.Info{ + Free: diskMinTotalSpace * 3, + FSType: "XFS", + Files: 9999, + Ffree: 9999, + }, + err: errDiskFull, + }, + // Test 3 - when fstype is btrfs and total inodes are empty. + { + diskInfo: disk.Info{ + Free: diskMinTotalSpace * 3, + FSType: "BTRFS", + Files: 0, + }, + err: nil, + }, + // Test 4 - when fstype is xfs and total disk space is really small. + { + diskInfo: disk.Info{ + Free: diskMinTotalSpace - diskMinTotalSpace/1024, + FSType: "XFS", + Files: 9999, + }, + err: errDiskFull, + }, + } + + // Validate all cases. + for i, test := range testCases { + if err := checkDiskMinFree(test.diskInfo); test.err != err { + t.Errorf("Test %d: Expected error %s, got %s", i+1, test.err, err) + } + } +} diff --git a/cmd/postpolicyform.go b/cmd/postpolicyform.go index ed50ed007..cb71cd385 100644 --- a/cmd/postpolicyform.go +++ b/cmd/postpolicyform.go @@ -112,7 +112,7 @@ type PostPolicyForm struct { } // parsePostPolicyForm - Parse JSON policy string into typed POostPolicyForm structure. -func parsePostPolicyForm(policy string) (PostPolicyForm, error) { +func parsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) { // Convert po into interfaces and // perform strict type conversion using reflection. var rawPolicy struct { @@ -122,7 +122,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) { err := json.Unmarshal([]byte(policy), &rawPolicy) if err != nil { - return PostPolicyForm{}, err + return ppf, err } parsedPolicy := PostPolicyForm{} @@ -130,7 +130,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) { // Parse expiry time. parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration) if err != nil { - return PostPolicyForm{}, err + return ppf, err } parsedPolicy.Conditions.Policies = make(map[string]struct { Operator string diff --git a/cmd/prepare-storage-msg.go b/cmd/prepare-storage-msg.go index 77a9ae422..94169ee9e 100644 --- a/cmd/prepare-storage-msg.go +++ b/cmd/prepare-storage-msg.go @@ -133,6 +133,14 @@ func printConfigErrMsg(storageDisks []StorageAPI, sErrs []error, fn printOnceFun // Generate a formatted message when cluster is misconfigured. func getConfigErrMsg(storageDisks []StorageAPI, sErrs []error) string { msg := colorBlue("\nDetected configuration inconsistencies in the cluster. Please fix following servers.") + return msg + combineDiskErrs(storageDisks, sErrs) +} + +// Combines each disk errors in a newline formatted string. +// this is a helper function in printing messages across +// all disks. 
+func combineDiskErrs(storageDisks []StorageAPI, sErrs []error) string { + var msg string for i, disk := range storageDisks { if disk == nil { continue diff --git a/cmd/prepare-storage.go b/cmd/prepare-storage.go index 550785772..ebf57676f 100644 --- a/cmd/prepare-storage.go +++ b/cmd/prepare-storage.go @@ -18,6 +18,7 @@ package cmd import ( "errors" + "fmt" "time" "github.com/minio/mc/pkg/console" @@ -118,6 +119,24 @@ func quickErrToActions(errMap map[error]int) InitActions { // Preparatory initialization stage for XL validates known errors. // Converts them into specific actions. These actions have special purpose // which caller decides on what needs to be done. + +// Logic used in this function is as shown below. +// +// ---- Possible states and handled conditions ----- +// +// - Formatted setup +// - InitObjectLayer when `disksFormatted >= readQuorum` +// - Wait for quorum when `disksFormatted < readQuorum && disksFormatted + disksOffline >= readQuorum` +// (we don't know yet if there are unformatted disks) +// - Wait for heal when `disksFormatted >= readQuorum && disksUnformatted > 0` +// (here we know there is at least one unformatted disk which requires healing) +// +// - Unformatted setup +// - Format/Wait for format when `disksUnformatted == diskCount` +// +// - Wait for all when `disksUnformatted + disksOffline == disksCount` +// +// Under all other conditions should lead to server initialization aborted. func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions { // Count errors by error value. errMap := make(map[error]int) @@ -135,19 +154,12 @@ func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions { disksOffline := errMap[errDiskNotFound] disksFormatted := errMap[nil] disksUnformatted := errMap[errUnformattedDisk] - disksCorrupted := errMap[errCorruptedFormat] // No Quorum lots of offline disks, wait for quorum. if disksOffline > readQuorum { return WaitForQuorum } - // There is quorum or more corrupted disks, there is not enough good - // disks to reconstruct format.json. - if disksCorrupted >= quorum { - return Abort - } - // All disks are unformatted, proceed to formatting disks. if disksUnformatted == diskCount { // Only the first server formats an uninitialized setup, others wait for notification. @@ -163,6 +175,7 @@ func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions { if disksUnformatted+disksFormatted+disksOffline == diskCount { return WaitForAll } + // Some disks possibly corrupted and too many unformatted disks. return Abort } @@ -172,10 +185,13 @@ func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions { if disksFormatted+disksOffline == diskCount { return InitObjectLayer } + // Some of the formatted disks are possibly corrupted or unformatted, heal them. return WaitForHeal - } // Exhausted all our checks, un-handled errors perhaps we Abort. - return WaitForQuorum + } + + // Exhausted all our checks, un-handled errors perhaps we Abort. + return Abort } // Prints retry message upon a specific retry count. @@ -227,6 +243,7 @@ func retryFormattingXLDisks(firstDisk bool, endpoints EndpointList, storageDisks // actual errors for disks not being available. printRetryMsg(sErrs, storageDisks) } + // Pre-emptively check if one of the formatted disks // is invalid. 
This function returns success for the // most part unless one of the formats is not consistent @@ -240,16 +257,17 @@ func retryFormattingXLDisks(firstDisk bool, endpoints EndpointList, storageDisks // first server has a wrong format and exit gracefully. // refer - https://github.com/minio/minio/issues/4140 if retryCount > maxRetryAttempts { - errorIf(err, "Detected disk (%s) in unexpected format", + errorIf(err, "%s : Detected disk in unexpected format", storageDisks[index]) continue } return err } + // Check if this is a XL or distributed XL, anything > 1 is considered XL backend. switch prepForInitXL(firstDisk, sErrs, len(storageDisks)) { case Abort: - return errCorruptedFormat + return fmt.Errorf("%s", combineDiskErrs(storageDisks, sErrs)) case FormatDisks: console.Eraseline() printFormatMsg(endpoints, storageDisks, printOnceFn()) diff --git a/cmd/rpc-server.go b/cmd/rpc-server.go new file mode 100644 index 000000000..3ca020832 --- /dev/null +++ b/cmd/rpc-server.go @@ -0,0 +1,58 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "io" + "net/http" + "net/rpc" + + miniohttp "github.com/minio/minio/pkg/http" +) + +// ServeHTTP implements an http.Handler that answers RPC requests, +// hijacks the underlying connection and clears all deadlines if any. +func (server *rpcServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.Method != http.MethodConnect { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + errorIf(err, "rpc hijacking failed for: %s", req.RemoteAddr) + return + } + + // Overrides Read/Write deadlines if any. + bufConn, ok := conn.(*miniohttp.BufConn) + if ok { + bufConn.RemoveTimeout() + conn = bufConn + } + + // Can connect to RPC service using HTTP CONNECT to rpcPath. + io.WriteString(conn, "HTTP/1.0 200 Connected to Go RPC\n\n") + server.ServeConn(conn) +} + +type rpcServer struct{ *rpc.Server } + +// Similar to rpc.NewServer() provides a custom ServeHTTP override. +func newRPCServer() *rpcServer { + return &rpcServer{rpc.NewServer()} +} diff --git a/cmd/rpc-server_test.go b/cmd/rpc-server_test.go new file mode 100644 index 000000000..e58c51bfb --- /dev/null +++ b/cmd/rpc-server_test.go @@ -0,0 +1,76 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cmd + +import ( + "net/http" + "net/http/httptest" + "testing" + + router "github.com/gorilla/mux" +) + +type ArithArgs struct { + A, B int +} + +type ArithReply struct { + C int +} + +type Arith int + +// Some of Arith's methods have value args, some have pointer args. That's deliberate. + +func (t *Arith) Add(args ArithArgs, reply *ArithReply) error { + reply.C = args.A + args.B + return nil +} + +func TestGoHTTPRPC(t *testing.T) { + newServer := newRPCServer() + newServer.Register(new(Arith)) + + mux := router.NewRouter().SkipClean(true) + mux.Path("/foo").Handler(newServer) + + httpServer := httptest.NewServer(mux) + defer httpServer.Close() + + client := newRPCClient(httpServer.Listener.Addr().String(), "/foo", false) + defer client.Close() + + // Synchronous calls + args := &ArithArgs{7, 8} + reply := new(ArithReply) + if err := client.Call("Arith.Add", args, reply); err != nil { + t.Errorf("Add: expected no error but got string %v", err) + } + + if reply.C != args.A+args.B { + t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B) + } + + resp, err := http.Get(httpServer.URL + "/foo") + if err != nil { + t.Fatal(err) + } + + if resp.StatusCode != http.StatusMethodNotAllowed { + t.Errorf("Expected %d, got %d", http.StatusMethodNotAllowed, resp.StatusCode) + } +} diff --git a/cmd/s3-peer-router.go b/cmd/s3-peer-router.go index 3701bc8ab..aaf243256 100644 --- a/cmd/s3-peer-router.go +++ b/cmd/s3-peer-router.go @@ -17,8 +17,6 @@ package cmd import ( - "net/rpc" - router "github.com/gorilla/mux" ) @@ -39,7 +37,7 @@ func registerS3PeerRPCRouter(mux *router.Router) error { }, } - s3PeerRPCServer := rpc.NewServer() + s3PeerRPCServer := newRPCServer() err := s3PeerRPCServer.RegisterName("S3", s3PeerHandlers) if err != nil { return traceError(err) diff --git a/cmd/server-main.go b/cmd/server-main.go index 1a3157ca7..dadf2df30 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -17,21 +17,21 @@ package cmd import ( - "errors" + "net/http" "os" - "path/filepath" + "os/signal" "runtime" - "strings" - "time" + "syscall" "github.com/minio/cli" "github.com/minio/dsync" + miniohttp "github.com/minio/minio/pkg/http" ) var serverFlags = []cli.Flag{ cli.StringFlag{ Name: "address", - Value: ":9000", + Value: ":" + globalMinioPort, Usage: "Bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname.", }, } @@ -58,6 +58,9 @@ ENVIRONMENT VARIABLES: BROWSER: MINIO_BROWSER: To disable web browser access, set this value to "off". + REGION: + MINIO_REGION: To set custom region. By default it is "us-east-1". + EXAMPLES: 1. Start minio server on "/home/shared" directory. $ {{.HelpName}} /home/shared @@ -78,61 +81,9 @@ EXAMPLES: `, } -// Check for updates and print a notification message -func checkUpdate(mode string) { - // Its OK to ignore any errors during getUpdateInfo() here. - if older, downloadURL, err := getUpdateInfo(1*time.Second, mode); err == nil { - if updateMsg := computeUpdateMessage(downloadURL, older); updateMsg != "" { - log.Println(updateMsg) - } - } -} - -func enableLoggers() { - fileLogTarget := serverConfig.Logger.GetFile() - if fileLogTarget.Enable { - err := InitFileLogger(&fileLogTarget) - fatalIf(err, "Unable to initialize file logger") - log.AddTarget(fileLogTarget) - } - - consoleLogTarget := serverConfig.Logger.GetConsole() - if consoleLogTarget.Enable { - InitConsoleLogger(&consoleLogTarget) - } - - log.SetConsoleTarget(consoleLogTarget) -} - -func initConfig() { - // Config file does not exist, we create it fresh and return upon success. 
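// Because the hijacking handler introduced above replies to CONNECT with the standard
// "200 Connected to Go RPC" status line, a stock net/rpc client should also be able to
// reach it through rpc.DialHTTPPath. A minimal sketch, assuming a server with the Arith
// test service is listening at addr and the handler is mounted at "/foo" as in
// TestGoHTTPRPC; the types below simply mirror the test's.
package cmd_example

import "net/rpc"

type ArithArgs struct{ A, B int }
type ArithReply struct{ C int }

func exampleDialArith(addr string) (int, error) {
	client, err := rpc.DialHTTPPath("tcp", addr, "/foo")
	if err != nil {
		return 0, err
	}
	defer client.Close()

	var reply ArithReply
	if err := client.Call("Arith.Add", ArithArgs{A: 7, B: 8}, &reply); err != nil {
		return 0, err
	}
	return reply.C, nil // 7 + 8 == 15
}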
- if isFile(getConfigFile()) { - fatalIf(migrateConfig(), "Config migration failed.") - fatalIf(loadConfig(), "Unable to load config version: '%s'.", v18) - } else { - fatalIf(newConfig(), "Unable to initialize minio config for the first time.") - log.Println("Created minio configuration file successfully at " + getConfigDir()) - } -} - func serverHandleCmdArgs(ctx *cli.Context) { - // Set configuration directory. - { - // Get configuration directory from command line argument. - configDir := ctx.String("config-dir") - if !ctx.IsSet("config-dir") && ctx.GlobalIsSet("config-dir") { - configDir = ctx.GlobalString("config-dir") - } - if configDir == "" { - fatalIf(errors.New("empty directory"), "Configuration directory cannot be empty.") - } - - // Disallow relative paths, figure out absolute paths. - configDirAbs, err := filepath.Abs(configDir) - fatalIf(err, "Unable to fetch absolute path for config directory %s", configDir) - - setConfigDir(configDirAbs) - } + // Handle common command args. + handleCommonCmdArgs(ctx) // Server address. serverAddr := ctx.String("address") @@ -159,36 +110,8 @@ func serverHandleCmdArgs(ctx *cli.Context) { } func serverHandleEnvVars() { - // Start profiler if env is set. - if profiler := os.Getenv("_MINIO_PROFILER"); profiler != "" { - globalProfiler = startProfiler(profiler) - } - - // Check if object cache is disabled. - globalXLObjCacheDisabled = strings.EqualFold(os.Getenv("_MINIO_CACHE"), "off") - - accessKey := os.Getenv("MINIO_ACCESS_KEY") - secretKey := os.Getenv("MINIO_SECRET_KEY") - if accessKey != "" && secretKey != "" { - cred, err := createCredential(accessKey, secretKey) - fatalIf(err, "Invalid access/secret Key set in environment.") - - // credential Envs are set globally. - globalIsEnvCreds = true - globalActiveCred = cred - } - - if browser := os.Getenv("MINIO_BROWSER"); browser != "" { - browserFlag, err := ParseBrowserFlag(browser) - if err != nil { - fatalIf(errors.New("invalid value"), "Unknown value ‘%s’ in MINIO_BROWSER environment variable.", browser) - } - - // browser Envs are set globally, this does not represent - // if browser is turned off or on. - globalIsEnvBrowser = true - globalIsBrowserEnabled = bool(browserFlag) - } + // Handle common environment variables. + handleCommonEnvVars() if serverRegion := os.Getenv("MINIO_REGION"); serverRegion != "" { // region Envs are set globally. @@ -210,12 +133,16 @@ func serverMain(ctx *cli.Context) { log.EnableQuiet() } + // Handle all server command args. serverHandleCmdArgs(ctx) + + // Handle all server environment vars. serverHandleEnvVars() // Create certs path. fatalIf(createConfigDir(), "Unable to create configuration directories.") + // Initialize server config. initConfig() // Enable loggers as per configuration file. @@ -226,8 +153,8 @@ func serverMain(ctx *cli.Context) { // Check and load SSL certificates. var err error - globalPublicCerts, globalRootCAs, globalIsSSL, err = getSSLConfig() - fatalIf(err, "Invalid SSL key file") + globalPublicCerts, globalRootCAs, globalTLSCertificate, globalIsSSL, err = getSSLConfig() + fatalIf(err, "Invalid SSL certificate file") if !quietFlag { // Check for new updates from dl.minio.io. @@ -253,43 +180,47 @@ func serverMain(ctx *cli.Context) { initNSLock(globalIsDistXL) // Configure server. - handler, err := configureServerHandler(globalEndpoints) + // Declare handler to avoid lint errors. 
+ var handler http.Handler + handler, err = configureServerHandler(globalEndpoints) fatalIf(err, "Unable to configure one of server's RPC services.") - // Initialize a new HTTP server. - apiServer := NewServerMux(globalMinioAddr, handler) - // Initialize S3 Peers inter-node communication only in distributed setup. initGlobalS3Peers(globalEndpoints) // Initialize Admin Peers inter-node communication only in distributed setup. initGlobalAdminPeers(globalEndpoints) - // Start server, automatically configures TLS if certs are available. + globalHTTPServer = miniohttp.NewServer([]string{globalMinioAddr}, handler, globalTLSCertificate) + globalHTTPServer.UpdateBytesReadFunc = globalConnStats.incInputBytes + globalHTTPServer.UpdateBytesWrittenFunc = globalConnStats.incOutputBytes + globalHTTPServer.ErrorLogFunc = errorIf go func() { - cert, key := "", "" - if globalIsSSL { - cert, key = getPublicCertFile(), getPrivateKeyFile() - } - fatalIf(apiServer.ListenAndServe(cert, key), "Failed to start minio server.") + globalHTTPServerErrorCh <- globalHTTPServer.Start() }() + signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM) + newObject, err := newObjectLayer(globalEndpoints) - fatalIf(err, "Initializing object layer failed") + if err != nil { + errorIf(err, "Initializing object layer failed") + err = globalHTTPServer.Shutdown() + errorIf(err, "Unable to shutdown http server") + os.Exit(1) + } globalObjLayerMutex.Lock() globalObjectAPI = newObject globalObjLayerMutex.Unlock() // Prints the formatted startup message once object layer is initialized. - apiEndpoints := getAPIEndpoints(apiServer.Addr) + apiEndpoints := getAPIEndpoints(globalMinioAddr) printStartupMessage(apiEndpoints) // Set uptime time after object layer has initialized. globalBootTime = UTCNow() - // Waits on the server. - <-globalServiceDoneCh + handleSignals() } // Initialize object layer with the supplied disks, objectLayer is nil upon any error. diff --git a/cmd/server-mux.go b/cmd/server-mux.go deleted file mode 100644 index e33a96790..000000000 --- a/cmd/server-mux.go +++ /dev/null @@ -1,526 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "bufio" - "crypto/tls" - "errors" - "io" - "net" - "net/http" - "net/url" - "strings" - "sync" - "sync/atomic" - "time" -) - -const ( - serverShutdownPoll = 500 * time.Millisecond -) - -// The value chosen below is longest word chosen -// from all the http verbs comprising of -// "PRI", "OPTIONS", "GET", "HEAD", "POST", -// "PUT", "DELETE", "TRACE", "CONNECT". -const ( - maxHTTPVerbLen = 7 -) - -// HTTP2 PRI method. 
-var httpMethodPRI = "PRI" - -var defaultHTTP2Methods = []string{ - httpMethodPRI, -} - -var defaultHTTP1Methods = []string{ - http.MethodOptions, - http.MethodGet, - http.MethodHead, - http.MethodPost, - http.MethodPut, - http.MethodDelete, - http.MethodTrace, - http.MethodConnect, -} - -// ConnMux - Peeks into the incoming connection for relevant -// protocol without advancing the underlying net.Conn (io.Reader). -// ConnMux - allows us to multiplex between TLS and Regular HTTP -// connections on the same listeners. -type ConnMux struct { - net.Conn - // To peek net.Conn incoming data - peeker *bufio.Reader -} - -// NewConnMux - creates a new ConnMux instance -func NewConnMux(c net.Conn) *ConnMux { - br := bufio.NewReader(c) - return &ConnMux{ - Conn: c, - peeker: bufio.NewReader(br), - } -} - -// List of protocols to be detected by PeekProtocol function. -const ( - protocolTLS = "tls" - protocolHTTP1 = "http" - protocolHTTP2 = "http2" -) - -// PeekProtocol - reads the first bytes, then checks if it is similar -// to one of the default http methods. Returns error if there are any -// errors in peeking over the connection. -func (c *ConnMux) PeekProtocol() (string, error) { - // Peek for HTTP verbs. - buf, err := c.peeker.Peek(maxHTTPVerbLen) - if err != nil { - return "", err - } - - // Check for HTTP2 methods first. - for _, m := range defaultHTTP2Methods { - if strings.HasPrefix(string(buf), m) { - return protocolHTTP2, nil - } - } - - // Check for HTTP1 methods. - for _, m := range defaultHTTP1Methods { - if strings.HasPrefix(string(buf), m) { - return protocolHTTP1, nil - } - } - - // Default to TLS, this is not a real indication - // that the connection is TLS but that will be - // validated later by doing a handshake. - return protocolTLS, nil -} - -// Read reads from the tcp session for data sent by -// the client, additionally sets deadline for 15 secs -// after each successful read. Deadline cancels and -// returns error if the client does not send any -// data in 15 secs. Also keeps track of the total -// bytes received from the client. -func (c *ConnMux) Read(b []byte) (n int, err error) { - // Update total incoming number of bytes. - defer func() { - globalConnStats.incInputBytes(n) - }() - - n, err = c.peeker.Read(b) - if err != nil { - return n, err - } - - // Read deadline was already set previously, set again - // after a successful read operation for future read - // operations. - c.Conn.SetReadDeadline(UTCNow().Add(defaultTCPReadTimeout)) - - // Success. - return n, nil -} - -// Write to the client over a tcp session, additionally -// keeps track of the total bytes written by the server. -func (c *ConnMux) Write(b []byte) (n int, err error) { - // Update total outgoing number of bytes. - defer func() { - globalConnStats.incOutputBytes(n) - }() - - // Call the conn write wrapper. - return c.Conn.Write(b) -} - -// Close closes the underlying tcp connection. -func (c *ConnMux) Close() (err error) { - // Make sure that we always close a connection, - return c.Conn.Close() -} - -// ListenerMux wraps the standard net.Listener to inspect -// the communication protocol upon network connection -// ListenerMux also wraps net.Listener to ensure that once -// Listener.Close returns, the underlying socket has been closed. -// -// - https://github.com/golang/go/issues/10527 -// -// The default Listener returns from Close before the underlying -// socket has been closed if another goroutine has an active -// reference (e.g. is in Accept). 
-// -// The following sequence of events can happen: -// -// Goroutine 1 is running Accept, and is blocked, waiting for epoll -// -// Goroutine 2 calls Close. It sees an extra reference, and so cannot -// destroy the socket, but instead decrements a reference, marks the -// connection as closed and unblocks epoll. -// -// Goroutine 2 returns to the caller, makes a new connection. -// The new connection is sent to the socket (since it hasn't been destroyed) -// -// Goroutine 1 returns from epoll, and accepts the new connection. -// -// To avoid accepting connections after Close, we block Goroutine 2 -// from returning from Close till Accept returns an error to the user. -type ListenerMux struct { - net.Listener - config *tls.Config - // acceptResCh is a channel for transporting wrapped net.Conn (regular or tls) - // after peeking the content of the latter - acceptResCh chan ListenerMuxAcceptRes - // Cond is used to signal Close when there are no references to the listener. - cond *sync.Cond - refs int -} - -// ListenerMuxAcceptRes contains then final net.Conn data (wrapper by tls or not) to be sent to the http handler -type ListenerMuxAcceptRes struct { - conn net.Conn - err error -} - -// Default keep alive interval timeout, on your Linux system to figure out -// maximum probes sent -// -// > cat /proc/sys/net/ipv4/tcp_keepalive_probes -// ! 9 -// -// Final value of total keep-alive comes upto 9 x 10 * seconds = 1.5 minutes. -const defaultKeepAliveTimeout = 10 * time.Second // 10 seconds. - -// Timeout to close and return error to the client when not sending any data. -const defaultTCPReadTimeout = 15 * time.Second // 15 seconds. - -// newListenerMux listens and wraps accepted connections with tls after protocol peeking -func newListenerMux(listener net.Listener, config *tls.Config) *ListenerMux { - l := ListenerMux{ - Listener: listener, - config: config, - cond: sync.NewCond(&sync.Mutex{}), - acceptResCh: make(chan ListenerMuxAcceptRes), - } - // Start listening, wrap connections with tls when needed - go func() { - // Extract tcp listener. - tcpListener, ok := l.Listener.(*net.TCPListener) - if !ok { - l.acceptResCh <- ListenerMuxAcceptRes{err: errInvalidArgument} - return - } - - // Loop for accepting new connections - for { - // Use accept TCP method to receive the connection. - conn, err := tcpListener.AcceptTCP() - if err != nil { - l.acceptResCh <- ListenerMuxAcceptRes{err: err} - continue - } - - // Enable Read timeout - conn.SetReadDeadline(UTCNow().Add(defaultTCPReadTimeout)) - - // Enable keep alive for each connection. - conn.SetKeepAlive(true) - conn.SetKeepAlivePeriod(defaultKeepAliveTimeout) - - // Allocate new conn muxer. - connMux := NewConnMux(conn) - - // Wrap the connection with ConnMux to be able to peek the data in the incoming connection - // and decide if we need to wrap the connection itself with a TLS or not - go func(connMux *ConnMux) { - protocol, cerr := connMux.PeekProtocol() - if cerr != nil { - // io.EOF is usually returned by non-http clients, - // just close the connection to avoid any leak. - if cerr != io.EOF { - errorIf(cerr, "Unable to peek into incoming protocol") - } - connMux.Close() - return - } - switch protocol { - case protocolTLS: - tlsConn := tls.Server(connMux, l.config) - // Make sure to handshake so that we know that this - // is a TLS connection, if not we should close and reject - // such a connection. - if cerr = tlsConn.Handshake(); cerr != nil { - // Close for junk message. 
- tlsConn.Close() - return - } - l.acceptResCh <- ListenerMuxAcceptRes{ - conn: tlsConn, - } - default: - l.acceptResCh <- ListenerMuxAcceptRes{ - conn: connMux, - } - } - }(connMux) - } - }() - return &l -} - -// IsClosed - Returns if the underlying listener is closed fully. -func (l *ListenerMux) IsClosed() bool { - l.cond.L.Lock() - defer l.cond.L.Unlock() - return l.refs == 0 -} - -func (l *ListenerMux) incRef() { - l.cond.L.Lock() - l.refs++ - l.cond.L.Unlock() -} - -func (l *ListenerMux) decRef() { - l.cond.L.Lock() - l.refs-- - newRefs := l.refs - l.cond.L.Unlock() - if newRefs == 0 { - l.cond.Broadcast() - } -} - -// Close closes the listener. -// Any blocked Accept operations will be unblocked and return errors. -func (l *ListenerMux) Close() error { - if l == nil { - return nil - } - - if err := l.Listener.Close(); err != nil { - return err - } - - l.cond.L.Lock() - for l.refs > 0 { - l.cond.Wait() - } - l.cond.L.Unlock() - return nil -} - -// Accept - peek the protocol to decide if we should wrap the -// network stream with the TLS server -func (l *ListenerMux) Accept() (net.Conn, error) { - l.incRef() - defer l.decRef() - - res := <-l.acceptResCh - return res.conn, res.err -} - -// ServerMux - the main mux server -type ServerMux struct { - Addr string - handler http.Handler - listeners []*ListenerMux - - // Current number of concurrent http requests - currentReqs int32 - // Time to wait before forcing server shutdown - gracefulTimeout time.Duration - - mu sync.RWMutex // guards closing, and listeners - closing bool -} - -// NewServerMux constructor to create a ServerMux -func NewServerMux(addr string, handler http.Handler) *ServerMux { - m := &ServerMux{ - Addr: addr, - handler: handler, - // Wait for 5 seconds for new incoming connnections, otherwise - // forcibly close them during graceful stop or restart. - gracefulTimeout: 5 * time.Second, - } - - // Returns configured HTTP server. - return m -} - -// Initialize listeners on all ports. -func initListeners(serverAddr string, tls *tls.Config) ([]*ListenerMux, error) { - host, port, err := net.SplitHostPort(serverAddr) - if err != nil { - return nil, err - } - var listeners []*ListenerMux - if host == "" { - var listener net.Listener - listener, err = net.Listen("tcp", serverAddr) - if err != nil { - return nil, err - } - listeners = append(listeners, newListenerMux(listener, tls)) - return listeners, nil - } - var addrs []string - if net.ParseIP(host) != nil { - addrs = append(addrs, host) - } else { - addrs, err = net.LookupHost(host) - if err != nil { - return nil, err - } - if len(addrs) == 0 { - return nil, errUnexpected - } - } - for _, addr := range addrs { - var listener net.Listener - listener, err = net.Listen("tcp", net.JoinHostPort(addr, port)) - if err != nil { - return nil, err - } - listeners = append(listeners, newListenerMux(listener, tls)) - } - return listeners, nil -} - -// ListenAndServe - serve HTTP requests with protocol multiplexing support -// TLS is actived when certFile and keyFile parameters are not empty. -func (m *ServerMux) ListenAndServe(certFile, keyFile string) (err error) { - - tlsEnabled := certFile != "" && keyFile != "" - - config := &tls.Config{ - // Causes servers to use Go's default ciphersuite preferences, - // which are tuned to avoid attacks. Does nothing on clients. - PreferServerCipherSuites: true, - // Set minimum version to TLS 1.2 - MinVersion: tls.VersionTLS12, - } // Always instantiate. 
- - if tlsEnabled { - // Configure TLS in the server - if config.NextProtos == nil { - config.NextProtos = []string{"http/1.1", "h2"} - } - config.Certificates = make([]tls.Certificate, 1) - config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - } - - go m.handleServiceSignals() - - listeners, err := initListeners(m.Addr, config) - if err != nil { - return err - } - - m.mu.Lock() - m.listeners = listeners - m.mu.Unlock() - - // All http requests start to be processed by httpHandler - httpHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if tlsEnabled && r.TLS == nil { - // TLS is enabled but request is not TLS - // configured - return error to client. - writeErrorResponse(w, ErrInsecureClientRequest, &url.URL{}) - } else { - - // Return ServiceUnavailable for clients which are sending requests - // in shutdown phase - m.mu.RLock() - closing := m.closing - m.mu.RUnlock() - if closing { - w.WriteHeader(http.StatusServiceUnavailable) - return - } - - // Execute registered handlers, update currentReqs to keep - // track of concurrent requests processing on the server - atomic.AddInt32(&m.currentReqs, 1) - m.handler.ServeHTTP(w, r) - atomic.AddInt32(&m.currentReqs, -1) - } - }) - - var wg = &sync.WaitGroup{} - for _, listener := range listeners { - wg.Add(1) - go func(listener *ListenerMux) { - defer wg.Done() - serr := http.Serve(listener, httpHandler) - // Do not print the error if the listener is closed. - if !listener.IsClosed() { - errorIf(serr, "Unable to serve incoming requests.") - } - }(listener) - } - // Wait for all http.Serve's to return. - wg.Wait() - return nil -} - -// Close initiates the graceful shutdown -func (m *ServerMux) Close() error { - m.mu.Lock() - - if m.closing { - m.mu.Unlock() - return errors.New("Server has been closed") - } - // Closed completely. - m.closing = true - - // Close the listeners. - for _, listener := range m.listeners { - if err := listener.Close(); err != nil { - m.mu.Unlock() - return err - } - } - m.mu.Unlock() - - // Starting graceful shutdown. Check if all requests are finished - // in regular interval or force the shutdown - ticker := time.NewTicker(serverShutdownPoll) - defer ticker.Stop() - for { - select { - case <-time.After(m.gracefulTimeout): - return nil - case <-ticker.C: - if atomic.LoadInt32(&m.currentReqs) <= 0 { - return nil - } - } - } -} diff --git a/cmd/server-mux_test.go b/cmd/server-mux_test.go deleted file mode 100644 index 4a4007710..000000000 --- a/cmd/server-mux_test.go +++ /dev/null @@ -1,506 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import ( - "bufio" - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "math/big" - "net" - "net/http" - "os" - "runtime" - "strings" - "sync" - "testing" - "time" -) - -func TestListenerAcceptAfterClose(t *testing.T) { - var wg sync.WaitGroup - for i := 0; i < 16; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - for i := 0; i < 10; i++ { - runTest(t) - } - }() - } - wg.Wait() -} - -func runTest(t *testing.T) { - const connectionsBeforeClose = 1 - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - ln = newListenerMux(ln, &tls.Config{}) - - addr := ln.Addr().String() - waitForListener := make(chan error) - go func() { - defer close(waitForListener) - - var connCount int - for { - conn, aerr := ln.Accept() - if aerr != nil { - return - } - - connCount++ - if connCount > connectionsBeforeClose { - waitForListener <- errUnexpected - return - } - conn.Close() - } - }() - - for i := 0; i < connectionsBeforeClose; i++ { - err = dial(addr) - if err != nil { - t.Fatal(err) - } - } - - ln.Close() - dial(addr) - - err = <-waitForListener - if err != nil { - t.Fatal(err) - } -} - -func dial(addr string) error { - conn, err := net.Dial("tcp", addr) - if err == nil { - conn.Close() - } - return err -} - -// Tests initializing listeners. -func TestInitListeners(t *testing.T) { - testCases := []struct { - serverAddr string - shouldPass bool - }{ - // Test 1 with ip and port. - { - serverAddr: net.JoinHostPort("127.0.0.1", "0"), - shouldPass: true, - }, - // Test 2 only port. - { - serverAddr: net.JoinHostPort("", "0"), - shouldPass: true, - }, - // Test 3 with no port error. - { - serverAddr: "127.0.0.1", - shouldPass: false, - }, - // Test 4 with 'foobar' host not resolvable. - { - serverAddr: "foobar:9000", - shouldPass: false, - }, - } - for i, testCase := range testCases { - listeners, err := initListeners(testCase.serverAddr, &tls.Config{}) - if testCase.shouldPass { - if err != nil { - t.Fatalf("Test %d: Unable to initialize listeners %s", i+1, err) - } - for _, listener := range listeners { - if err = listener.Close(); err != nil { - t.Fatalf("Test %d: Unable to close listeners %s", i+1, err) - } - } - } - if err == nil && !testCase.shouldPass { - t.Fatalf("Test %d: Should fail but is successful", i+1) - } - } - // Windows doesn't have 'localhost' hostname. - if runtime.GOOS != globalWindowsOSName { - listeners, err := initListeners("localhost:"+getFreePort(), &tls.Config{}) - if err != nil { - t.Fatalf("Test 3: Unable to initialize listeners %s", err) - } - for _, listener := range listeners { - if err = listener.Close(); err != nil { - t.Fatalf("Test 3: Unable to close listeners %s", err) - } - } - } -} - -func TestClose(t *testing.T) { - // Create ServerMux - m := NewServerMux("", nil) - - if err := m.Close(); err != nil { - t.Error("Server errored while trying to Close", err) - } - - // Closing again should return an error. - if err := m.Close(); err.Error() != "Server has been closed" { - t.Error("Unexepcted error expected \"Server has been closed\", got", err) - } -} - -func TestServerMux(t *testing.T) { - var err error - var got []byte - var res *http.Response - - // Create ServerMux - m := NewServerMux("127.0.0.1:0", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, "hello") - })) - // Start serving requests - go m.ListenAndServe("", "") - - // Issue a GET request. 
Since we started server in a goroutine, it could be not ready - // at this point. So we allow until 5 failed retries before declare there is an error - for i := 0; i < 5; i++ { - // Sleep one second - time.Sleep(1 * time.Second) - // Check if one listener is ready - m.mu.Lock() - listenersCount := len(m.listeners) - m.mu.Unlock() - if listenersCount == 0 { - continue - } - m.mu.Lock() - listenerAddr := m.listeners[0].Addr().String() - m.mu.Unlock() - // Issue the GET request - client := http.Client{} - res, err = client.Get("http://" + listenerAddr) - if err != nil { - continue - } - // Read the request response - got, err = ioutil.ReadAll(res.Body) - if err != nil { - continue - } - // We've got a response, quit the loop - break - } - - // Check for error persisted after 5 times - if err != nil { - t.Fatal(err) - } - - // Check the web service response - if string(got) != "hello" { - t.Errorf("got %q, want hello", string(got)) - } -} - -func TestServerCloseBlocking(t *testing.T) { - // Create ServerMux - m := NewServerMux("127.0.0.1:0", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, "hello") - })) - - // Start serving requests in a goroutine - go m.ListenAndServe("", "") - - // Dial, try until 5 times before declaring a failure - dial := func() (net.Conn, error) { - var c net.Conn - var err error - for i := 0; i < 5; i++ { - // Sleep one second in case of the server is not ready yet - time.Sleep(1 * time.Second) - // Check if there is at least one listener configured - m.mu.Lock() - if len(m.listeners) == 0 { - m.mu.Unlock() - continue - } - m.mu.Unlock() - // Run the actual Dial - m.mu.Lock() - c, err = net.Dial("tcp", m.listeners[0].Addr().String()) - m.mu.Unlock() - if err != nil { - continue - } - } - return c, err - } - - // Dial to open a StateNew but don't send anything - cnew, err := dial() - if err != nil { - t.Fatal(err) - } - defer cnew.Close() - - // Dial another connection but idle after a request to have StateIdle - cidle, err := dial() - if err != nil { - t.Fatal(err) - } - defer cidle.Close() - - cidle.Write([]byte("HEAD / HTTP/1.1\r\nHost: foo\r\n\r\n")) - _, err = http.ReadResponse(bufio.NewReader(cidle), nil) - if err != nil { - t.Fatal(err) - } - - // Make sure we don't block forever. - m.Close() -} - -func TestServerListenAndServePlain(t *testing.T) { - wait := make(chan struct{}) - addr := net.JoinHostPort("127.0.0.1", getFreePort()) - errc := make(chan error) - once := &sync.Once{} - - // Initialize done channel specifically for each tests. 
- globalServiceDoneCh = make(chan struct{}, 1) - - // Create ServerMux and when we receive a request we stop waiting - m := NewServerMux(addr, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, "hello") - once.Do(func() { close(wait) }) - })) - - // ListenAndServe in a goroutine, but we don't know when it's ready - go func() { errc <- m.ListenAndServe("", "") }() - - // Keep trying the server until it's accepting connections - go func() { - client := http.Client{Timeout: time.Millisecond * 10} - for { - res, _ := client.Get("http://" + addr) - if res != nil && res.StatusCode == http.StatusOK { - break - } - } - }() - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - select { - case err := <-errc: - if err != nil { - t.Fatal(err) - } - case <-wait: - return - } - }() - - // Wait until we get an error or wait closed - wg.Wait() - - // Shutdown the ServerMux - m.Close() -} - -func TestServerListenAndServeTLS(t *testing.T) { - rootPath, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("Init Test config failed") - } - defer removeAll(rootPath) - - wait := make(chan struct{}) - addr := net.JoinHostPort("127.0.0.1", getFreePort()) - errc := make(chan error) - once := &sync.Once{} - - // Initialize done channel specifically for each tests. - globalServiceDoneCh = make(chan struct{}, 1) - - // Create ServerMux and when we receive a request we stop waiting - m := NewServerMux(addr, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, "hello") - once.Do(func() { close(wait) }) - })) - - // Create a cert - err = createConfigDir() - if err != nil { - t.Fatal(err) - } - certFile := getPublicCertFile() - keyFile := getPrivateKeyFile() - defer os.RemoveAll(certFile) - defer os.RemoveAll(keyFile) - - err = generateTestCert(addr) - if err != nil { - t.Error(err) - return - } - - // ListenAndServe in a goroutine, but we don't know when it's ready - go func() { errc <- m.ListenAndServe(certFile, keyFile) }() - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - client := http.Client{ - Timeout: time.Millisecond * 10, - Transport: tr, - } - // Keep trying the server until it's accepting connections - start := UTCNow() - for { - res, _ := client.Get("https://" + addr) - if res != nil && res.StatusCode == http.StatusOK { - break - } - // Explicit check to terminate loop after 5 minutes - // (for investigational purpose of issue #4461) - if UTCNow().Sub(start) >= 5*time.Minute { - t.Fatalf("Failed to establish connection after 5 minutes") - } - } - - // Once a request succeeds, subsequent requests should - // work fine. - res, err := client.Get("http://" + addr) - if err != nil { - t.Errorf("Got unexpected error: %v", err) - } - // Without TLS we expect a Bad-Request response from the server. - if !(res != nil && res.StatusCode == http.StatusBadRequest && res.Request.URL.Scheme == httpScheme) { - t.Fatalf("Plaintext request to TLS server did not have expected response!") - } - - body, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Errorf("Error reading body") - } - - // Check that the expected error is received. 
- bodyStr := string(body) - apiErr := getAPIError(ErrInsecureClientRequest) - if !(strings.Contains(bodyStr, apiErr.Code) && strings.Contains(bodyStr, apiErr.Description)) { - t.Fatalf("Plaintext request to TLS server did not have expected response body!") - } - wg.Done() - }() - - wg.Add(1) - go func() { - defer wg.Done() - select { - case err := <-errc: - if err != nil { - t.Error(err) - return - } - case <-wait: - return - } - }() - - // Wait until we get an error or wait closed - wg.Wait() - - // Shutdown the ServerMux - m.Close() -} - -// generateTestCert creates a cert and a key used for testing only -func generateTestCert(host string) error { - certPath := getPublicCertFile() - keyPath := getPrivateKeyFile() - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return err - } - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return err - } - - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - Organization: []string{"Minio Test Cert"}, - }, - NotBefore: UTCNow(), - NotAfter: UTCNow().Add(time.Minute * 1), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - if ip := net.ParseIP(host); ip != nil { - template.IPAddresses = append(template.IPAddresses, ip) - } - - template.IsCA = true - template.KeyUsage |= x509.KeyUsageCertSign - - derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) - if err != nil { - return err - } - - certOut, err := os.Create(certPath) - if err != nil { - return err - } - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - - keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) - keyOut.Close() - return nil -} diff --git a/cmd/server-startup-msg.go b/cmd/server-startup-msg.go index 9f4549eee..17ef1ee71 100644 --- a/cmd/server-startup-msg.go +++ b/cmd/server-startup-msg.go @@ -52,7 +52,7 @@ func printStartupMessage(apiEndPoints []string) { // Prints `mc` cli configuration message chooses // first endpoint as default. - printCLIAccessMsg(strippedAPIEndpoints[0]) + printCLIAccessMsg(strippedAPIEndpoints[0], "myminio") // Prints documentation message. printObjectAPIMsg() @@ -109,7 +109,7 @@ func printServerCommonMsg(apiEndpoints []string) { apiEndpointStr := strings.Join(apiEndpoints, " ") // Colorize the message and print. - log.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr))) + log.Println(colorBlue("Endpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr))) log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey))) log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey))) if region != "" { @@ -141,17 +141,17 @@ func printEventNotifiers() { // Prints startup message for command line access. Prints link to our documentation // and custom platform specific message. -func printCLIAccessMsg(endPoint string) { +func printCLIAccessMsg(endPoint string, alias string) { // Get saved credentials. cred := serverConfig.GetCredential() // Configure 'mc', following block prints platform specific information for minio client. 
log.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide) if runtime.GOOS == globalWindowsOSName { - mcMessage := fmt.Sprintf("$ mc.exe config host add myminio %s %s %s", endPoint, cred.AccessKey, cred.SecretKey) + mcMessage := fmt.Sprintf("$ mc.exe config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey) log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) } else { - mcMessage := fmt.Sprintf("$ mc config host add myminio %s %s %s", endPoint, cred.AccessKey, cred.SecretKey) + mcMessage := fmt.Sprintf("$ mc config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey) log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) } } @@ -174,7 +174,7 @@ func getStorageInfoMsg(storageInfo StorageInfo) string { if storageInfo.Backend.Type == Erasure { diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks) if maxDiskFailures := storageInfo.Backend.ReadQuorum - storageInfo.Backend.OfflineDisks; maxDiskFailures >= 0 { - diskInfo += fmt.Sprintf("We can withstand [%d] more drive failure(s).", maxDiskFailures) + diskInfo += fmt.Sprintf("We can withstand [%d] drive failure(s).", maxDiskFailures) } msg += colorBlue("\nStatus:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo) } diff --git a/cmd/server-startup-msg_test.go b/cmd/server-startup-msg_test.go index 2da77901c..5c4090328 100644 --- a/cmd/server-startup-msg_test.go +++ b/cmd/server-startup-msg_test.go @@ -140,7 +140,7 @@ func TestPrintCLIAccessMsg(t *testing.T) { defer removeAll(root) apiEndpoints := []string{"http://127.0.0.1:9000"} - printCLIAccessMsg(apiEndpoints[0]) + printCLIAccessMsg(apiEndpoints[0], "myminio") } // Test print startup message. diff --git a/cmd/server_test.go b/cmd/server_test.go index 097f1f9cf..cfb07c9e6 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -1619,7 +1619,8 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *C) { c.Assert(err, IsNil) c.Assert(strings.Contains(string(getContent), "bar"), Equals, true) - c.Assert(strings.Contains(string(getContent), "miniominio"), Equals, true) + c.Assert(strings.Contains(string(getContent), fmt.Sprintf("%s", + globalMinioDefaultOwnerID)), Equals, true) } diff --git a/cmd/service.go b/cmd/service.go index 67fdd5f18..821b4432a 100644 --- a/cmd/service.go +++ b/cmd/service.go @@ -19,8 +19,6 @@ package cmd import ( "os" "os/exec" - "syscall" - "time" ) // Type of service signals currently supported. @@ -64,59 +62,3 @@ func restartProcess() error { cmd.Stderr = os.Stderr return cmd.Start() } - -// Handles all serviceSignal and execute service functions. -func (m *ServerMux) handleServiceSignals() error { - // Custom exit function - runExitFn := func(err error) { - // If global profiler is set stop before we exit. - if globalProfiler != nil { - globalProfiler.Stop() - } - - // Call user supplied user exit function - fatalIf(err, "Unable to gracefully complete service operation.") - - // We are usually done here, close global service done channel. - globalServiceDoneCh <- struct{}{} - } - // Wait for SIGTERM in a go-routine. - trapCh := signalTrap(os.Interrupt, syscall.SIGTERM) - go func(trapCh <-chan bool) { - <-trapCh - globalServiceSignalCh <- serviceStop - }(trapCh) - - // Start listening on service signal. Monitor signals. - for { - signal := <-globalServiceSignalCh - switch signal { - case serviceStatus: - /// We don't do anything for this. 
- case serviceRestart: - if err := m.Close(); err != nil { - errorIf(err, "Unable to close server gracefully") - } - if err := restartProcess(); err != nil { - errorIf(err, "Unable to restart the server.") - } - runExitFn(nil) - case serviceStop: - log.Println("Received signal to exit.") - go func() { - time.Sleep(serverShutdownPoll + time.Millisecond*100) - log.Println("Waiting for active connections to terminate - press Ctrl+C to quit immediately.") - }() - if err := m.Close(); err != nil { - errorIf(err, "Unable to close server gracefully") - } - objAPI := newObjectLayerFn() - if objAPI == nil { - // Server not initialized yet, exit happily. - runExitFn(nil) - } else { - runExitFn(objAPI.Shutdown()) - } - } - } -} diff --git a/cmd/signals.go b/cmd/signals.go index 2431671de..59ba40529 100644 --- a/cmd/signals.go +++ b/cmd/signals.go @@ -1,5 +1,5 @@ /* - * Minio Client, (C) 2015 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,32 +18,67 @@ package cmd import ( "os" - "os/signal" ) -// signalTrap traps the registered signals and notifies the caller. -func signalTrap(sig ...os.Signal) <-chan bool { - // channel to notify the caller. - trapCh := make(chan bool, 1) +func handleSignals() { + // Custom exit function + exit := func(state bool) { + // If global profiler is set stop before we exit. + if globalProfiler != nil { + globalProfiler.Stop() + } - go func(chan<- bool) { - // channel to receive signals. - sigCh := make(chan os.Signal, 1) - defer close(sigCh) + if state { + os.Exit(0) + } - // `signal.Notify` registers the given channel to - // receive notifications of the specified signals. - signal.Notify(sigCh, sig...) + os.Exit(1) + } - // Wait for the signal. - <-sigCh + stopProcess := func() bool { + var err, oerr error - // Once signal has been received stop signal Notify handler. - signal.Stop(sigCh) + err = globalHTTPServer.Shutdown() + errorIf(err, "Unable to shutdown http server") - // Notify the caller. - trapCh <- true - }(trapCh) + if objAPI := newObjectLayerFn(); objAPI != nil { + oerr = objAPI.Shutdown() + errorIf(oerr, "Unable to shutdown object layer") + } - return trapCh + return (err == nil && oerr == nil) + } + + for { + select { + case err := <-globalHTTPServerErrorCh: + errorIf(err, "http server exited abnormally") + var oerr error + if objAPI := newObjectLayerFn(); objAPI != nil { + oerr = objAPI.Shutdown() + errorIf(oerr, "Unable to shutdown object layer") + } + + exit(err == nil && oerr == nil) + case osSignal := <-globalOSSignalCh: + log.Printf("Exiting on signal %v\n", osSignal) + exit(stopProcess()) + case signal := <-globalServiceSignalCh: + switch signal { + case serviceStatus: + // Ignore this at the moment. + case serviceRestart: + log.Println("Restarting on service signal") + err := globalHTTPServer.Shutdown() + errorIf(err, "Unable to shutdown http server") + rerr := restartProcess() + errorIf(rerr, "Unable to restart the server") + + exit(err == nil && rerr == nil) + case serviceStop: + log.Println("Stopping on service signal") + exit(stopProcess()) + } + } + } } diff --git a/cmd/signature-v4-parser.go b/cmd/signature-v4-parser.go index 36060cfb8..de2e10ede 100644 --- a/cmd/signature-v4-parser.go +++ b/cmd/signature-v4-parser.go @@ -45,20 +45,20 @@ func (c credentialHeader) getScope() string { } // parse credentialHeader string into its structured form. 
-func parseCredentialHeader(credElement string) (credentialHeader, APIErrorCode) { +func parseCredentialHeader(credElement string) (ch credentialHeader, aec APIErrorCode) { creds := strings.Split(strings.TrimSpace(credElement), "=") if len(creds) != 2 { - return credentialHeader{}, ErrMissingFields + return ch, ErrMissingFields } if creds[0] != "Credential" { - return credentialHeader{}, ErrMissingCredTag + return ch, ErrMissingCredTag } credElements := strings.Split(strings.TrimSpace(creds[1]), "/") if len(credElements) != 5 { - return credentialHeader{}, ErrCredMalformed + return ch, ErrCredMalformed } if !isAccessKeyValid(credElements[0]) { - return credentialHeader{}, ErrInvalidAccessKeyID + return ch, ErrInvalidAccessKeyID } // Save access key id. cred := credentialHeader{ @@ -67,15 +67,15 @@ func parseCredentialHeader(credElement string) (credentialHeader, APIErrorCode) var e error cred.scope.date, e = time.Parse(yyyymmdd, credElements[1]) if e != nil { - return credentialHeader{}, ErrMalformedCredentialDate + return ch, ErrMalformedCredentialDate } cred.scope.region = credElements[2] if credElements[3] != "s3" { - return credentialHeader{}, ErrInvalidService + return ch, ErrInvalidService } cred.scope.service = credElements[3] if credElements[4] != "aws4_request" { - return credentialHeader{}, ErrInvalidRequestVersion + return ch, ErrInvalidRequestVersion } cred.scope.request = credElements[4] return cred, ErrNone @@ -148,17 +148,17 @@ func doesV4PresignParamsExist(query url.Values) APIErrorCode { } // Parses all the presigned signature values into separate elements. -func parsePreSignV4(query url.Values) (preSignValues, APIErrorCode) { +func parsePreSignV4(query url.Values) (psv preSignValues, aec APIErrorCode) { var err APIErrorCode // verify whether the required query params exist. err = doesV4PresignParamsExist(query) if err != ErrNone { - return preSignValues{}, err + return psv, err } // Verify if the query algorithm is supported or not. if query.Get("X-Amz-Algorithm") != signV4Algorithm { - return preSignValues{}, ErrInvalidQuerySignatureAlgo + return psv, ErrInvalidQuerySignatureAlgo } // Initialize signature version '4' structured header. @@ -167,35 +167,35 @@ func parsePreSignV4(query url.Values) (preSignValues, APIErrorCode) { // Save credential. preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential")) if err != ErrNone { - return preSignValues{}, err + return psv, err } var e error // Save date in native time.Time. preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date")) if e != nil { - return preSignValues{}, ErrMalformedPresignedDate + return psv, ErrMalformedPresignedDate } // Save expires in native time.Duration. preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s") if e != nil { - return preSignValues{}, ErrMalformedExpires + return psv, ErrMalformedExpires } if preSignV4Values.Expires < 0 { - return preSignValues{}, ErrNegativeExpires + return psv, ErrNegativeExpires } // Save signed headers. preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders")) if err != ErrNone { - return preSignValues{}, err + return psv, err } // Save signature. preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature")) if err != ErrNone { - return preSignValues{}, err + return psv, err } // Return structed form of signature query string. 
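Throughout the parser changes above, the results gain names (ch, aec, psv and so on) so the many early-error returns can reuse the zero value instead of repeating the struct literal. For reference, the credential element these functions decode has the form Credential=<access-key>/<yyyymmdd>/<region>/s3/aws4_request; a small sketch of that decomposition with placeholder values:

```
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// Example credential element as sent by a SigV4 client; the access key
	// and date are placeholders, not values from this patch.
	credElement := "Credential=AKIAIOSFODNN7EXAMPLE/20170601/us-east-1/s3/aws4_request"

	value := strings.TrimPrefix(credElement, "Credential=")
	parts := strings.Split(value, "/") // access key, date, region, service, request version
	if len(parts) != 5 {
		fmt.Println("malformed credential")
		return
	}

	date, err := time.Parse("20060102", parts[1])
	if err != nil {
		fmt.Println("malformed credential date")
		return
	}

	fmt.Printf("access key=%s date=%s region=%s service=%s version=%s\n",
		parts[0], date.Format("2006-01-02"), parts[2], parts[3], parts[4])
}
```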
@@ -207,25 +207,25 @@ func parsePreSignV4(query url.Values) (preSignValues, APIErrorCode) { // Authorization: algorithm Credential=accessKeyID/credScope, \ // SignedHeaders=signedHeaders, Signature=signature // -func parseSignV4(v4Auth string) (signValues, APIErrorCode) { +func parseSignV4(v4Auth string) (sv signValues, aec APIErrorCode) { // Replace all spaced strings, some clients can send spaced // parameters and some won't. So we pro-actively remove any spaces // to make parsing easier. v4Auth = strings.Replace(v4Auth, " ", "", -1) if v4Auth == "" { - return signValues{}, ErrAuthHeaderEmpty + return sv, ErrAuthHeaderEmpty } // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v4Auth, signV4Algorithm) { - return signValues{}, ErrSignatureVersionNotSupported + return sv, ErrSignatureVersionNotSupported } // Strip off the Algorithm prefix. v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return signValues{}, ErrMissingFields + return sv, ErrMissingFields } // Initialize signature version '4' structured header. @@ -235,19 +235,19 @@ func parseSignV4(v4Auth string) (signValues, APIErrorCode) { // Save credentail values. signV4Values.Credential, err = parseCredentialHeader(authFields[0]) if err != ErrNone { - return signValues{}, err + return sv, err } // Save signed headers. signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1]) if err != ErrNone { - return signValues{}, err + return sv, err } // Save signature. signV4Values.Signature, err = parseSignature(authFields[2]) if err != ErrNone { - return signValues{}, err + return sv, err } // Return the structure here. diff --git a/cmd/signature-v4.go b/cmd/signature-v4.go index f71e17e36..dec9340a9 100644 --- a/cmd/signature-v4.go +++ b/cmd/signature-v4.go @@ -39,10 +39,9 @@ import ( // AWS Signature Version '4' constants. const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601Format = "20060102T150405Z" - yyyymmdd = "20060102" - presignedHostHeader = "host" + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601Format = "20060102T150405Z" + yyyymmdd = "20060102" ) // getCanonicalHeaders generate a list of request headers with their values diff --git a/cmd/storage-errors.go b/cmd/storage-errors.go index 2df4b43eb..0148985e2 100644 --- a/cmd/storage-errors.go +++ b/cmd/storage-errors.go @@ -27,6 +27,10 @@ var errUnexpected = errors.New("Unexpected error, please report this issue at ht // errCorruptedFormat - corrupted backend format. var errCorruptedFormat = errors.New("corrupted backend format, please join https://slack.minio.io for assistance") +// errFormatNotSupported - returned when older minio tries to parse metadata +// created by newer minio. +var errFormatNotSupported = errors.New("format not supported") + // errUnformattedDisk - unformatted disk found. var errUnformattedDisk = errors.New("unformatted disk found") @@ -51,9 +55,6 @@ var errFileNotFound = errors.New("file not found") // errFileNameTooLong - given file name is too long than supported length. var errFileNameTooLong = errors.New("file name too long") -// errFileComponentInvalid - given file name has invalid components. -var errFileComponentInvalid = errors.New("file name has invalid components") - // errVolumeExists - cannot create same volume again. 
var errVolumeExists = errors.New("volume already exists") diff --git a/cmd/storage-rpc-server.go b/cmd/storage-rpc-server.go index ee4d968fb..0f18d709f 100644 --- a/cmd/storage-rpc-server.go +++ b/cmd/storage-rpc-server.go @@ -18,7 +18,6 @@ package cmd import ( "io" - "net/rpc" "path" "time" @@ -217,7 +216,7 @@ func (s *storageServer) RenameFileHandler(args *RenameFileArgs, reply *AuthRPCRe } // Initialize new storage rpc. -func newRPCServer(endpoints EndpointList) (servers []*storageServer, err error) { +func newStorageRPCServer(endpoints EndpointList) (servers []*storageServer, err error) { for _, endpoint := range endpoints { if endpoint.IsLocal { storage, err := newPosix(endpoint.Path) @@ -238,14 +237,14 @@ func newRPCServer(endpoints EndpointList) (servers []*storageServer, err error) // registerStorageRPCRouter - register storage rpc router. func registerStorageRPCRouters(mux *router.Router, endpoints EndpointList) error { // Initialize storage rpc servers for every disk that is hosted on this node. - storageRPCs, err := newRPCServer(endpoints) + storageRPCs, err := newStorageRPCServer(endpoints) if err != nil { return traceError(err) } // Create a unique route for each disk exported from this node. for _, stServer := range storageRPCs { - storageRPCServer := rpc.NewServer() + storageRPCServer := newRPCServer() err = storageRPCServer.RegisterName("Storage", stServer) if err != nil { return traceError(err) diff --git a/cmd/streaming-signature-v4.go b/cmd/streaming-signature-v4.go index 87d1f1449..7a2c08df3 100644 --- a/cmd/streaming-signature-v4.go +++ b/cmd/streaming-signature-v4.go @@ -41,13 +41,10 @@ const ( ) // getChunkSignature - get chunk signature. -func getChunkSignature(seedSignature string, date time.Time, hashedChunk string) string { +func getChunkSignature(seedSignature string, region string, date time.Time, hashedChunk string) string { // Access credentials. cred := serverConfig.GetCredential() - // Server region. - region := serverConfig.GetRegion() - // Calculate string to sign. stringToSign := signV4ChunkedAlgorithm + "\n" + date.Format(iso8601Format) + "\n" + @@ -69,12 +66,12 @@ func getChunkSignature(seedSignature string, date time.Time, hashedChunk string) // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html // returns signature, error otherwise if the signature mismatches or any other // error while parsing and validating. -func calculateSeedSignature(r *http.Request) (signature string, date time.Time, errCode APIErrorCode) { +func calculateSeedSignature(r *http.Request) (signature string, region string, date time.Time, errCode APIErrorCode) { // Access credentials. cred := serverConfig.GetCredential() - // Server region. - region := serverConfig.GetRegion() + // Configured region. + confRegion := serverConfig.GetRegion() // Copy request. req := *r @@ -85,7 +82,7 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time, // Parse signature version '4' header. signV4Values, errCode := parseSignV4(v4Auth) if errCode != ErrNone { - return "", time.Time{}, errCode + return "", "", time.Time{}, errCode } // Payload streaming. @@ -93,32 +90,32 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time, // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' if payload != req.Header.Get("X-Amz-Content-Sha256") { - return "", time.Time{}, ErrContentSHA256Mismatch + return "", "", time.Time{}, ErrContentSHA256Mismatch } // Extract all the signed headers along with its values. 
extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) if errCode != ErrNone { - return "", time.Time{}, errCode + return "", "", time.Time{}, errCode } // Verify if the access key id matches. if signV4Values.Credential.accessKey != cred.AccessKey { - return "", time.Time{}, ErrInvalidAccessKeyID + return "", "", time.Time{}, ErrInvalidAccessKeyID } // Verify if region is valid. - sRegion := signV4Values.Credential.scope.region + region = signV4Values.Credential.scope.region // Should validate region, only if region is set. Some operations // do not need region validated for example GetBucketLocation. - if !isValidRegion(sRegion, region) { - return "", time.Time{}, ErrInvalidRegion + if !isValidRegion(region, confRegion) { + return "", "", time.Time{}, ErrInvalidRegion } // Extract date, if not present throw error. var dateStr string if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" { if dateStr = r.Header.Get("Date"); dateStr == "" { - return "", time.Time{}, ErrMissingDateHeader + return "", "", time.Time{}, ErrMissingDateHeader } } // Parse date header. @@ -126,7 +123,7 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time, date, err = time.Parse(iso8601Format, dateStr) if err != nil { errorIf(err, "Unable to parse date", dateStr) - return "", time.Time{}, ErrMalformedDate + return "", "", time.Time{}, ErrMalformedDate } // Query string. @@ -146,11 +143,11 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time, // Verify if signature match. if newSignature != signV4Values.Signature { - return "", time.Time{}, ErrSignatureDoesNotMatch + return "", "", time.Time{}, ErrSignatureDoesNotMatch } // Return caculated signature. - return newSignature, date, ErrNone + return newSignature, region, date, ErrNone } const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB @@ -168,7 +165,7 @@ var errMalformedEncoding = errors.New("malformed chunked encoding") // NewChunkedReader is not needed by normal applications. The http package // automatically decodes chunking when reading response bodies. func newSignV4ChunkedReader(req *http.Request) (io.Reader, APIErrorCode) { - seedSignature, seedDate, errCode := calculateSeedSignature(req) + seedSignature, region, seedDate, errCode := calculateSeedSignature(req) if errCode != ErrNone { return nil, errCode } @@ -176,6 +173,7 @@ func newSignV4ChunkedReader(req *http.Request) (io.Reader, APIErrorCode) { reader: bufio.NewReader(req.Body), seedSignature: seedSignature, seedDate: seedDate, + region: region, chunkSHA256Writer: sha256.New(), state: readChunkHeader, }, ErrNone @@ -187,6 +185,7 @@ type s3ChunkedReader struct { reader *bufio.Reader seedSignature string seedDate time.Time + region string state chunkState lastChunk bool chunkSignature string @@ -304,7 +303,7 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { // Calculate the hashed chunk. hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil)) // Calculate the chunk signature. - newSignature := getChunkSignature(cr.seedSignature, cr.seedDate, hashedChunk) + newSignature := getChunkSignature(cr.seedSignature, cr.region, cr.seedDate, hashedChunk) if cr.chunkSignature != newSignature { // Chunk signature doesn't match we return signature does not match. 
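After the change above, the region fed into each chunk signature comes from the credential scope checked in calculateSeedSignature rather than from the server configuration. The per-chunk string to sign begins with the algorithm and timestamp shown in getChunkSignature; per the AWS streaming SigV4 scheme it continues with the scope, the previous signature, the SHA-256 of an empty string and the SHA-256 of the chunk body. A self-contained sketch of that chain, with all key material made up:

```
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// hmacSHA256 returns the raw HMAC-SHA256 of data under key.
func hmacSHA256(key []byte, data string) []byte {
	h := hmac.New(sha256.New, key)
	h.Write([]byte(data))
	return h.Sum(nil)
}

// chunkSignature chains the previous signature into the next chunk's
// string-to-sign, which is how reordered or tampered chunks are detected.
func chunkSignature(secretKey, region, prevSignature string, t time.Time, chunk []byte) string {
	scope := t.Format("20060102") + "/" + region + "/s3/aws4_request"

	// Derive the signing key from the secret, date, region and service.
	key := hmacSHA256([]byte("AWS4"+secretKey), t.Format("20060102"))
	key = hmacSHA256(key, region)
	key = hmacSHA256(key, "s3")
	key = hmacSHA256(key, "aws4_request")

	emptySHA := sha256.Sum256(nil)
	chunkSHA := sha256.Sum256(chunk)

	stringToSign := "AWS4-HMAC-SHA256-PAYLOAD\n" +
		t.Format("20060102T150405Z") + "\n" +
		scope + "\n" +
		prevSignature + "\n" +
		hex.EncodeToString(emptySHA[:]) + "\n" +
		hex.EncodeToString(chunkSHA[:])

	return hex.EncodeToString(hmacSHA256(key, stringToSign))
}

func main() {
	// All inputs below are placeholders, not values from this patch.
	sig := chunkSignature("secretkey", "us-east-1", "seed-signature", time.Now().UTC(), []byte("chunk-data"))
	fmt.Println(sig)
}
```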
cr.err = errSignatureMismatch diff --git a/cmd/update-main.go b/cmd/update-main.go index a0e7953ba..96641e67a 100644 --- a/cmd/update-main.go +++ b/cmd/update-main.go @@ -17,6 +17,7 @@ package cmd import ( + "bufio" "fmt" "io/ioutil" "net/http" @@ -140,6 +141,30 @@ func IsKubernetes() bool { return os.Getenv("KUBERNETES_SERVICE_HOST") != "" } +// Minio Helm chart uses DownwardAPIFile to write pod label info to /podinfo/labels +// More info: https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#store-pod-fields +// Check if this is Helm package installation and report helm chart version +func getHelmVersion(helmInfoFilePath string) string { + // Read the file exists. + helmInfoFile, err := os.Open(helmInfoFilePath) + // Log errors and return "" as Minio can be deployed without Helm charts as well. + if err != nil && !os.IsNotExist(err) { + errorIf(err, "Unable to read %s", helmInfoFilePath) + return "" + } + + scanner := bufio.NewScanner(helmInfoFile) + for scanner.Scan() { + if strings.Contains(scanner.Text(), "chart=") { + helmChartVersion := strings.TrimPrefix(scanner.Text(), "chart=") + // remove quotes from the chart version + return strings.Trim(helmChartVersion, `"`) + } + } + + return "" +} + func isSourceBuild(minioVersion string) bool { _, err := time.Parse(time.RFC3339, minioVersion) return err != nil @@ -182,6 +207,15 @@ func getUserAgent(mode string) string { userAgent += " Minio/" + "universe-" + universePkgVersion } } + + if IsKubernetes() { + // In Kubernetes environment, try to fetch the helm package version + helmChartVersion := getHelmVersion("/podinfo/labels") + if helmChartVersion != "" { + userAgent += " Minio/" + "helm-" + helmChartVersion + } + } + return userAgent } diff --git a/cmd/update-main_test.go b/cmd/update-main_test.go index 175f97f2b..7e5bc15c5 100644 --- a/cmd/update-main_test.go +++ b/cmd/update-main_test.go @@ -235,6 +235,48 @@ func TestIsKubernetes(t *testing.T) { } } +// Tests if the environment we are running is Helm chart. +func TestGetHelmVersion(t *testing.T) { + createTempFile := func(content string) string { + tmpfile, err := ioutil.TempFile("", "helm-testfile-") + if err != nil { + t.Fatalf("Unable to create temporary file. %s", err) + } + if _, err = tmpfile.Write([]byte(content)); err != nil { + t.Fatalf("Unable to create temporary file. %s", err) + } + if err = tmpfile.Close(); err != nil { + t.Fatalf("Unable to create temporary file. %s", err) + } + return tmpfile.Name() + } + + filename := createTempFile( + `app="virtuous-rat-minio" +chart="minio-0.1.3" +heritage="Tiller" +pod-template-hash="818089471"`) + + defer os.Remove(filename) + + testCases := []struct { + filename string + expectedResult string + }{ + {"", ""}, + {"/tmp/non-existing-file", ""}, + {filename, "minio-0.1.3"}, + } + + for _, testCase := range testCases { + result := getHelmVersion(testCase.filename) + + if testCase.expectedResult != result { + t.Fatalf("result: expected: %v, got: %v", testCase.expectedResult, result) + } + } +} + // Tests if the environment we are running is in docker. func TestIsDocker(t *testing.T) { createTempFile := func(content string) string { diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index 28c84d289..dced81ba2 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -497,7 +497,12 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { } // Extract incoming metadata if any. 
- metadata := extractMetadataFromHeader(r.Header) + metadata, err := extractMetadataFromHeader(r.Header) + if err != nil { + errorIf(err, "found invalid http request header") + writeErrorResponse(w, ErrInternalError, r.URL) + return + } // Lock the object. objectLock := globalNSMutex.NewNSLock(bucket, object) @@ -690,13 +695,15 @@ func getBucketAccessPolicy(objAPI ObjectLayer, bucketName string) (policy.Bucket policyInfo, err = layer.GetBucketPolicies(bucketName) case *azureObjects: policyInfo, err = layer.GetBucketPolicies(bucketName) + case *gcsGateway: + policyInfo, err = layer.GetBucketPolicies(bucketName) default: policyInfo, err = readBucketAccessPolicy(objAPI, bucketName) } return policyInfo, err } -// GetBucketPolicy - get bucket policy. +// GetBucketPolicy - get bucket policy for the requested prefix. func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolicyArgs, reply *GetBucketPolicyRep) error { objectAPI := web.ObjectAPI() if objectAPI == nil { @@ -707,9 +714,12 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic return toJSONError(errAuthentication) } - policyInfo, err := readBucketAccessPolicy(objectAPI, args.BucketName) + var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName) if err != nil { - return toJSONError(err, args.BucketName) + _, ok := errorCause(err).(PolicyNotFound) + if !ok { + return toJSONError(err, args.BucketName) + } } reply.UIVersion = browser.UIVersion @@ -745,8 +755,8 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } - var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName) + var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName) if err != nil { _, ok := errorCause(err).(PolicyNotFound) if !ok { @@ -791,7 +801,6 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic } var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName) - if err != nil { if _, ok := errorCause(err).(PolicyNotFound); !ok { return toJSONError(err, args.BucketName) @@ -915,7 +924,7 @@ func presignedGet(host, bucket, object string, expiry int64) string { signature := getSignature(signingKey, stringToSign) // Construct the final presigned URL. - return host + path + "?" + query + "&" + "X-Amz-Signature=" + signature + return host + getURLEncodedName(path) + "?" + query + "&" + "X-Amz-Signature=" + signature } // toJSONError converts regular errors into more user friendly diff --git a/cmd/xl-v1-bucket.go b/cmd/xl-v1-bucket.go index 54dee3a6d..fcd5dc780 100644 --- a/cmd/xl-v1-bucket.go +++ b/cmd/xl-v1-bucket.go @@ -144,15 +144,15 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err } // GetBucketInfo - returns BucketInfo for a bucket. -func (xl xlObjects) GetBucketInfo(bucket string) (BucketInfo, error) { +func (xl xlObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) { // Verify if bucket is valid. 
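Among the web-handler changes above, presignedGet now passes the object path through getURLEncodedName before appending the query, so keys containing spaces or non-ASCII characters produce a usable presigned URL. A rough standard-library illustration; the real getURLEncodedName helper may encode a slightly different character set:

```
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// encodePath percent-encodes each path segment while keeping the separators
// intact; an approximation of what an S3-style path encoder does.
func encodePath(p string) string {
	segments := strings.Split(p, "/")
	for i, s := range segments {
		segments[i] = url.PathEscape(s)
	}
	return strings.Join(segments, "/")
}

func main() {
	host := "http://127.0.0.1:9000"
	path := "/mybucket/my photos/2017/α.jpg" // placeholder bucket and object name

	// Without encoding, the space and the non-ASCII rune would appear raw in the URL.
	fmt.Println(host + encodePath(path) + "?X-Amz-Signature=<signature>")
}
```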
if !IsValidBucketName(bucket) { - return BucketInfo{}, BucketNameInvalid{Bucket: bucket} + return bi, BucketNameInvalid{Bucket: bucket} } bucketInfo, err := xl.getBucketInfo(bucket) if err != nil { - return BucketInfo{}, toObjectErr(err, bucket) + return bi, toObjectErr(err, bucket) } return bucketInfo, nil } diff --git a/cmd/xl-v1-errors.go b/cmd/xl-v1-errors.go index faa03e92f..b8014d115 100644 --- a/cmd/xl-v1-errors.go +++ b/cmd/xl-v1-errors.go @@ -18,15 +18,6 @@ package cmd import "errors" -// errXLMaxDisks - returned for reached maximum of disks. -var errXLMaxDisks = errors.New("Number of disks are higher than supported maximum count '16'") - -// errXLMinDisks - returned for minimum number of disks. -var errXLMinDisks = errors.New("Minimum '4' disks are required to enable erasure code") - -// errXLNumDisks - returned for odd number of disks. -var errXLNumDisks = errors.New("Total number of disks should be multiples of '2'") - // errXLReadQuorum - did not meet read quorum. var errXLReadQuorum = errors.New("Read failed. Insufficient number of disks online") diff --git a/cmd/xl-v1-healing-common.go b/cmd/xl-v1-healing-common.go index f463ec6c1..54beedcc9 100644 --- a/cmd/xl-v1-healing-common.go +++ b/cmd/xl-v1-healing-common.go @@ -252,21 +252,22 @@ func xlHealStat(xl xlObjects, partsMetadata []xlMetaV1, errs []error) HealObject // calculating blake2b checksum. func disksWithAllParts(onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs []error, bucket, object string) ([]StorageAPI, []error, error) { availableDisks := make([]StorageAPI, len(onlineDisks)) - for index, onlineDisk := range onlineDisks { + for diskIndex, onlineDisk := range onlineDisks { if onlineDisk == nil { continue } // disk has a valid xl.json but may not have all the // parts. This is considered an outdated disk, since // it needs healing too. - for _, part := range partsMetadata[index].Parts { + for _, part := range partsMetadata[diskIndex].Parts { // compute blake2b sum of part. partPath := filepath.Join(object, part.Name) - hash := newHash(partsMetadata[index].Erasure.Algorithm) + checkSumInfo := partsMetadata[diskIndex].Erasure.GetCheckSumInfo(part.Name) + hash := newHash(checkSumInfo.Algorithm) blakeBytes, hErr := hashSum(onlineDisk, bucket, partPath, hash) if hErr == errFileNotFound { - errs[index] = errFileNotFound - availableDisks[index] = nil + errs[diskIndex] = errFileNotFound + availableDisks[diskIndex] = nil break } @@ -274,17 +275,16 @@ func disksWithAllParts(onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs return nil, nil, traceError(hErr) } - partChecksum := partsMetadata[index].Erasure.GetCheckSumInfo(part.Name).Hash blakeSum := hex.EncodeToString(blakeBytes) // if blake2b sum doesn't match for a part // then this disk is outdated and needs // healing. 
- if blakeSum != partChecksum { - errs[index] = errFileNotFound - availableDisks[index] = nil + if blakeSum != checkSumInfo.Hash { + errs[diskIndex] = errFileNotFound + availableDisks[diskIndex] = nil break } - availableDisks[index] = onlineDisk + availableDisks[diskIndex] = onlineDisk } } diff --git a/cmd/xl-v1-healing-common_test.go b/cmd/xl-v1-healing-common_test.go index d9705395c..351d3a028 100644 --- a/cmd/xl-v1-healing-common_test.go +++ b/cmd/xl-v1-healing-common_test.go @@ -330,3 +330,115 @@ func TestListOnlineDisks(t *testing.T) { } } } + +func TestDisksWithAllParts(t *testing.T) { + rootPath, err := newTestConfig(globalMinioDefaultRegion) + if err != nil { + t.Fatalf("Failed to initialize config - %v", err) + } + defer removeAll(rootPath) + + obj, disks, err := prepareXL() + if err != nil { + t.Fatalf("Prepare XL backend failed - %v", err) + } + defer removeRoots(disks) + + bucket := "bucket" + object := "object" + // make data with more than one part + partCount := 3 + data := bytes.Repeat([]byte("a"), int(globalPutPartSize)*partCount) + xlDisks := obj.(*xlObjects).storageDisks + + err = obj.MakeBucketWithLocation("bucket", "") + if err != nil { + t.Fatalf("Failed to make a bucket %v", err) + } + + _, err = obj.PutObject(bucket, object, int64(len(data)), + bytes.NewReader(data), nil, "") + if err != nil { + t.Fatalf("Failed to putObject %v", err) + } + + partsMetadata, errs := readAllXLMetadata(xlDisks, bucket, object) + if err != nil { + t.Fatalf("Failed to read xl meta data %v", err) + } + + // Replace the default blake2b erasure algorithm to HashSha256 and test that + // disks are excluded + diskFailures := make(map[int]string) + // key = disk index, value = part name with hash mismatch + diskFailures[0] = "part.3" + diskFailures[3] = "part.1" + diskFailures[15] = "part.2" + + for diskIndex, partName := range diskFailures { + for index, info := range partsMetadata[diskIndex].Erasure.Checksum { + if info.Name == partName { + partsMetadata[diskIndex].Erasure.Checksum[index].Algorithm = HashSha256 + } + } + } + + errs = make([]error, len(xlDisks)) + filteredDisks, errs, err := + disksWithAllParts(xlDisks, partsMetadata, errs, bucket, object) + if err != nil { + t.Errorf("Unexpected error: %s", err) + } + + if len(filteredDisks) != len(xlDisks) { + t.Errorf("Unexpected number of disks: %d", len(filteredDisks)) + } + + for diskIndex, disk := range filteredDisks { + + if _, ok := diskFailures[diskIndex]; ok { + if disk != nil { + t.Errorf("Disk not filtered as expected, disk: %d", diskIndex) + } + if errs[diskIndex] == nil { + t.Errorf("Expected error not received, diskIndex: %d", diskIndex) + } + } else { + if disk == nil { + t.Errorf("Disk erroneously filtered, diskIndex: %d", diskIndex) + } + if errs[diskIndex] != nil { + t.Errorf("Unexpected error, %s, diskIndex: %d", errs[diskIndex], diskIndex) + } + + } + } + + // Test that all disks are returned without any failures with unmodified + // meta data + partsMetadata, errs = readAllXLMetadata(xlDisks, bucket, object) + if err != nil { + t.Fatalf("Failed to read xl meta data %v", err) + } + + filteredDisks, errs, err = + disksWithAllParts(xlDisks, partsMetadata, errs, bucket, object) + if err != nil { + t.Errorf("Unexpected error: %s", err) + } + + if len(filteredDisks) != len(xlDisks) { + t.Errorf("Unexpected number of disks: %d", len(filteredDisks)) + } + + for diskIndex, disk := range filteredDisks { + if errs[diskIndex] != nil { + t.Errorf("Unexpected error %s", errs[diskIndex]) + } + + if disk == nil { + t.Errorf("Disk 
erroneously filtered, diskIndex: %d", diskIndex) + } + } + +} diff --git a/cmd/xl-v1-healing.go b/cmd/xl-v1-healing.go index 6bbccce7d..a0a4b43e9 100644 --- a/cmd/xl-v1-healing.go +++ b/cmd/xl-v1-healing.go @@ -468,7 +468,7 @@ func healObject(storageDisks []StorageAPI, bucket string, object string, quorum } // Generate and write `xl.json` generated from other disks. - aErr = writeUniqueXLMetadata(outDatedDisks, minioMetaTmpBucket, tmpID, partsMetadata, diskCount(outDatedDisks)) + outDatedDisks, aErr = writeUniqueXLMetadata(outDatedDisks, minioMetaTmpBucket, tmpID, partsMetadata, diskCount(outDatedDisks)) if aErr != nil { return 0, 0, toObjectErr(aErr, bucket, object) } diff --git a/cmd/xl-v1-list-objects-heal.go b/cmd/xl-v1-list-objects-heal.go index 029a2ce8a..ea05f222a 100644 --- a/cmd/xl-v1-list-objects-heal.go +++ b/cmd/xl-v1-list-objects-heal.go @@ -69,7 +69,7 @@ func listDirHealFactory(isLeaf isLeafFunc, disks ...StorageAPI) listDirFunc { } // listObjectsHeal - wrapper function implemented over file tree walk. -func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { // Default is recursive, if delimiter is set then list non recursive. recursive := true if delimiter == slashSeparator { @@ -98,7 +98,7 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma } // For any walk error return right away. if walkResult.err != nil { - return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix) + return loi, toObjectErr(walkResult.err, bucket, prefix) } entry := walkResult.entry var objInfo ObjectInfo @@ -115,7 +115,7 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma if errorCause(err) == errFileNotFound { continue } - return ListObjectsInfo{}, toObjectErr(err, bucket, prefix) + return loi, toObjectErr(err, bucket, prefix) } } nextMarker = objInfo.Name @@ -160,14 +160,14 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma } // ListObjects - list all objects at prefix, delimited by '/'. -func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { if err := checkListObjsArgs(bucket, prefix, marker, delimiter, xl); err != nil { - return ListObjectsInfo{}, err + return loi, err } // With max keys of zero we have reached eof, return right here. if maxKeys == 0 { - return ListObjectsInfo{}, nil + return loi, nil } // For delimiter and prefix as '/' we do not list anything at all @@ -175,7 +175,7 @@ func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, ma // with the prefix. On a flat namespace with 'prefix' as '/' // we don't have any entries, since all the keys are of form 'keyName/...' if delimiter == slashSeparator && prefix == slashSeparator { - return ListObjectsInfo{}, nil + return loi, nil } // Over flowing count - reset to maxObjectList. @@ -191,27 +191,27 @@ func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, ma } // Return error at the end. - return ListObjectsInfo{}, toObjectErr(err, bucket, prefix) + return loi, toObjectErr(err, bucket, prefix) } // ListUploadsHeal - lists ongoing multipart uploads that require // healing in one or more disks. 
func (xl xlObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker, - delimiter string, maxUploads int) (ListMultipartsInfo, error) { + delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { // For delimiter and prefix as '/' we do not list anything at all // since according to s3 spec we stop at the 'delimiter' along // with the prefix. On a flat namespace with 'prefix' as '/' // we don't have any entries, since all the keys are of form 'keyName/...' if delimiter == slashSeparator && prefix == slashSeparator { - return ListMultipartsInfo{}, nil + return lmi, nil } // Initiate a list operation. listMultipartInfo, err := xl.listMultipartUploadsHeal(bucket, prefix, marker, uploadIDMarker, delimiter, maxUploads) if err != nil { - return ListMultipartsInfo{}, toObjectErr(err, bucket, prefix) + return lmi, toObjectErr(err, bucket, prefix) } // We got the entries successfully return. @@ -245,7 +245,7 @@ func fetchMultipartUploadIDs(bucket, keyMarker, uploadIDMarker string, // listMultipartUploadsHeal - Returns a list of incomplete multipart // uploads that need to be healed. func (xl xlObjects) listMultipartUploadsHeal(bucket, prefix, keyMarker, - uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { + uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { result := ListMultipartsInfo{ IsTruncated: true, @@ -265,7 +265,7 @@ func (xl xlObjects) listMultipartUploadsHeal(bucket, prefix, keyMarker, uploads, _, err = fetchMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads, xl.getLoadBalancedDisks()) if err != nil { - return ListMultipartsInfo{}, err + return lmi, err } maxUploads = maxUploads - len(uploads) } @@ -321,7 +321,7 @@ func (xl xlObjects) listMultipartUploadsHeal(bucket, prefix, keyMarker, } // For any error during tree walk, we should return right away. if walkResult.err != nil { - return ListMultipartsInfo{}, walkResult.err + return lmi, walkResult.err } entry := strings.TrimPrefix(walkResult.entry, @@ -346,7 +346,7 @@ func (xl xlObjects) listMultipartUploadsHeal(bucket, prefix, keyMarker, newUploads, end, err = fetchMultipartUploadIDs(bucket, entry, uploadIDMarker, uploadsLeft, xl.getLoadBalancedDisks()) if err != nil { - return ListMultipartsInfo{}, err + return lmi, err } uploads = append(uploads, newUploads...) uploadsLeft -= len(newUploads) diff --git a/cmd/xl-v1-list-objects.go b/cmd/xl-v1-list-objects.go index fb03fbe26..f95f9f842 100644 --- a/cmd/xl-v1-list-objects.go +++ b/cmd/xl-v1-list-objects.go @@ -46,7 +46,7 @@ func listDirFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, disks ...Sto } // listObjects - wrapper function implemented over file tree walk. -func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { // Default is recursive, if delimiter is set then list non recursive. recursive := true if delimiter == slashSeparator { @@ -74,7 +74,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey } // For any walk error return right away. 
if walkResult.err != nil { - return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix) + return loi, toObjectErr(walkResult.err, bucket, prefix) } entry := walkResult.entry var objInfo ObjectInfo @@ -92,7 +92,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey if errorCause(err) == errFileNotFound { continue } - return ListObjectsInfo{}, toObjectErr(err, bucket, prefix) + return loi, toObjectErr(err, bucket, prefix) } } nextMarker = objInfo.Name @@ -122,14 +122,14 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey } // ListObjects - list all objects at prefix, delimited by '/'. -func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { if err := checkListObjsArgs(bucket, prefix, marker, delimiter, xl); err != nil { - return ListObjectsInfo{}, err + return loi, err } // With max keys of zero we have reached eof, return right here. if maxKeys == 0 { - return ListObjectsInfo{}, nil + return loi, nil } // For delimiter and prefix as '/' we do not list anything at all @@ -137,7 +137,7 @@ func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey // with the prefix. On a flat namespace with 'prefix' as '/' // we don't have any entries, since all the keys are of form 'keyName/...' if delimiter == slashSeparator && prefix == slashSeparator { - return ListObjectsInfo{}, nil + return loi, nil } // Over flowing count - reset to maxObjectList. @@ -153,5 +153,5 @@ func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey } // Return error at the end. - return ListObjectsInfo{}, toObjectErr(err, bucket, prefix) + return loi, toObjectErr(err, bucket, prefix) } diff --git a/cmd/xl-v1-metadata.go b/cmd/xl-v1-metadata.go index af8bdcd69..4060dff69 100644 --- a/cmd/xl-v1-metadata.go +++ b/cmd/xl-v1-metadata.go @@ -282,14 +282,14 @@ func (m xlMetaV1) ObjectToPartOffset(offset int64) (partIndex int, partOffset in // pickValidXLMeta - picks one valid xlMeta content and returns from a // slice of xlmeta content. If no value is found this function panics // and dies. -func pickValidXLMeta(metaArr []xlMetaV1, modTime time.Time) (xlMetaV1, error) { +func pickValidXLMeta(metaArr []xlMetaV1, modTime time.Time) (xmv xlMetaV1, e error) { // Pick latest valid metadata. for _, meta := range metaArr { if meta.IsValid() && meta.Stat.ModTime.Equal(modTime) { return meta, nil } } - return xlMetaV1{}, traceError(errors.New("No valid xl.json present")) + return xmv, traceError(errors.New("No valid xl.json present")) } // list of all errors that can be ignored in a metadata operation. @@ -389,7 +389,7 @@ func deleteAllXLMetadata(disks []StorageAPI, bucket, prefix string, errs []error } // Rename `xl.json` content to destination location for each disk in order. -func renameXLMetadata(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, quorum int) error { +func renameXLMetadata(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, quorum int) ([]StorageAPI, error) { isDir := false srcXLJSON := path.Join(srcEntry, xlMetaJSONFile) dstXLJSON := path.Join(dstEntry, xlMetaJSONFile) @@ -397,7 +397,7 @@ func renameXLMetadata(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEnt } // writeUniqueXLMetadata - writes unique `xl.json` content for each disk in order. 
-func writeUniqueXLMetadata(disks []StorageAPI, bucket, prefix string, xlMetas []xlMetaV1, quorum int) error { +func writeUniqueXLMetadata(disks []StorageAPI, bucket, prefix string, xlMetas []xlMetaV1, quorum int) ([]StorageAPI, error) { var wg = &sync.WaitGroup{} var mErrs = make([]error, len(disks)) @@ -419,8 +419,6 @@ func writeUniqueXLMetadata(disks []StorageAPI, bucket, prefix string, xlMetas [] err := writeXLMetadata(disk, bucket, prefix, xlMetas[index]) if err != nil { mErrs[index] = err - // Ignore disk which returned an error. - disks[index] = nil } }(index, disk) } @@ -433,11 +431,11 @@ func writeUniqueXLMetadata(disks []StorageAPI, bucket, prefix string, xlMetas [] // Delete all `xl.json` successfully renamed. deleteAllXLMetadata(disks, bucket, prefix, mErrs) } - return err + return evalDisks(disks, mErrs), err } // writeSameXLMetadata - write `xl.json` on all disks in order. -func writeSameXLMetadata(disks []StorageAPI, bucket, prefix string, xlMeta xlMetaV1, writeQuorum, readQuorum int) error { +func writeSameXLMetadata(disks []StorageAPI, bucket, prefix string, xlMeta xlMetaV1, writeQuorum, readQuorum int) ([]StorageAPI, error) { var wg = &sync.WaitGroup{} var mErrs = make([]error, len(disks)) @@ -459,8 +457,6 @@ func writeSameXLMetadata(disks []StorageAPI, bucket, prefix string, xlMeta xlMet err := writeXLMetadata(disk, bucket, prefix, metadata) if err != nil { mErrs[index] = err - // Ignore disk which returned an error. - disks[index] = nil } }(index, disk, xlMeta) } @@ -473,5 +469,5 @@ func writeSameXLMetadata(disks []StorageAPI, bucket, prefix string, xlMeta xlMet // Delete all `xl.json` successfully renamed. deleteAllXLMetadata(disks, bucket, prefix, mErrs) } - return err + return evalDisks(disks, mErrs), err } diff --git a/cmd/xl-v1-multipart.go b/cmd/xl-v1-multipart.go index 7f266f1d1..6f23a67de 100644 --- a/cmd/xl-v1-multipart.go +++ b/cmd/xl-v1-multipart.go @@ -233,7 +233,7 @@ func (xl xlObjects) statPart(bucket, object, uploadID, partName string) (fileInf } // commitXLMetadata - commit `xl.json` from source prefix to destination prefix in the given slice of disks. -func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPrefix string, quorum int) error { +func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPrefix string, quorum int) ([]StorageAPI, error) { var wg = &sync.WaitGroup{} var mErrs = make([]error, len(disks)) @@ -257,8 +257,6 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr rErr := disk.RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile) if rErr != nil { mErrs[index] = traceError(rErr) - // Ignore disk which returned an error. - disks[index] = nil return } mErrs[index] = nil @@ -272,11 +270,11 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr // Delete all `xl.json` successfully renamed. deleteAllXLMetadata(disks, dstBucket, dstPrefix, mErrs) } - return err + return evalDisks(disks, mErrs), err } // listMultipartUploads - lists all multipart uploads. 
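The metadata writers above stop nil-ing out failed entries in the shared disks slice and instead return evalDisks(disks, mErrs), giving the caller a fresh view of which disks survived the operation. evalDisks itself is not part of this hunk; a plausible minimal implementation, consistent with how it is used here:

```
package main

import (
	"errors"
	"fmt"
)

// StorageAPI stands in for Minio's real storage interface in this sketch.
type StorageAPI interface{}

// evalDisks returns a copy of disks in which every entry whose corresponding
// operation failed is nil, leaving the caller's slice untouched.
func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
	if len(errs) != len(disks) {
		return nil
	}
	newDisks := make([]StorageAPI, len(disks))
	for i := range errs {
		if errs[i] == nil {
			newDisks[i] = disks[i]
		}
	}
	return newDisks
}

func main() {
	disks := []StorageAPI{"disk0", "disk1", "disk2"}
	errs := []error{nil, errors.New("disk offline"), nil}
	fmt.Println(evalDisks(disks, errs)) // [disk0 <nil> disk2]
}
```

Callers then thread the returned slice into the next step of the request, so a disk that failed once is skipped for the remainder of the operation instead of being silently dropped from a shared slice.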
-func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { +func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { result := ListMultipartsInfo{ IsTruncated: true, MaxUploads: maxUploads, @@ -326,7 +324,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark } keyMarkerLock.RUnlock() if err != nil { - return ListMultipartsInfo{}, err + return lmi, err } maxUploads = maxUploads - len(uploads) } @@ -352,7 +350,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark } // For any walk error return right away. if walkResult.err != nil { - return ListMultipartsInfo{}, walkResult.err + return lmi, walkResult.err } entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket)) // For an entry looking like a directory, store and @@ -396,7 +394,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark if isErrIgnored(err, xlTreeWalkIgnoredErrs...) { continue } - return ListMultipartsInfo{}, err + return lmi, err } uploads = append(uploads, newUploads...) maxUploads -= len(newUploads) @@ -448,9 +446,9 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark // Implements S3 compatible ListMultipartUploads API. The resulting // ListMultipartsInfo structure is unmarshalled directly into XML and // replied back to the client. -func (xl xlObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { +func (xl xlObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { if err := checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter, xl); err != nil { - return ListMultipartsInfo{}, err + return lmi, err } return xl.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) @@ -491,7 +489,7 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st uploadIDPath := path.Join(bucket, object, uploadID) tempUploadIDPath := uploadID // Write updated `xl.json` to all disks. - err := writeSameXLMetadata(xl.storageDisks, minioMetaTmpBucket, tempUploadIDPath, xlMeta, xl.writeQuorum, xl.readQuorum) + disks, err := writeSameXLMetadata(xl.storageDisks, minioMetaTmpBucket, tempUploadIDPath, xlMeta, xl.writeQuorum, xl.readQuorum) if err != nil { return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath) } @@ -501,7 +499,7 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st defer xl.deleteObject(minioMetaTmpBucket, tempUploadIDPath) // Attempt to rename temp upload object to actual upload path object - rErr := renameObject(xl.storageDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum) + _, rErr := renameObject(disks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum) if rErr != nil { return "", toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath) } @@ -536,9 +534,9 @@ func (xl xlObjects) NewMultipartUpload(bucket, object string, meta map[string]st // data is read from an existing object. // // Implements S3 compatible Upload Part Copy API. 
-func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (PartInfo, error) { +func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (pi PartInfo, e error) { if err := checkNewMultipartArgs(srcBucket, srcObject, xl); err != nil { - return PartInfo{}, err + return pi, err } // Initialize pipe. @@ -555,7 +553,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u partInfo, err := xl.PutObjectPart(dstBucket, dstObject, uploadID, partID, length, pipeReader, "", "") if err != nil { - return PartInfo{}, toObjectErr(err, dstBucket, dstObject) + return pi, toObjectErr(err, dstBucket, dstObject) } // Explicitly close the reader. @@ -570,9 +568,9 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u // of the multipart transaction. // // Implements S3 compatible Upload Part API. -func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) { +func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) { if err := checkPutObjectPartArgs(bucket, object, xl); err != nil { - return PartInfo{}, err + return pi, err } var partsMetadata []xlMetaV1 @@ -585,7 +583,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s // Validates if upload ID exists. if !xl.isUploadIDExists(bucket, object, uploadID) { preUploadIDLock.RUnlock() - return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + return pi, traceError(InvalidUploadID{UploadID: uploadID}) } // Read metadata associated with the object from all disks. @@ -594,7 +592,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum) if errorCause(reducedErr) == errXLWriteQuorum { preUploadIDLock.RUnlock() - return PartInfo{}, toObjectErr(reducedErr, bucket, object) + return pi, toObjectErr(reducedErr, bucket, object) } preUploadIDLock.RUnlock() @@ -604,7 +602,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s // Pick one from the first valid metadata. xlMeta, err := pickValidXLMeta(partsMetadata, modTime) if err != nil { - return PartInfo{}, err + return pi, err } onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution) @@ -648,7 +646,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s if size > 0 { if pErr := xl.prepareFile(minioMetaTmpBucket, tmpPartPath, size, onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks); err != nil { - return PartInfo{}, toObjectErr(pErr, bucket, object) + return pi, toObjectErr(pErr, bucket, object) } } @@ -657,15 +655,15 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s allowEmpty := true // Erasure code data and write across all disks. 
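CopyObjectPart above streams the source object into PutObjectPart through an in-process pipe, so a server-side part copy never touches a temporary file. A stripped-down sketch of that producer/consumer wiring, where the reader and writer stand in for GetObject and PutObjectPart:

```
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	pipeReader, pipeWriter := io.Pipe()

	// Producer: in the real code this is GetObject writing the selected
	// byte range of the source object into the pipe.
	go func() {
		src := strings.NewReader("source object bytes")
		if _, err := io.Copy(pipeWriter, src); err != nil {
			// Propagate the producer's failure to the reading side.
			pipeWriter.CloseWithError(err)
			return
		}
		pipeWriter.Close()
	}()

	// Consumer: in the real code this is PutObjectPart reading from the pipe
	// and erasure-coding the data onto the online disks.
	data, err := ioutil.ReadAll(pipeReader)
	if err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	pipeReader.Close()
	fmt.Printf("copied %d bytes\n", len(data))
}
```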
- sizeWritten, checkSums, err := erasureCreateFile(onlineDisks, minioMetaTmpBucket, tmpPartPath, teeReader, allowEmpty, xlMeta.Erasure.BlockSize, xl.dataBlocks, xl.parityBlocks, bitRotAlgo, xl.writeQuorum) + onlineDisks, sizeWritten, checkSums, err := erasureCreateFile(onlineDisks, minioMetaTmpBucket, tmpPartPath, teeReader, allowEmpty, xlMeta.Erasure.BlockSize, xl.dataBlocks, xl.parityBlocks, bitRotAlgo, xl.writeQuorum) if err != nil { - return PartInfo{}, toObjectErr(err, bucket, object) + return pi, toObjectErr(err, bucket, object) } // Should return IncompleteBody{} error when reader has fewer bytes // than specified in request header. if sizeWritten < size { - return PartInfo{}, traceError(IncompleteBody{}) + return pi, traceError(IncompleteBody{}) } // For size == -1, perhaps client is sending in chunked encoding @@ -679,14 +677,14 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s if md5Hex != "" { if newMD5Hex != md5Hex { // Returns md5 mismatch. - return PartInfo{}, traceError(BadDigest{md5Hex, newMD5Hex}) + return pi, traceError(BadDigest{md5Hex, newMD5Hex}) } } if sha256sum != "" { newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil)) if newSHA256sum != sha256sum { - return PartInfo{}, traceError(SHA256Mismatch{}) + return pi, traceError(SHA256Mismatch{}) } } @@ -697,21 +695,21 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s // Validate again if upload ID still exists. if !xl.isUploadIDExists(bucket, object, uploadID) { - return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + return pi, traceError(InvalidUploadID{UploadID: uploadID}) } // Rename temporary part file to its final location. partPath := path.Join(uploadIDPath, partSuffix) - err = renamePart(onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, xl.writeQuorum) + onlineDisks, err = renamePart(onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, xl.writeQuorum) if err != nil { - return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, partPath) + return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) } // Read metadata again because it might be updated with parallel upload of another part. partsMetadata, errs = readAllXLMetadata(onlineDisks, minioMetaMultipartBucket, uploadIDPath) reducedErr = reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum) if errorCause(reducedErr) == errXLWriteQuorum { - return PartInfo{}, toObjectErr(reducedErr, bucket, object) + return pi, toObjectErr(reducedErr, bucket, object) } // Get current highest version based on re-read partsMetadata. @@ -720,7 +718,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s // Pick one from the first valid metadata. xlMeta, err = pickValidXLMeta(partsMetadata, modTime) if err != nil { - return PartInfo{}, err + return pi, err } // Once part is successfully committed, proceed with updating XL metadata. @@ -746,17 +744,18 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s tempXLMetaPath := newUUID // Writes a unique `xl.json` each disk carrying new checksum related information. 
- if err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, xl.writeQuorum); err != nil { - return PartInfo{}, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath) + if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, xl.writeQuorum); err != nil { + return pi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath) } - rErr := commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum) + var rErr error + onlineDisks, rErr = commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum) if rErr != nil { - return PartInfo{}, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath) + return pi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath) } fi, err := xl.statPart(bucket, object, uploadID, partSuffix) if err != nil { - return PartInfo{}, toObjectErr(rErr, minioMetaMultipartBucket, partSuffix) + return pi, toObjectErr(rErr, minioMetaMultipartBucket, partSuffix) } // Return success. @@ -770,14 +769,14 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s // listObjectParts - wrapper reading `xl.json` for a given object and // uploadID. Lists all the parts captured inside `xl.json` content. -func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) { +func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) { result := ListPartsInfo{} uploadIDPath := path.Join(bucket, object, uploadID) xlParts, err := xl.readXLMetaParts(minioMetaMultipartBucket, uploadIDPath) if err != nil { - return ListPartsInfo{}, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) + return lpi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } // Populate the result stub. @@ -807,7 +806,7 @@ func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberM var fi FileInfo fi, err = xl.statPart(bucket, object, uploadID, part.Name) if err != nil { - return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, path.Join(uploadID, part.Name)) + return lpi, toObjectErr(err, minioMetaBucket, path.Join(uploadID, part.Name)) } result.Parts = append(result.Parts, PartInfo{ PartNumber: part.Number, @@ -838,9 +837,9 @@ func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberM // Implements S3 compatible ListObjectParts API. The resulting // ListPartsInfo structure is unmarshalled directly into XML and // replied back to the client. 
-func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) { +func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) { if err := checkListPartsArgs(bucket, object, xl); err != nil { - return ListPartsInfo{}, err + return lpi, err } // Hold lock so that there is no competing @@ -851,7 +850,7 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM defer uploadIDLock.Unlock() if !xl.isUploadIDExists(bucket, object, uploadID) { - return ListPartsInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + return lpi, traceError(InvalidUploadID{UploadID: uploadID}) } result, err := xl.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) return result, err @@ -863,9 +862,9 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM // md5sums of all the parts. // // Implements S3 compatible Complete multipart API. -func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, error) { +func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (oi ObjectInfo, e error) { if err := checkCompleteMultipartArgs(bucket, object, xl); err != nil { - return ObjectInfo{}, err + return oi, err } // Hold lock so that @@ -880,19 +879,19 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload defer uploadIDLock.Unlock() if !xl.isUploadIDExists(bucket, object, uploadID) { - return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID}) + return oi, traceError(InvalidUploadID{UploadID: uploadID}) } // Check if an object is present as one of the parent dir. // -- FIXME. (needs a new kind of lock). if xl.parentDirIsObject(bucket, path.Dir(object)) { - return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object) + return oi, toObjectErr(traceError(errFileAccessDenied), bucket, object) } // Calculate s3 compatible md5sum for complete multipart. s3MD5, err := getCompleteMultipartMD5(parts) if err != nil { - return ObjectInfo{}, err + return oi, err } uploadIDPath := pathJoin(bucket, object, uploadID) @@ -901,7 +900,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload partsMetadata, errs := readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket, uploadIDPath) reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum) if errorCause(reducedErr) == errXLWriteQuorum { - return ObjectInfo{}, toObjectErr(reducedErr, bucket, object) + return oi, toObjectErr(reducedErr, bucket, object) } onlineDisks, modTime := listOnlineDisks(xl.storageDisks, partsMetadata, errs) @@ -912,7 +911,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload // Pick one from the first valid metadata. xlMeta, err := pickValidXLMeta(partsMetadata, modTime) if err != nil { - return ObjectInfo{}, err + return oi, err } // Order online disks in accordance with distribution order. @@ -932,17 +931,17 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber) // All parts should have same part number. if partIdx == -1 { - return ObjectInfo{}, traceError(InvalidPart{}) + return oi, traceError(InvalidPart{}) } // All parts should have same ETag as previously generated. 
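CompleteMultipartUpload above derives the final ETag with getCompleteMultipartMD5, which is not shown in this hunk. The conventional S3-compatible computation is the MD5 of the concatenated binary part MD5s, suffixed with the part count; a hedged sketch with made-up part ETags:

```
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
)

// completeMultipartETag mimics the usual S3 multipart ETag: md5 over the
// concatenated raw part digests, plus "-<number of parts>".
func completeMultipartETag(partETags []string) (string, error) {
	var all []byte
	for _, etag := range partETags {
		digest, err := hex.DecodeString(strings.Trim(etag, `"`))
		if err != nil {
			return "", err
		}
		all = append(all, digest...)
	}
	sum := md5.Sum(all)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partETags)), nil
}

func main() {
	// Placeholder part ETags (each is the hex MD5 of that part's data).
	parts := []string{
		"9e107d9d372bb6826bd81d3542a419d6",
		"e4d909c290d0fb1ca068ffaddf22cbd0",
	}
	etag, err := completeMultipartETag(parts)
	if err != nil {
		fmt.Println("bad part etag:", err)
		return
	}
	fmt.Println(etag)
}
```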
if currentXLMeta.Parts[partIdx].ETag != part.ETag { - return ObjectInfo{}, traceError(BadDigest{}) + return oi, traceError(InvalidPart{}) } // All parts except the last part has to be atleast 5MB. if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].Size) { - return ObjectInfo{}, traceError(PartTooSmall{ + return oi, traceError(PartTooSmall{ PartNumber: part.PartNumber, PartSize: currentXLMeta.Parts[partIdx].Size, PartETag: part.ETag, @@ -986,13 +985,14 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload } // Write unique `xl.json` for each disk. - if err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, xl.writeQuorum); err != nil { - return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath) + if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, xl.writeQuorum); err != nil { + return oi, toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath) } - rErr := commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum) + var rErr error + onlineDisks, rErr = commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum) if rErr != nil { - return ObjectInfo{}, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath) + return oi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath) } defer func() { @@ -1018,9 +1018,9 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload // NOTE: Do not use online disks slice here. // The reason is that existing object should be purged // regardless of `xl.json` status and rolled back in case of errors. - err = renameObject(xl.storageDisks, bucket, object, minioMetaTmpBucket, newUniqueID, xl.writeQuorum) + _, err = renameObject(xl.storageDisks, bucket, object, minioMetaTmpBucket, newUniqueID, xl.writeQuorum) if err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) + return oi, toObjectErr(err, bucket, object) } } @@ -1038,8 +1038,8 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload } // Rename the multipart object to final location. - if err = renameObject(onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, xl.writeQuorum); err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) + if onlineDisks, err = renameObject(onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, xl.writeQuorum); err != nil { + return oi, toObjectErr(err, bucket, object) } // Hold the lock so that two parallel @@ -1052,7 +1052,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload // remove entry from uploads.json with quorum if err = xl.removeUploadID(bucket, object, uploadID); err != nil { - return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, path.Join(bucket, object)) + return oi, toObjectErr(err, minioMetaMultipartBucket, path.Join(bucket, object)) } objInfo := ObjectInfo{ diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go index ec4c6ddd4..f4e01b375 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/xl-v1-object.go @@ -59,11 +59,11 @@ func (xl xlObjects) prepareFile(bucket, object string, size int64, onlineDisks [ // CopyObject - copy object source object to destination object. // if source object and destination object are same we only // update metadata. 
-func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (ObjectInfo, error) { +func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (oi ObjectInfo, e error) { // Read metadata associated with the object from all disks. metaArr, errs := readAllXLMetadata(xl.storageDisks, srcBucket, srcObject) if reducedErr := reduceReadQuorumErrs(errs, objectOpIgnoredErrs, xl.readQuorum); reducedErr != nil { - return ObjectInfo{}, toObjectErr(reducedErr, srcBucket, srcObject) + return oi, toObjectErr(reducedErr, srcBucket, srcObject) } // List all online disks. @@ -72,7 +72,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string // Pick latest valid metadata. xlMeta, err := pickValidXLMeta(metaArr, modTime) if err != nil { - return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) + return oi, toObjectErr(err, srcBucket, srcObject) } // Reorder online disks based on erasure distribution order. @@ -94,12 +94,12 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string tempObj := mustGetUUID() // Write unique `xl.json` for each disk. - if err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, xl.writeQuorum); err != nil { - return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) + if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, xl.writeQuorum); err != nil { + return oi, toObjectErr(err, srcBucket, srcObject) } // Rename atomically `xl.json` from tmp location to destination for each disk. - if err = renameXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, xl.writeQuorum); err != nil { - return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) + if onlineDisks, err = renameXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, xl.writeQuorum); err != nil { + return oi, toObjectErr(err, srcBucket, srcObject) } return xlMeta.ToObjectInfo(srcBucket, srcObject), nil } @@ -119,7 +119,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string objInfo, err := xl.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "") if err != nil { - return ObjectInfo{}, toObjectErr(err, dstBucket, dstObject) + return oi, toObjectErr(err, dstBucket, dstObject) } // Explicitly close the reader. @@ -303,14 +303,14 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i } // GetObjectInfo - reads object metadata and replies back ObjectInfo. -func (xl xlObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) { +func (xl xlObjects) GetObjectInfo(bucket, object string) (oi ObjectInfo, e error) { if err := checkGetObjArgs(bucket, object); err != nil { - return ObjectInfo{}, err + return oi, err } info, err := xl.getObjectInfo(bucket, object) if err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) + return oi, toObjectErr(err, bucket, object) } return info, nil } @@ -374,7 +374,7 @@ func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry str // rename - common function that renamePart and renameObject use to rename // the respective underlying storage layer representations. 
-func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, quorum int) error { +func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, quorum int) ([]StorageAPI, error) { // Initialize sync waitgroup. var wg = &sync.WaitGroup{} @@ -398,8 +398,6 @@ func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, err := disk.RenameFile(srcBucket, srcEntry, dstBucket, dstEntry) if err != nil && err != errFileNotFound { errs[index] = traceError(err) - // Ignore disk which returned an error. - disks[index] = nil } }(index, disk) } @@ -414,14 +412,14 @@ func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, // Undo all the partial rename operations. undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs) } - return err + return evalDisks(disks, errs), err } // renamePart - renames a part of the source object to the destination // across all disks in parallel. Additionally if we have errors and do // not have a readQuorum partially renamed files are renamed back to // its proper location. -func renamePart(disks []StorageAPI, srcBucket, srcPart, dstBucket, dstPart string, quorum int) error { +func renamePart(disks []StorageAPI, srcBucket, srcPart, dstBucket, dstPart string, quorum int) ([]StorageAPI, error) { isDir := false return rename(disks, srcBucket, srcPart, dstBucket, dstPart, isDir, quorum) } @@ -430,7 +428,7 @@ func renamePart(disks []StorageAPI, srcBucket, srcPart, dstBucket, dstPart strin // across all disks in parallel. Additionally if we have errors and do // not have a readQuorum partially renamed files are renamed back to // its proper location. -func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject string, quorum int) error { +func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject string, quorum int) ([]StorageAPI, error) { isDir := true return rename(disks, srcBucket, srcObject, dstBucket, dstObject, isDir, quorum) } @@ -573,8 +571,12 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io. // when size == -1 because in this case, we are not able to predict how many parts we will have. allowEmptyPart := partIdx == 1 + var partSizeWritten int64 + var checkSums []string + var erasureErr error + // Erasure code data and write across all disks. - partSizeWritten, checkSums, erasureErr := erasureCreateFile(onlineDisks, minioMetaTmpBucket, tempErasureObj, partReader, allowEmptyPart, partsMetadata[0].Erasure.BlockSize, partsMetadata[0].Erasure.DataBlocks, partsMetadata[0].Erasure.ParityBlocks, bitRotAlgo, xl.writeQuorum) + onlineDisks, partSizeWritten, checkSums, erasureErr = erasureCreateFile(onlineDisks, minioMetaTmpBucket, tempErasureObj, partReader, allowEmptyPart, partsMetadata[0].Erasure.BlockSize, partsMetadata[0].Erasure.DataBlocks, partsMetadata[0].Erasure.ParityBlocks, bitRotAlgo, xl.writeQuorum) if erasureErr != nil { return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj) } @@ -674,7 +676,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io. // NOTE: Do not use online disks slice here. // The reason is that existing object should be purged // regardless of `xl.json` status and rolled back in case of errors. 
- err = renameObject(xl.storageDisks, bucket, object, minioMetaTmpBucket, newUniqueID, xl.writeQuorum) + _, err = renameObject(xl.storageDisks, bucket, object, minioMetaTmpBucket, newUniqueID, xl.writeQuorum) if err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } @@ -689,12 +691,12 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io. } // Write unique `xl.json` for each disk. - if err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, xl.writeQuorum); err != nil { + if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, xl.writeQuorum); err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } // Rename the successfully written temporary object to final location. - err = renameObject(onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, xl.writeQuorum) + onlineDisks, err = renameObject(onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, xl.writeQuorum) if err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } diff --git a/cmd/xl-v1-utils.go b/cmd/xl-v1-utils.go index 43b79cc2b..0c80e1452 100644 --- a/cmd/xl-v1-utils.go +++ b/cmd/xl-v1-utils.go @@ -123,13 +123,13 @@ func hashOrder(key string, cardinality int) []int { return nums } -func parseXLStat(xlMetaBuf []byte) (statInfo, error) { +func parseXLStat(xlMetaBuf []byte) (si statInfo, e error) { // obtain stat info. stat := statInfo{} // fetching modTime. modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(xlMetaBuf, "stat.modTime").String()) if err != nil { - return statInfo{}, err + return si, err } stat.ModTime = modTime // obtain Stat.Size . @@ -207,7 +207,7 @@ func parseXLMetaMap(xlMetaBuf []byte) map[string]string { } // Constructs XLMetaV1 using `gjson` lib to retrieve each field. -func xlMetaV1UnmarshalJSON(xlMetaBuf []byte) (xlMetaV1, error) { +func xlMetaV1UnmarshalJSON(xlMetaBuf []byte) (xmv xlMetaV1, e error) { xlMeta := xlMetaV1{} // obtain version. xlMeta.Version = parseXLVersion(xlMetaBuf) @@ -216,7 +216,7 @@ func xlMetaV1UnmarshalJSON(xlMetaBuf []byte) (xlMetaV1, error) { // Parse xlMetaV1.Stat . stat, err := parseXLStat(xlMetaBuf) if err != nil { - return xlMetaV1{}, err + return xmv, err } xlMeta.Stat = stat @@ -247,11 +247,11 @@ func readXLMetaParts(disk StorageAPI, bucket string, object string) ([]objectPar } // read xl.json from the given disk and parse xlV1Meta.Stat and xlV1Meta.Meta using gjson. -func readXLMetaStat(disk StorageAPI, bucket string, object string) (statInfo, map[string]string, error) { +func readXLMetaStat(disk StorageAPI, bucket string, object string) (si statInfo, mp map[string]string, e error) { // Reads entire `xl.json`. xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile)) if err != nil { - return statInfo{}, nil, traceError(err) + return si, nil, traceError(err) } // obtain version. @@ -263,7 +263,7 @@ func readXLMetaStat(disk StorageAPI, bucket string, object string) (statInfo, ma // Validate if the xl.json we read is sane, return corrupted format. if !isXLMetaValid(xlVersion, xlFormat) { // For version mismatchs and unrecognized format, return corrupted format. - return statInfo{}, nil, traceError(errCorruptedFormat) + return si, nil, traceError(errCorruptedFormat) } // obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`. @@ -272,7 +272,7 @@ func readXLMetaStat(disk StorageAPI, bucket string, object string) (statInfo, ma // obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`. 
xlStat, err := parseXLStat(xlMetaBuf)
    if err != nil {
-        return statInfo{}, nil, traceError(err)
+        return si, nil, traceError(err)
    }
    // Return structured `xl.json`.
@@ -357,6 +357,24 @@ func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []Stora
    return shuffledDisks
}
+// evalDisks - returns a new slice of disks where nil is set if
+// the corresponding error in the errs slice is not nil
+func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
+    if len(errs) != len(disks) {
+        errorIf(errors.New("unexpected disks/errors slice length"), "unable to evaluate internal disks")
+        return nil
+    }
+    newDisks := make([]StorageAPI, len(disks))
+    for index := range errs {
+        if errs[index] == nil {
+            newDisks[index] = disks[index]
+        } else {
+            newDisks[index] = nil
+        }
+    }
+    return newDisks
+}
+
// Errors specifically generated by getPartSizeFromIdx function.
var (
    errPartSizeZero = errors.New("Part size cannot be zero")
diff --git a/cmd/xl-v1-utils_test.go b/cmd/xl-v1-utils_test.go
index b5c4c8855..b9b4f5ba2 100644
--- a/cmd/xl-v1-utils_test.go
+++ b/cmd/xl-v1-utils_test.go
@@ -18,6 +18,7 @@ package cmd
import (
    "encoding/json"
+    "errors"
    "reflect"
    "strconv"
    "testing"
@@ -429,3 +430,61 @@ func testShuffleDisks(t *testing.T, xl *xlObjects) {
        t.Errorf("shuffleDisks returned incorrect order.")
    }
}
+
+// TestEvalDisks tests the behavior of evalDisks
+func TestEvalDisks(t *testing.T) {
+    nDisks := 16
+    disks, err := getRandomDisks(nDisks)
+    if err != nil {
+        t.Fatal(err)
+    }
+    objLayer, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
+    if err != nil {
+        removeRoots(disks)
+        t.Fatal(err)
+    }
+    defer removeRoots(disks)
+    xl := objLayer.(*xlObjects)
+    testEvalDisks(t, xl)
+}
+
+func testEvalDisks(t *testing.T, xl *xlObjects) {
+    disks := xl.storageDisks
+
+    diskErr := errors.New("some disk error")
+    errs := []error{
+        diskErr, nil, nil, nil,
+        nil, diskErr, nil, nil,
+        diskErr, nil, nil, nil,
+        nil, nil, nil, diskErr,
+    }
+
+    // Test normal setup with some disks
+    // returning errors
+    newDisks := evalDisks(disks, errs)
+    if newDisks[0] != nil ||
+        newDisks[1] != disks[1] ||
+        newDisks[2] != disks[2] ||
+        newDisks[3] != disks[3] ||
+        newDisks[4] != disks[4] ||
+        newDisks[5] != nil ||
+        newDisks[6] != disks[6] ||
+        newDisks[7] != disks[7] ||
+        newDisks[8] != nil ||
+        newDisks[9] != disks[9] ||
+        newDisks[10] != disks[10] ||
+        newDisks[11] != disks[11] ||
+        newDisks[12] != disks[12] ||
+        newDisks[13] != disks[13] ||
+        newDisks[14] != disks[14] ||
+        newDisks[15] != nil {
+        t.Errorf("evalDisks returned incorrect new disk set.")
+    }
+
+    // Test when number of errs doesn't match with number of disks
+    errs = []error{nil, nil, nil, nil}
+    newDisks = evalDisks(disks, errs)
+    if newDisks != nil {
+        t.Errorf("evalDisks should have returned nil for mismatched slice lengths.")
+    }
+}
diff --git a/docs/bucket/notifications/README.md b/docs/bucket/notifications/README.md
index d3f48a764..fc0c626d8 100644
--- a/docs/bucket/notifications/README.md
+++ b/docs/bucket/notifications/README.md
@@ -6,6 +6,7 @@ mechanism and can be published to the following targets:
| Notification Targets|
|:---|
| [`AMQP`](#AMQP) |
+| [`MQTT`](#MQTT) |
| [`Elasticsearch`](#Elasticsearch) |
| [`Redis`](#Redis) |
| [`NATS`](#NATS) |
@@ -134,6 +135,113 @@ python rabbit.py
‘{“Records”:[{“eventVersion”:”2.0",”eventSource”:”aws:s3",”awsRegion”:”us-east-1",”eventTime”:”2016–09–08T22:34:38.226Z”,”eventName”:”s3:ObjectCreated:Put”,”userIdentity”:{“principalId”:”minio”},”requestParameters”:{“sourceIPAddress”:”10.1.10.150:44576"},”responseElements”:{},”s3":{“s3SchemaVersion”:”1.0",”configurationId”:”Config”,”bucket”:{“name”:”images”,”ownerIdentity”:{“principalId”:”minio”},”arn”:”arn:aws:s3:::images”},”object”:{“key”:”myphoto.jpg”,”size”:200436,”sequencer”:”147279EAF9F40933"}}}],”level”:”info”,”msg”:””,”time”:”2016–09–08T15:34:38–07:00"}\n
```
+
+## Publish Minio events MQTT
+
+Install an MQTT Broker from [here](https://mosquitto.org/).
+
+### Step 1: Add MQTT endpoint to Minio
+
+The default location of Minio server configuration file is ``~/.minio/config.json``. The MQTT configuration is located in the `mqtt` key under the `notify` top-level key. Create a configuration key-value pair here for your MQTT instance. The key is a name for your MQTT endpoint, and the value is a collection of key-value parameters described in the table below.
+
+
+| Parameter | Type | Description |
+|:---|:---|:---|
+| `enable` | _bool_ | (Required) Is this server endpoint configuration active/enabled? |
+| `broker` | _string_ | (Required) MQTT server endpoint, e.g. `tcp://localhost:1883` |
+| `topic` | _string_ | (Required) Name of the MQTT topic to publish on, e.g. `minio` |
+| `qos` | _int_ | Set the Quality of Service Level |
+| `clientId` | _string_ | Unique ID for the MQTT broker to identify Minio |
+| `username` | _string_ | Username to connect to the MQTT server (if required) |
+| `password` | _string_ | Password to connect to the MQTT server (if required) |
+
+An example configuration for MQTT is shown below:
+
+```json
+"mqtt": {
+    "1": {
+        "enable": true,
+        "broker": "tcp://localhost:1883",
+        "topic": "minio",
+        "qos": 1,
+        "clientId": "minio",
+        "username": "",
+        "password": ""
+    }
+}
+```
+
+After updating the configuration file, restart the Minio server to put the changes into effect. The server will print a line like `SQS ARNs: arn:minio:sqs:us-east-1:1:mqtt` at start-up if there were no errors.
+
+Minio supports any MQTT server that supports MQTT 3.1 or 3.1.1 and can connect to them over TCP, TLS, or a Websocket connection using ``tcp://``, ``tls://``, or ``ws://`` respectively as the scheme for the broker url. See the [Go Client](http://www.eclipse.org/paho/clients/golang/) documentation for more information.
+
+Note that you can add as many MQTT server endpoint configurations as needed by providing an identifier (like "1" in the example above) for the MQTT instance and an object of per-server configuration parameters.
+
+
+### Step 2: Enable bucket notification using Minio client
+
+We will enable bucket event notification to trigger whenever a JPEG image is uploaded or deleted from ``images`` bucket on ``myminio`` server. Here ARN value is ``arn:minio:sqs:us-east-1:1:mqtt``.
+
+```
+mc mb myminio/images
+mc events add myminio/images arn:minio:sqs:us-east-1:1:mqtt --suffix .jpg
+mc events list myminio/images
+arn:minio:sqs:us-east-1:1:mqtt s3:ObjectCreated:*,s3:ObjectRemoved:* Filter: suffix=".jpg"
+```
+
+### Step 3: Test on MQTT
+
+The python program below waits on MQTT topic ``/minio`` and prints event notifications on the console. We use the [paho-mqtt](https://pypi.python.org/pypi/paho-mqtt/) library to do this.
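If the [paho-mqtt](https://pypi.python.org/pypi/paho-mqtt/) library is not already available, it can typically be installed from PyPI first (an assumed setup step, not part of the original instructions):

```
pip install paho-mqtt
```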
+
+```py
+#!/usr/bin/env python
+from __future__ import print_function
+import paho.mqtt.client as mqtt
+
+# The callback for when the client receives a CONNACK response from the server.
+def on_connect(client, userdata, flags, rc):
+    print("Connected with result code", rc)
+
+    # Subscribing in on_connect() means that if we lose the connection and
+    # reconnect then subscriptions will be renewed.
+    client.subscribe("/minio")
+
+# The callback for when a PUBLISH message is received from the server.
+def on_message(client, userdata, msg):
+    print(msg.payload)
+
+client = mqtt.Client()
+client.on_connect = on_connect
+client.on_message = on_message
+
+client.connect("localhost", 1883, 60)
+
+# Blocking call that processes network traffic, dispatches callbacks and
+# handles reconnecting.
+# Other loop*() functions are available that give a threaded interface and a
+# manual interface.
+client.loop_forever()
+```
+
+Execute this example python program to watch for MQTT events on the console.
+
+```
+python mqtt.py
+```
+
+Open another terminal and upload a JPEG image into ``images`` bucket.
+
+```
+mc cp myphoto.jpg myminio/images
+```
+
+You should receive the following event notification via MQTT once the upload completes.
+
+```
+python mqtt.py
+{"Records":[{"eventVersion":"2.0","eventSource":"aws:s3","awsRegion":"us-east-1","eventTime":"2016-09-08T22:34:38.226Z","eventName":"s3:ObjectCreated:Put","userIdentity":{"principalId":"minio"},"requestParameters":{"sourceIPAddress":"10.1.10.150:44576"},"responseElements":{},"s3":{"s3SchemaVersion":"1.0","configurationId":"Config","bucket":{"name":"images","ownerIdentity":{"principalId":"minio"},"arn":"arn:aws:s3:::images"},"object":{"key":"myphoto.jpg","size":200436,"sequencer":"147279EAF9F40933"}}}],"level":"info","msg":"","time":"2016-09-08T15:34:38-07:00"}
+```
+
## Publish Minio events via Elasticsearch
@@ -382,12 +490,44 @@ The default location of Minio server configuration file is ``~/.minio/config.jso
        "token": "",
        "secure": false,
-        "pingInterval": 0
+        "pingInterval": 0,
+        "streaming": {
+            "enable": false,
+            "clusterID": "",
+            "clientID": "",
+            "async": false,
+            "maxPubAcksInflight": 0
+        }
    }
},
Restart Minio server to reflect config changes. ``bucketevents`` is the subject used by NATS in this example.
+Minio server also supports [NATS Streaming mode](http://nats.io/documentation/streaming/nats-streaming-intro/) that offers additional functionality like `Message/event persistence`, `At-least-once-delivery`, and `Publisher rate limiting`. To configure Minio server to send notifications to NATS Streaming server, update the Minio server configuration file as follows:
+
+```
+"nats": {
+    "1": {
+        "enable": true,
+        "address": "0.0.0.0:4222",
+        "subject": "bucketevents",
+        "username": "yourusername",
+        "password": "yoursecret",
+        "token": "",
+        "secure": false,
+        "pingInterval": 0,
+        "streaming": {
+            "enable": true,
+            "clusterID": "test-cluster",
+            "clientID": "minio-client",
+            "async": true,
+            "maxPubAcksInflight": 10
+        }
+    }
+},
+```
+Read more about sections `clusterID`, `clientID` on [NATS documentation](https://github.com/nats-io/nats-streaming-server/blob/master/README.md). Section `maxPubAcksInflight` is explained [here](https://github.com/nats-io/go-nats-streaming#publisher-rate-limiting).
+
### Step 2: Enable bucket notification using Minio client
We will enable bucket event notification to trigger whenever a JPEG image is uploaded or deleted from ``images`` bucket on ``myminio`` server. Here ARN value is ``arn:minio:sqs:us-east-1:1:nats``.
To understand more about ARN please follow [AWS ARN](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) documentation.
@@ -401,7 +541,7 @@ arn:minio:sqs:us-east-1:1:nats s3:ObjectCreated:*,s3:ObjectRemoved:* Filter: suf
### Step 3: Test on NATS
-Using this program below we can log the bucket notification added to NATS.
+If you use NATS server, check out this sample program below to log the bucket notification added to NATS.
```go
package main
@@ -454,6 +594,53 @@ go run nats.go
2016/10/12 06:51:33 Received message '{"EventType":"s3:ObjectCreated:Put","Key":"images/myphoto.jpg","Records":[{"eventVersion":"2.0","eventSource":"aws:s3","awsRegion":"us-east-1","eventTime":"2016-10-12T13:51:33Z","eventName":"s3:ObjectCreated:Put","userIdentity":{"principalId":"minio"},"requestParameters":{"sourceIPAddress":"[::1]:57106"},"responseElements":{},"s3":{"s3SchemaVersion":"1.0","configurationId":"Config","bucket":{"name":"images","ownerIdentity":{"principalId":"minio"},"arn":"arn:aws:s3:::images"},"object":{"key":"myphoto.jpg","size":56060,"eTag":"1d97bf45ecb37f7a7b699418070df08f","sequencer":"147CCD1AE054BFD0"}}}],"level":"info","msg":"","time":"2016-10-12T06:51:33-07:00"}
```
+If you use NATS Streaming server, check out this sample program below to log the bucket notification added to NATS.
+
+```go
+package main
+
+// Import Go and NATS packages
+import (
+    "fmt"
+    "log"
+    "runtime"
+
+    "github.com/nats-io/go-nats-streaming"
+)
+
+func main() {
+    natsConnection, _ := stan.Connect("test-cluster", "test-client")
+    log.Println("Connected")
+
+    // Subscribe to subject
+    log.Printf("Subscribing to subject 'bucketevents'\n")
+    natsConnection.Subscribe("bucketevents", func(m *stan.Msg) {
+
+        // Handle the message
+        fmt.Printf("Received a message: %s\n", string(m.Data))
+    })
+
+    // Keep the connection alive
+    runtime.Goexit()
+}
+```
+
+```
+go run nats.go
+2017/07/07 11:47:40 Connected
+2017/07/07 11:47:40 Subscribing to subject 'bucketevents'
+```
+Open another terminal and upload a JPEG image into ``images`` bucket.
+
+```
+mc cp myphoto.jpg myminio/images
+```
+
+The example ``nats.go`` program prints the event notification to the console.
+
+```
+Received a message: {"EventType":"s3:ObjectCreated:Put","Key":"images/myphoto.jpg","Records":[{"eventVersion":"2.0","eventSource":"minio:s3","awsRegion":"","eventTime":"2017-07-07T18:46:37Z","eventName":"s3:ObjectCreated:Put","userIdentity":{"principalId":"minio"},"requestParameters":{"sourceIPAddress":"192.168.1.80:55328"},"responseElements":{"x-amz-request-id":"14CF20BD1EFD5B93","x-minio-origin-endpoint":"http://127.0.0.1:9000"},"s3":{"s3SchemaVersion":"1.0","configurationId":"Config","bucket":{"name":"images","ownerIdentity":{"principalId":"minio"},"arn":"arn:aws:s3:::images"},"object":{"key":"myphoto.jpg","size":248682,"eTag":"f1671feacb8bbf7b0397c6e9364e8c92","contentType":"image/jpeg","userDefined":{"content-type":"image/jpeg"},"versionId":"1","sequencer":"14CF20BD1EFD5B93"}},"source":{"host":"192.168.1.80","port":"55328","userAgent":"Minio (linux; amd64) minio-go/2.0.4 mc/DEVELOPMENT.GOGET"}}],"level":"info","msg":"","time":"2017-07-07T11:46:37-07:00"}
+```
+
## Publish Minio events via PostgreSQL
diff --git a/docs/config/README.md b/docs/config/README.md
index b138ec935..4ba4f5061 100644
--- a/docs/config/README.md
+++ b/docs/config/README.md
@@ -48,7 +48,14 @@ minio server ~/Photos
#### Region
|Field|Type|Description|
|:---|:---|:---|
-|``region``| _string_ | `region` describes the physical location of the server.
By default it is set to `us-east-1`, which is same as AWS3's default region. If you are unsure leave it to default.| +|``region``| _string_ | `region` describes the physical location of the server. By default it is set to `us-east-1`, which is same as AWS S3's default region. You may override this field with `MINIO_REGION` environment variable. If you are unsure leave it unset.| + +Example: + +```sh +export MINIO_REGION="my_region" +minio server ~/Photos +``` #### Browser |Field|Type|Description| @@ -77,6 +84,7 @@ minio server ~/Photos |:---|:---|:---| |``notify``| |Notify enables bucket notification events for lambda computing via the following targets.| |``notify.amqp``| |[Configure to publish Minio events via AMQP target.](http://docs.minio.io/docs/minio-bucket-notification-guide#AMQP)| +|``notify.mqtt``| |[Configure to publish Minio events via MQTT target.](http://docs.minio.io/docs/minio-bucket-notification-guide#MQTT)| |``notify.elasticsearch``| |[Configure to publish Minio events via Elasticsearch target.](http://docs.minio.io/docs/minio-bucket-notification-guide#Elasticsearch)| |``notify.redis``| |[Configure to publish Minio events via Redis target.](http://docs.minio.io/docs/minio-bucket-notification-guide#Redis)| |``notify.nats``| |[Configure to publish Minio events via NATS target.](http://docs.minio.io/docs/minio-bucket-notification-guide#NATS)| diff --git a/docs/config/config.sample.json b/docs/config/config.sample.json index fcdf032b0..cb4673928 100644 --- a/docs/config/config.sample.json +++ b/docs/config/config.sample.json @@ -41,7 +41,7 @@ "password": "yoursecret", "token": "", "secure": false, - "pingInterval": 0 + "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", diff --git a/docs/distributed/README.md b/docs/distributed/README.md index 968693cbc..410bae976 100644 --- a/docs/distributed/README.md +++ b/docs/distributed/README.md @@ -69,7 +69,7 @@ minio.exe server http://192.168.1.11/C:/data http://192.168.1.12/C:/data ^ http://192.168.1.17/C:/data http://192.168.1.18/C:/data ``` -![Distributed Minio, 8 nodes with 1 disk each](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/Architecture-diagram_distributed_8.png) +![Distributed Minio, 8 nodes with 1 disk each](https://github.com/minio/minio/blob/master/docs/screenshots/Architecture-diagram_distributed_8.jpg?raw=true) Example 2: Start distributed Minio instance with 4 drives each on 4 nodes, by running this command on all the 4 nodes. @@ -103,7 +103,7 @@ minio.exe server http://192.168.1.11/C:/data1 http://192.168.1.11/C:/data2 ^ http://192.168.1.14/C:/data3 http://192.168.1.14/C:/data4 ``` -![Distributed Minio, 4 nodes with 4 disks each](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/Architecture-diagram_distributed_16.png) +![Distributed Minio, 4 nodes with 4 disks each](https://github.com/minio/minio/blob/master/docs/screenshots/Architecture-diagram_distributed_16.jpg?raw=true) ## 3. Test your setup diff --git a/docs/erasure/README.md b/docs/erasure/README.md index 9d089f989..5c4b5f151 100644 --- a/docs/erasure/README.md +++ b/docs/erasure/README.md @@ -2,10 +2,6 @@ Minio protects data against hardware failures and silent data corruption using erasure code and checksums. You may lose half the number (N/2) of drives and still be able to recover the data. -## 1. Prerequisites: - -Install Minio - [Minio Quickstart Guide](https://docs.minio.io/docs/minio-quickstart-guide) - ## What is Erasure Code? 
Erasure code is a mathematical algorithm to reconstruct missing or corrupted data. Minio uses Reed-Solomon code to shard objects into N/2 data and N/2 parity blocks. This means that in a 12 drive setup, an object is sharded across as 6 data and 6 parity blocks. You can lose as many as 6 drives (be it parity or data) and still reconstruct the data reliably from the remaining drives. @@ -14,7 +10,7 @@ Erasure code is a mathematical algorithm to reconstruct missing or corrupted dat Erasure code protects data from multiple drives failure unlike RAID or replication. For eg RAID6 can protect against 2 drive failure whereas in Minio erasure code you can lose as many as half number of drives and still the data remains safe. Further Minio's erasure code is at object level and can heal one object at a time. For RAID, healing can only be performed at volume level which translates into huge down time. As Minio encodes each object individually with a high parity count. Storage servers once deployed should not require drive replacement or healing for the lifetime of the server. Minio's erasure coded backend is designed for operational efficiency and takes full advantage of hardware acceleration whenever available. -![Erasure](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/erasure-code.png?raw=true) +![Erasure](https://github.com/minio/minio/blob/master/docs/screenshots/erasure-code.jpg?raw=true) ## What is Bit Rot protection? @@ -22,28 +18,13 @@ Bit Rot also known as Data Rot or Silent Data Corruption is a serious data loss Minio's erasure coded backend uses high speed [BLAKE2](https://blog.minio.io/accelerating-blake2b-by-4x-using-simd-in-go-assembly-33ef16c8a56b#.jrp1fdwer) hash based checksums to protect against Bit Rot. -## Deployment Scenarios +## Get Started with Minio in Erasure Code -Minio server runs on a variety of hardware, operating systems and virtual/container environments. +### 1. Prerequisites: -Minio erasure code backend is limited by design to a minimum of 4 drives and a maximum of 16 drives. The hard limit of 16 drives comes from operational experience. Failure domain becomes too large beyond 16 drives. If you need to scale beyond 16 drives, you may run multiple instances of Minio server on different ports. +Install Minio - [Minio Quickstart Guide](https://docs.minio.io/docs/minio-quickstart-guide) -#### Reference Physical Hardware: - -* [SMC 5018A-AR12L (Intel Atom)](http://www.supermicro.com/products/system/1U/5018/SSG-5018A-AR12L.cfm?parts=SHOW) - SMC 1U SoC Atom C2750 platform with 12x 3.5” drive bays -* [Quanta Grid D51B-2U (OCP Compliant) ](http://www.qct.io/Product/Servers/Rackmount-Servers/2U/QuantaGrid-D51B-2U-p256c77c70c83c118)- Quanta 2U DP E5-2600v3 platform with 12x 3.5” drive bays -* [Cisco UCS C240 M4 Rack Server](http://www.cisco.com/c/en/us/products/servers-unified-computing/ucs-c240-m4-rack-server/index.html) - Cisco 2U DP E5-2600v3 platform with 12x 3.5” drive bays -* [Intel® Server System R2312WTTYSR](http://ark.intel.com/products/88286) - Intel 2U DP E5-2600v3 platform with 12x 3.5” drive bays - -#### Reference Cloud Hosting Providers: - -* [Packet](https://www.packet.net): Packet is the baremetal cloud provider. -* [Digital Ocean](https://www.digitalocean.com): Deploy an SSD cloud server in 55 seconds. -* [OVH](https://www.ovh.com/us): Build your own infrastructure with OVH public cloud. -* [Onlinetech](http://www.onlinetech.com): Secure, compliant enterprise cloud. 
-* [SSD Nodes](https://www.ssdnodes.com): Simple, high performance cloud provider with truly personalized support. - -## 2. Run Minio Server with Erasure Code. +### 2. Run Minio Server with Erasure Code. Example: Start Minio server in a 12 drives setup. @@ -51,6 +32,6 @@ Example: Start Minio server in a 12 drives setup. minio server /mnt/export1/backend /mnt/export2/backend /mnt/export3/backend /mnt/export4/backend /mnt/export5/backend /mnt/export6/backend /mnt/export7/backend /mnt/export8/backend /mnt/export9/backend /mnt/export10/backend /mnt/export11/backend /mnt/export12/backend ``` -## 3. Test your setup +### 3. Test your setup You may unplug drives randomly and continue to perform I/O on the system. diff --git a/docs/gateway/README.md b/docs/gateway/README.md index 00922babf..b7087fa5d 100644 --- a/docs/gateway/README.md +++ b/docs/gateway/README.md @@ -1,48 +1,9 @@ # Minio Gateway [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) -Minio Gateway adds Amazon S3 compatibility to third party cloud storage providers. Currently only Microsoft Azure Blob Storage is supported. - -## Run Minio Gateway for Microsoft Azure Blob Storage -### Using Docker -``` -docker run -p 9000:9000 --name azure-s3 \ - -e "MINIO_ACCESS_KEY=azureaccountname" \ - -e "MINIO_SECRET_KEY=azureaccountkey" \ - minio/minio gateway azure -``` - -### Using Binary -``` -export MINIO_ACCESS_KEY=azureaccountname -export MINIO_SECRET_KEY=azureaccountkey -minio gateway azure -``` -## Test using Minio Browser -Minio Gateway comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 ensure your server has started successfully. - -![Screenshot](https://github.com/minio/minio/blob/master/docs/screenshots/minio-browser.jpg?raw=true) -## Test using Minio Client `mc` -`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. - -### Configure `mc` -``` -mc config host add myazure http://gateway-ip:9000 azureaccountname azureaccountkey -``` - -### List containers on Microsoft Azure -``` -mc ls myazure -[2017-02-22 01:50:43 PST] 0B ferenginar/ -[2017-02-26 21:43:51 PST] 0B my-container/ -[2017-02-26 22:10:11 PST] 0B test-container1/ -``` +Minio Gateway adds Amazon S3 compatibility to third party cloud storage providers. +- [Microsoft Azure Blob Storage](https://github.com/minio/minio/blob/master/docs/gateway/azure.md) +- [Google Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/gcs.md) _Alpha release_ ## Roadmap * Minio & AWS S3 * Edge Caching - Disk based proxy caching support -* Google Cloud Storage -## Explore Further -- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide) -- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio) -- [`minfs` filesystem interface](http://docs.minio.io/docs/minfs-quickstart-guide) -- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide) diff --git a/docs/gateway/azure.md b/docs/gateway/azure.md new file mode 100644 index 000000000..4df19e0fc --- /dev/null +++ b/docs/gateway/azure.md @@ -0,0 +1,46 @@ +# Minio Azure Gateway [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) +Minio Gateway adds Amazon S3 compatibility to Microsoft Azure Blob Storage. 
+
+## Run Minio Gateway for Microsoft Azure Blob Storage
+### Using Docker
+```
+docker run -p 9000:9000 --name azure-s3 \
+  -e "MINIO_ACCESS_KEY=azureaccountname" \
+  -e "MINIO_SECRET_KEY=azureaccountkey" \
+  minio/minio gateway azure
+```
+
+### Using Binary
+```
+export MINIO_ACCESS_KEY=azureaccountname
+export MINIO_SECRET_KEY=azureaccountkey
+minio gateway azure
+```
+## Test using Minio Browser
+Minio Gateway comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.
+
+![Screenshot](https://github.com/minio/minio/blob/master/docs/screenshots/minio-browser-gateway.png?raw=true)
+## Test using Minio Client `mc`
+`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services.
+
+### Configure `mc`
+```
+mc config host add myazure http://gateway-ip:9000 azureaccountname azureaccountkey
+```
+
+### List containers on Microsoft Azure
+```
+mc ls myazure
+[2017-02-22 01:50:43 PST] 0B ferenginar/
+[2017-02-26 21:43:51 PST] 0B my-container/
+[2017-02-26 22:10:11 PST] 0B test-container1/
+```
+
+### Known limitations
+[Limitations](https://github.com/minio/minio/blob/master/docs/gateway/azure-limitations.md)
+
+## Explore Further
+- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide)
+- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio)
+- [`minfs` filesystem interface](http://docs.minio.io/docs/minfs-quickstart-guide)
+- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide)
diff --git a/docs/gateway/gcs-limitations.md b/docs/gateway/gcs-limitations.md
new file mode 100644
index 000000000..3d362b8ee
--- /dev/null
+++ b/docs/gateway/gcs-limitations.md
@@ -0,0 +1,9 @@
+## Minio GCS Gateway Limitations
+
+Gateway inherits the following GCS limitations:
+
+- Maximum number of parts per upload is 1024.
+- No support for bucket policies yet.
+- No support for bucket notifications yet.
+- _List Multipart Uploads_ and _List Object parts_ always return an empty list, i.e. the client will need to remember all the parts that it has uploaded and use them for _Complete Multipart Upload_.
+
diff --git a/docs/gateway/gcs.md b/docs/gateway/gcs.md
new file mode 100644
index 000000000..e1b92c59e
--- /dev/null
+++ b/docs/gateway/gcs.md
@@ -0,0 +1,63 @@
+# Minio GCS Gateway [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)
+Minio GCS Gateway adds Amazon S3 compatibility to Google Cloud Storage.
+
+## Run Minio Gateway for GCS
+### Create service account key for GCS and get the credentials file
+1. Go to the [API Console Credentials page](https://console.developers.google.com/project/_/apis/credentials).
+2. Select your project or create a new project. Note down your project ID.
+3. On the Credentials page, select the __Create credentials__ drop-down, then select __Service account key__.
+4. From the __Service account__ drop-down, select __New service account__.
+5. Fill in __Service account name__ and __Service account ID__.
+6. For the __Role__, click the select dropdown to choose __Storage__ -> __Storage Admin__ _(Full control of GCS resources)_
+7. Click the __Create__ button. This will download a credentials file to your desktop.
Let's call this credentials.json
+
+Note: Alternate ways to set up *Application Default Credentials* are explained [here](https://developers.google.com/identity/protocols/application-default-credentials)
+
+### Using Docker
+```
+docker run -p 9000:9000 --name gcs-s3 \
+  -v /path/to/credentials.json:/credentials.json \
+  -e "GOOGLE_APPLICATION_CREDENTIALS=/credentials.json" \
+  -e "MINIO_ACCESS_KEY=minioaccountname" \
+  -e "MINIO_SECRET_KEY=minioaccountkey" \
+  minio/minio gateway gcs yourprojectid
+```
+
+### Using Binary
+```
+export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json
+export MINIO_ACCESS_KEY=minioaccesskey
+export MINIO_SECRET_KEY=miniosecretkey
+minio gateway gcs yourprojectid
+```
+
+## Test using Minio Browser
+Minio Gateway comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.
+
+![Screenshot](https://github.com/minio/minio/blob/master/docs/screenshots/minio-browser-gateway.png?raw=true)
+
+## Test using Minio Client `mc`
+`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services.
+
+### Configure `mc`
+```
+mc config host add mygcs http://gateway-ip:9000 minioaccesskey miniosecretkey
+```
+
+### List containers on GCS
+```
+mc ls mygcs
+[2017-02-22 01:50:43 PST] 0B ferenginar/
+[2017-02-26 21:43:51 PST] 0B my-container/
+[2017-02-26 22:10:11 PST] 0B test-container1/
+```
+
+### Known limitations
+[Limitations](https://github.com/minio/minio/blob/master/docs/gateway/gcs-limitations.md)
+
+## Explore Further
+- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide)
+- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio)
+- [`minfs` filesystem interface](http://docs.minio.io/docs/minfs-quickstart-guide)
+- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide)
+
diff --git a/docs/multi-tenancy/README.md b/docs/multi-tenancy/README.md
index 2a4fdeaff..920afd17a 100644
--- a/docs/multi-tenancy/README.md
+++ b/docs/multi-tenancy/README.md
@@ -12,7 +12,7 @@ minio --config-dir ~/tenant2 server --address :9002 /disk1/data/tenant2
minio --config-dir ~/tenant3 server --address :9003 /disk1/data/tenant3
```
-![Example-1](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/Example-1.png)
+![Example-1](https://github.com/minio/minio/blob/master/docs/screenshots/Example-1.jpg?raw=true)
#### Example 2 : Single host, multiple drives (erasure code)
@@ -22,7 +22,7 @@ minio --config-dir ~/tenant1 server --address :9001 /disk1/data/tenant1 /disk2/d
minio --config-dir ~/tenant2 server --address :9002 /disk1/data/tenant2 /disk2/data/tenant2 /disk3/data/tenant2 /disk4/data/tenant2
minio --config-dir ~/tenant3 server --address :9003 /disk1/data/tenant3 /disk2/data/tenant3 /disk3/data/tenant3 /disk4/data/tenant3
```
-![Example-2](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/Example-2.png)
+![Example-2](https://github.com/minio/minio/blob/master/docs/screenshots/Example-2.jpg?raw=true)
## Distributed Deployment
To host multiple tenants in a distributed environment, run several distributed Minio instances concurrently.
@@ -45,7 +45,7 @@ export MINIO_SECRET_KEY= minio --config-dir ~/tenant3 server --address :9003 http://192.168.10.11/disk1/data/tenant3 http://192.168.10.12/disk1/data/tenant3 http://192.168.10.13/disk1/data/tenant3 http://192.168.10.14/disk1/data/tenant3 ``` -![Example-3](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/Example-3.png) +![Example-3](https://github.com/minio/minio/blob/master/docs/screenshots/Example-3.jpg?raw=true) ## Cloud Scale Deployment For large scale multi-tenant Minio deployments, we recommend using one of the popular container orchestration platforms, e.g. Kubernetes, DC/OS or Docker Swarm. Refer [this document](https://docs.minio.io/docs/minio-deployment-quickstart-guide) to get started with Minio on orchestration platforms. diff --git a/docs/orchestration/README.md b/docs/orchestration/README.md index e3aa535af..5f0ac24e6 100644 --- a/docs/orchestration/README.md +++ b/docs/orchestration/README.md @@ -20,4 +20,4 @@ Minio is built ground up on the cloud-native premise. With features like erasure In a typical modern infrastructure deployment, application, database, key-store, etc. already live in containers and are managed by orchestration platforms. Minio brings robust, scalable, AWS S3 compatible object storage to the lot. -![Cloud-native](https://raw.githubusercontent.com/NitishT/minio/master/docs/screenshots/Minio_Cloud_Native_Arch.png?raw=true) +![Cloud-native](https://github.com/minio/minio/blob/master/docs/screenshots/Minio_Cloud_Native_Arch.jpg?raw=true) diff --git a/docs/orchestration/dcos/README.md b/docs/orchestration/dcos/README.md index 703f0dc7a..fe82fc804 100644 --- a/docs/orchestration/dcos/README.md +++ b/docs/orchestration/dcos/README.md @@ -1,22 +1,24 @@ # Deploy Minio on DC/OS [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![codecov](https://codecov.io/gh/minio/minio/branch/master/graph/badge.svg)](https://codecov.io/gh/minio/minio) -To deploy Minio on DC/OS, you can use a Universe package, or create a customized config file. We at Minio recently released an [official universe package](https://github.com/mesosphere/universe/tree/version-3.x/repo/packages/M/minio/0) to enable single click Minio deployment on a DC/OS cluster. +To deploy Minio on DC/OS, you can use our [official universe package](https://github.com/mesosphere/universe/tree/version-3.x/repo/packages/M/minio/6). ## 1. Prerequisites -- DC/OS 1.8 or later running on your cluster. -- [Marathon-LB](https://dcos.io/docs/1.8/usage/service-discovery/marathon-lb/usage/) installed and running. -- IP address of the public agent(s) where Marathon-LB or an available hostname configured to point to the public agent(s) where Marathon-LB is running. + - DC/OS 1.9 or later + - [Marathon-LB](https://dcos.io/docs/1.9/usage/service-discovery/marathon-lb/usage/) must be installed and running + - Identify [IP of the public agent](https://dcos.io/docs/1.9/administration/locate-public-agent/) where Marathon-LB or an available hostname configured to point to the public agent(s) where Marathon-LB is running. + ## 2. Setting up Minio You can install Minio Universe package using the DC/OS GUI or CLI. ### Minio installation on DC/OS GUI +- Visit the DC/OS admin page, and click on Universe on the left menu bar. 
Then click on the Packages tab and search for Minio, click on the ```Install``` button on the right hand side. -Visit the DC/OS admin page, and click on `Universe` on the left menu bar. Then click on the `Packages` tab and search for Minio. Once you see the package, click the `Install Package` button on the right hand side. Then, enter configuration values like the storage and service type you’d like to use with your Minio instance. Finally enter the public Marathon-LB IP address under `networking >> public-agent`, and click `Review and Install`. +- Click on the `Install Package` button for the single-click default installation. This installs Minio server instance with factory defaults. You can reach your Minio server at `host:9000` where `host` is IP address or hostname of public-agent where Marathon-LB is installed. `minio` and `minio123` are the default access key and secret keys respectively. -This completes the install process. Before you can access Minio server, get the access key and secret key from the Minio container logs. Click on `Services` and select Minio service in DC/OS admin page. Then go to the `logs` tab and copy the accesskey and secretkey. +- For more information on advanced installation of Minio on DC/OS GUI, look [here](https://github.com/dcos/examples/blob/master/minio/1.9/README.md#minio-installation-using-gui). ### Minio installation on DC/OS CLI @@ -35,5 +37,7 @@ $ dcos package uninstall minio ``` ### Explore Further + - [Minio Erasure Code QuickStart Guide](https://docs.minio.io/docs/minio-erasure-code-quickstart-guide) - [DC/OS Project](https://docs.mesosphere.com/) + diff --git a/docs/orchestration/docker-compose/docker-compose.yaml b/docs/orchestration/docker-compose/docker-compose.yaml index b050ca8cb..6adf6788e 100644 --- a/docs/orchestration/docker-compose/docker-compose.yaml +++ b/docs/orchestration/docker-compose/docker-compose.yaml @@ -5,7 +5,7 @@ version: '2' # 9001 through 9004. 
services: minio1: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z ports: - "9001:9000" environment: @@ -13,7 +13,7 @@ services: MINIO_SECRET_KEY: minio123 command: server http://minio1/myexport http://minio2/myexport http://minio3/myexport http://minio4/myexport minio2: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z ports: - "9002:9000" environment: @@ -21,7 +21,7 @@ services: MINIO_SECRET_KEY: minio123 command: server http://minio1/myexport http://minio2/myexport http://minio3/myexport http://minio4/myexport minio3: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z ports: - "9003:9000" environment: @@ -29,7 +29,7 @@ services: MINIO_SECRET_KEY: minio123 command: server http://minio1/myexport http://minio2/myexport http://minio3/myexport http://minio4/myexport minio4: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z ports: - "9004:9000" environment: diff --git a/docs/orchestration/docker-swarm/docker-compose-secrets.yaml b/docs/orchestration/docker-swarm/docker-compose-secrets.yaml index bccca3a4d..fbd578f65 100644 --- a/docs/orchestration/docker-swarm/docker-compose-secrets.yaml +++ b/docs/orchestration/docker-swarm/docker-compose-secrets.yaml @@ -2,7 +2,7 @@ version: '3.1' services: minio1: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z volumes: - minio1-data:/export ports: @@ -20,7 +20,7 @@ services: - access_key minio2: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z volumes: - minio2-data:/export ports: @@ -38,7 +38,7 @@ services: - access_key minio3: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z volumes: - minio3-data:/export ports: @@ -56,7 +56,7 @@ services: - access_key minio4: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z volumes: - minio4-data:/export ports: diff --git a/docs/orchestration/docker-swarm/docker-compose.yaml b/docs/orchestration/docker-swarm/docker-compose.yaml index a58aac5e4..b1df557df 100644 --- a/docs/orchestration/docker-swarm/docker-compose.yaml +++ b/docs/orchestration/docker-swarm/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3' services: minio1: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z volumes: - minio1-data:/export ports: @@ -20,7 +20,7 @@ services: command: server http://minio1/export http://minio2/export http://minio3/export http://minio4/export minio2: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z volumes: - minio2-data:/export ports: @@ -38,7 +38,7 @@ services: command: server http://minio1/export http://minio2/export http://minio3/export http://minio4/export minio3: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z volumes: - minio3-data:/export ports: @@ -56,7 +56,7 @@ services: command: server http://minio1/export http://minio2/export http://minio3/export http://minio4/export minio4: - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z volumes: - minio4-data:/export ports: diff --git a/docs/orchestration/kubernetes-yaml/minio-distributed-statefulset.yaml b/docs/orchestration/kubernetes-yaml/minio-distributed-statefulset.yaml index 
08aadeebc..dd6c421a3 100644 --- a/docs/orchestration/kubernetes-yaml/minio-distributed-statefulset.yaml +++ b/docs/orchestration/kubernetes-yaml/minio-distributed-statefulset.yaml @@ -19,7 +19,7 @@ spec: value: "minio" - name: MINIO_SECRET_KEY value: "minio123" - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z args: - server - http://minio-0.minio.default.svc.cluster.local/data diff --git a/docs/orchestration/kubernetes-yaml/minio-standalone-deployment.yaml b/docs/orchestration/kubernetes-yaml/minio-standalone-deployment.yaml index 51218e4c5..3fc01d490 100644 --- a/docs/orchestration/kubernetes-yaml/minio-standalone-deployment.yaml +++ b/docs/orchestration/kubernetes-yaml/minio-standalone-deployment.yaml @@ -21,7 +21,7 @@ spec: containers: - name: minio # Pulls the default Minio image from Docker Hub - image: minio/minio:RELEASE.2017-05-05T01-14-51Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z args: - server - /storage diff --git a/docs/orchestration/minikube/statefulset.yaml b/docs/orchestration/minikube/statefulset.yaml index fb9c73018..da424b3cc 100644 --- a/docs/orchestration/minikube/statefulset.yaml +++ b/docs/orchestration/minikube/statefulset.yaml @@ -54,7 +54,7 @@ spec: value: "minio" - name: MINIO_SECRET_KEY value: "minio123" - image: minio/minio:RELEASE.2017-02-16T01-47-30Z + image: minio/minio:RELEASE.2017-06-13T19-01-01Z imagePullPolicy: IfNotPresent #command: ["df"] #args: ["-i", "/data"] diff --git a/docs/screenshots/Architecture-diagram_distributed_16.jpg b/docs/screenshots/Architecture-diagram_distributed_16.jpg new file mode 100644 index 000000000..49eb5400f Binary files /dev/null and b/docs/screenshots/Architecture-diagram_distributed_16.jpg differ diff --git a/docs/screenshots/Architecture-diagram_distributed_8.jpg b/docs/screenshots/Architecture-diagram_distributed_8.jpg new file mode 100644 index 000000000..52c3d64f7 Binary files /dev/null and b/docs/screenshots/Architecture-diagram_distributed_8.jpg differ diff --git a/docs/screenshots/Example-1.jpg b/docs/screenshots/Example-1.jpg new file mode 100644 index 000000000..f46a7d216 Binary files /dev/null and b/docs/screenshots/Example-1.jpg differ diff --git a/docs/screenshots/Example-2.jpg b/docs/screenshots/Example-2.jpg new file mode 100644 index 000000000..838f5c88d Binary files /dev/null and b/docs/screenshots/Example-2.jpg differ diff --git a/docs/screenshots/Example-3.jpg b/docs/screenshots/Example-3.jpg new file mode 100644 index 000000000..e0394cffa Binary files /dev/null and b/docs/screenshots/Example-3.jpg differ diff --git a/docs/screenshots/Minio_Cloud_Native_Arch.jpg b/docs/screenshots/Minio_Cloud_Native_Arch.jpg new file mode 100644 index 000000000..c4ca5ceb1 Binary files /dev/null and b/docs/screenshots/Minio_Cloud_Native_Arch.jpg differ diff --git a/docs/screenshots/erasure-code.jpg b/docs/screenshots/erasure-code.jpg new file mode 100644 index 000000000..440f307bc Binary files /dev/null and b/docs/screenshots/erasure-code.jpg differ diff --git a/docs/screenshots/minio-browser-gateway.png b/docs/screenshots/minio-browser-gateway.png new file mode 100644 index 000000000..c5ea2bca6 Binary files /dev/null and b/docs/screenshots/minio-browser-gateway.png differ diff --git a/pkg/http/bufconn.go b/pkg/http/bufconn.go new file mode 100644 index 000000000..cf6679502 --- /dev/null +++ b/pkg/http/bufconn.go @@ -0,0 +1,101 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package http + +import ( + "bufio" + "net" + "time" +) + +// BufConn - is a generic stream-oriented network connection supporting buffered reader and read/write timeout. +type BufConn struct { + net.Conn + bufReader *bufio.Reader // buffered reader wraps reader in net.Conn. + readTimeout time.Duration // sets the read timeout in the connection. + writeTimeout time.Duration // sets the write timeout in the connection. + updateBytesReadFunc func(int) // function to be called to update bytes read. + updateBytesWrittenFunc func(int) // function to be called to update bytes written. +} + +// Sets read timeout +func (c *BufConn) setReadTimeout() { + if c.readTimeout != 0 { + c.SetReadDeadline(time.Now().UTC().Add(c.readTimeout)) + } +} + +func (c *BufConn) setWriteTimeout() { + if c.writeTimeout != 0 { + c.SetWriteDeadline(time.Now().UTC().Add(c.writeTimeout)) + } +} + +// RemoveTimeout - removes all configured read and write +// timeouts. Used by callers which control net.Conn behavior +// themselves. +func (c *BufConn) RemoveTimeout() { + c.readTimeout = 0 + c.writeTimeout = 0 + // Unset read/write timeouts, since we use **bufio** it is not + // guaranteed that the underlying Peek/Read operation in-fact + // indeed performed a Read() operation on the network. With + // that in mind we need to unset any timeouts currently set to + // avoid any pre-mature timeouts. + c.SetDeadline(time.Time{}) +} + +// Peek - returns the next n bytes without advancing the reader. It just wraps bufio.Reader.Peek(). +func (c *BufConn) Peek(n int) ([]byte, error) { + c.setReadTimeout() + return c.bufReader.Peek(n) +} + +// Read - reads data from the connection using wrapped buffered reader. +func (c *BufConn) Read(b []byte) (n int, err error) { + c.setReadTimeout() + n, err = c.bufReader.Read(b) + if err == nil && c.updateBytesReadFunc != nil { + c.updateBytesReadFunc(n) + } + + return n, err +} + +// Write - writes data to the connection. +func (c *BufConn) Write(b []byte) (n int, err error) { + c.setWriteTimeout() + n, err = c.Conn.Write(b) + if err == nil && c.updateBytesWrittenFunc != nil { + c.updateBytesWrittenFunc(n) + } + + return n, err +} + +// newBufConn - creates a new connection object wrapping net.Conn. +func newBufConn(c net.Conn, readTimeout, writeTimeout time.Duration, + updateBytesReadFunc, updateBytesWrittenFunc func(int)) *BufConn { + return &BufConn{ + Conn: c, + bufReader: bufio.NewReader(c), + readTimeout: readTimeout, + writeTimeout: writeTimeout, + updateBytesReadFunc: updateBytesReadFunc, + updateBytesWrittenFunc: updateBytesWrittenFunc, + } +} diff --git a/pkg/http/bufconn_test.go b/pkg/http/bufconn_test.go new file mode 100644 index 000000000..d3ff749d1 --- /dev/null +++ b/pkg/http/bufconn_test.go @@ -0,0 +1,111 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package http + +import ( + "bufio" + "io" + "net" + "sync" + "testing" + "time" +) + +// Test bufconn handles read timeout properly by reading two messages beyond deadline. +func TestBuffConnReadTimeout(t *testing.T) { + port := getNextPort() + l, err := net.Listen("tcp", "localhost:"+port) + if err != nil { + t.Fatalf("unable to create listener. %v", err) + } + defer l.Close() + + tcpListener, ok := l.(*net.TCPListener) + if !ok { + t.Fatalf("failed to assert to net.TCPListener") + } + + var wg sync.WaitGroup + go func() { + wg.Add(1) + defer wg.Done() + + tcpConn, terr := tcpListener.AcceptTCP() + if terr != nil { + t.Fatalf("failed to accept new connection. %v", terr) + } + bufconn := newBufConn(tcpConn, 1*time.Second, 1*time.Second, nil, nil) + defer bufconn.Close() + + // Read a line + var b = make([]byte, 12) + _, terr = bufconn.Read(b) + if terr != nil { + t.Fatalf("failed to read from client. %v", terr) + } + received := string(b) + if received != "message one\n" { + t.Fatalf(`server: expected: "message one\n", got: %v`, received) + } + + // Wait for more than read timeout to simulate processing. + time.Sleep(3 * time.Second) + + _, terr = bufconn.Read(b) + if terr != nil { + t.Fatalf("failed to read from client. %v", terr) + } + received = string(b) + if received != "message two\n" { + t.Fatalf(`server: expected: "message two\n", got: %v`, received) + } + + // Send a response. + _, terr = io.WriteString(bufconn, "messages received\n") + if terr != nil { + t.Fatalf("failed to write to client. %v", terr) + } + + // Removes all deadlines if any. + bufconn.RemoveTimeout() + }() + + c, err := net.Dial("tcp", "localhost:"+port) + if err != nil { + t.Fatalf("unable to connect to server. %v", err) + } + defer c.Close() + + _, err = io.WriteString(c, "message one\n") + if err != nil { + t.Fatalf("failed to write to server. %v", err) + } + _, err = io.WriteString(c, "message two\n") + if err != nil { + t.Fatalf("failed to write to server. %v", err) + } + + received, err := bufio.NewReader(c).ReadString('\n') + if err != nil { + t.Fatalf("failed to read from server. %v", err) + } + if received != "messages received\n" { + t.Fatalf(`client: expected: "messages received\n", got: %v`, received) + } + + wg.Wait() +} diff --git a/pkg/http/listener.go b/pkg/http/listener.go new file mode 100644 index 000000000..96e92951b --- /dev/null +++ b/pkg/http/listener.go @@ -0,0 +1,320 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
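The test above drives BufConn end to end; for readers skimming the diff, here is a stripped-down, standalone sketch of the one idea the wrapper adds on top of net.Conn: the read deadline is refreshed before every Read, so the timeout bounds idle time per operation rather than the whole connection. The deadlineConn type and the echo flow below are illustrative only and are not part of this change.

```
// Standalone sketch of the idea behind BufConn: refresh the read deadline
// before every Read so the timeout applies per operation, not to the whole
// connection. deadlineConn is an illustrative stand-in for the unexported
// type added in pkg/http/bufconn.go.
package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"time"
)

type deadlineConn struct {
	net.Conn
	reader      *bufio.Reader // buffered reader over the underlying net.Conn
	readTimeout time.Duration // per-Read idle timeout
}

func (c *deadlineConn) Read(p []byte) (int, error) {
	if c.readTimeout != 0 {
		// Mirrors setReadTimeout(): the deadline moves forward on every call.
		c.Conn.SetReadDeadline(time.Now().UTC().Add(c.readTimeout))
	}
	return c.reader.Read(p)
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// Client side: two slow writes, each arriving within the per-Read timeout.
	go func() {
		conn, derr := net.Dial("tcp", l.Addr().String())
		if derr != nil {
			return
		}
		defer conn.Close()
		io.WriteString(conn, "first\n")
		time.Sleep(1 * time.Second)
		io.WriteString(conn, "second\n")
	}()

	raw, err := l.Accept()
	if err != nil {
		panic(err)
	}
	conn := &deadlineConn{Conn: raw, reader: bufio.NewReader(raw), readTimeout: 2 * time.Second}
	defer conn.Close()

	buf := make([]byte, 64)
	for {
		n, rerr := conn.Read(buf)
		if n > 0 {
			fmt.Printf("read %q\n", buf[:n])
		}
		if rerr != nil { // io.EOF once the client closes
			break
		}
	}
}
```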
+ */ + +package http + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "strings" + "sync" + "syscall" + "time" +) + +var sslRequiredErrMsg = []byte("HTTP/1.0 403 Forbidden\r\n\r\nSSL required") + +// HTTP methods. +var methods = []string{ + http.MethodGet, + http.MethodHead, + http.MethodPost, + http.MethodPut, + http.MethodPatch, + http.MethodDelete, + http.MethodConnect, + http.MethodOptions, + http.MethodTrace, + "PRI", // HTTP 2 method +} + +// maximum length of above methods + one space. +var methodMaxLen = getMethodMaxLen() + 1 + +func getMethodMaxLen() int { + maxLen := 0 + for _, method := range methods { + if len(method) > maxLen { + maxLen = len(method) + } + } + + return maxLen +} + +func isHTTPMethod(s string) bool { + for _, method := range methods { + if s == method { + return true + } + } + + return false +} + +type acceptResult struct { + conn net.Conn + err error +} + +// httpListener - HTTP listener capable of handling multiple server addresses. +type httpListener struct { + mutex sync.Mutex // to guard Close() method. + tcpListeners []*net.TCPListener // underlaying TCP listeners. + acceptCh chan acceptResult // channel where all TCP listeners write accepted connection. + doneCh chan struct{} // done channel for TCP listener goroutines. + tlsConfig *tls.Config // TLS configuration + tcpKeepAliveTimeout time.Duration + readTimeout time.Duration + writeTimeout time.Duration + updateBytesReadFunc func(int) // function to be called to update bytes read in BufConn. + updateBytesWrittenFunc func(int) // function to be called to update bytes written in BufConn. + errorLogFunc func(error, string, ...interface{}) // function to be called on errors. +} + +// start - starts separate goroutine for each TCP listener. A valid insecure/TLS HTTP new connection is passed to httpListener.acceptCh. +func (listener *httpListener) start() { + listener.acceptCh = make(chan acceptResult) + listener.doneCh = make(chan struct{}) + + // Closure to send acceptResult to acceptCh. + // It returns true if the result is sent else false if returns when doneCh is closed. + send := func(result acceptResult, doneCh <-chan struct{}) bool { + select { + case listener.acceptCh <- result: + // Successfully written to acceptCh + return true + case <-doneCh: + // As stop signal is received, close accepted connection. + if result.conn != nil { + result.conn.Close() + } + return false + } + } + + // Closure to handle single connection. + handleConn := func(tcpConn *net.TCPConn, doneCh <-chan struct{}) { + // Tune accepted TCP connection. + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(listener.tcpKeepAliveTimeout) + + bufconn := newBufConn(tcpConn, listener.readTimeout, listener.writeTimeout, + listener.updateBytesReadFunc, listener.updateBytesWrittenFunc) + + // Peek bytes of maximum length of all HTTP methods. + data, err := bufconn.Peek(methodMaxLen) + if err != nil { + if listener.errorLogFunc != nil { + listener.errorLogFunc(err, + "Error in reading from new connection %s at server %s", + bufconn.RemoteAddr(), bufconn.LocalAddr()) + } + bufconn.Close() + return + } + + // Return bufconn if read data is a valid HTTP method. + tokens := strings.SplitN(string(data), " ", 2) + if isHTTPMethod(tokens[0]) { + if listener.tlsConfig == nil { + send(acceptResult{bufconn, nil}, doneCh) + } else { + // As TLS is configured and we got plain text HTTP request, + // return 403 (forbidden) error. 
+ bufconn.Write(sslRequiredErrMsg) + bufconn.Close() + } + return + } + + if listener.tlsConfig != nil { + // As the listener is configured with TLS, try to do TLS handshake, drop the connection if it fails. + tlsConn := tls.Server(bufconn, listener.tlsConfig) + if err := tlsConn.Handshake(); err != nil { + if listener.errorLogFunc != nil { + listener.errorLogFunc(err, + "TLS handshake failed with new connection %s at server %s", + bufconn.RemoteAddr(), bufconn.LocalAddr()) + } + bufconn.Close() + return + } + + // Check whether the connection contains HTTP request or not. + bufconn = newBufConn(tlsConn, listener.readTimeout, listener.writeTimeout, + listener.updateBytesReadFunc, listener.updateBytesWrittenFunc) + + // Peek bytes of maximum length of all HTTP methods. + data, err := bufconn.Peek(methodMaxLen) + if err != nil { + if listener.errorLogFunc != nil { + listener.errorLogFunc(err, + "Error in reading from new TLS connection %s at server %s", + bufconn.RemoteAddr(), bufconn.LocalAddr()) + } + bufconn.Close() + return + } + + // Return bufconn if read data is a valid HTTP method. + tokens := strings.SplitN(string(data), " ", 2) + if isHTTPMethod(tokens[0]) { + send(acceptResult{bufconn, nil}, doneCh) + return + } + } + + if listener.errorLogFunc != nil { + listener.errorLogFunc(errors.New("junk message"), + "Received non-HTTP message from new connection %s at server %s", + bufconn.RemoteAddr(), bufconn.LocalAddr()) + } + + bufconn.Close() + return + } + + // Closure to handle TCPListener until done channel is closed. + handleListener := func(tcpListener *net.TCPListener, doneCh <-chan struct{}) { + for { + tcpConn, err := tcpListener.AcceptTCP() + if err != nil { + // Returns when send fails. + if !send(acceptResult{nil, err}, doneCh) { + return + } + } else { + go handleConn(tcpConn, doneCh) + } + } + } + + // Start separate goroutine for each TCP listener to handle connection. + for _, tcpListener := range listener.tcpListeners { + go handleListener(tcpListener, listener.doneCh) + } +} + +// Accept - reads from httpListener.acceptCh for one of previously accepted TCP connection and returns the same. +func (listener *httpListener) Accept() (conn net.Conn, err error) { + result, ok := <-listener.acceptCh + if ok { + return result.conn, result.err + } + + return nil, syscall.EINVAL +} + +// Close - closes underneath all TCP listeners. +func (listener *httpListener) Close() (err error) { + listener.mutex.Lock() + defer listener.mutex.Unlock() + if listener.doneCh == nil { + return syscall.EINVAL + } + + for i := range listener.tcpListeners { + listener.tcpListeners[i].Close() + } + close(listener.doneCh) + + listener.doneCh = nil + return nil +} + +// Addr - net.Listener interface compatible method returns net.Addr. In case of multiple TCP listeners, it returns '0.0.0.0' as IP address. +func (listener *httpListener) Addr() (addr net.Addr) { + addr = listener.tcpListeners[0].Addr() + if len(listener.tcpListeners) == 1 { + return addr + } + + tcpAddr := addr.(*net.TCPAddr) + if ip := net.ParseIP("0.0.0.0"); ip != nil { + tcpAddr.IP = ip + } + + addr = tcpAddr + return addr +} + +// Addrs - returns all address information of TCP listeners. +func (listener *httpListener) Addrs() (addrs []net.Addr) { + for i := range listener.tcpListeners { + addrs = append(addrs, listener.tcpListeners[i].Addr()) + } + + return addrs +} + +// newHTTPListener - creates new httpListener object which is interface compatible to net.Listener. 
+// httpListener is capable to +// * listen to multiple addresses +// * controls incoming connections only doing HTTP protocol +func newHTTPListener(serverAddrs []string, + tlsConfig *tls.Config, + tcpKeepAliveTimeout time.Duration, + readTimeout time.Duration, + writeTimeout time.Duration, + updateBytesReadFunc func(int), + updateBytesWrittenFunc func(int), + errorLogFunc func(error, string, ...interface{})) (listener *httpListener, err error) { + + var tcpListeners []*net.TCPListener + // Close all opened listeners on error + defer func() { + if err == nil { + return + } + + for _, tcpListener := range tcpListeners { + // Ignore error on close. + tcpListener.Close() + } + }() + + for _, serverAddr := range serverAddrs { + var l net.Listener + if l, err = net.Listen("tcp", serverAddr); err != nil { + return nil, err + } + + tcpListener, ok := l.(*net.TCPListener) + if !ok { + return nil, fmt.Errorf("unexpected listener type found %v, expected net.TCPListener", l) + } + + tcpListeners = append(tcpListeners, tcpListener) + } + + listener = &httpListener{ + tcpListeners: tcpListeners, + tlsConfig: tlsConfig, + tcpKeepAliveTimeout: tcpKeepAliveTimeout, + readTimeout: readTimeout, + writeTimeout: writeTimeout, + updateBytesReadFunc: updateBytesReadFunc, + updateBytesWrittenFunc: updateBytesWrittenFunc, + errorLogFunc: errorLogFunc, + } + listener.start() + + return listener, nil +} diff --git a/pkg/http/listener_test.go b/pkg/http/listener_test.go new file mode 100644 index 000000000..12b80c022 --- /dev/null +++ b/pkg/http/listener_test.go @@ -0,0 +1,817 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
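listener.go above decides what to do with a new connection by peeking at most methodMaxLen bytes and checking whether the first token is a known HTTP method. A hedged, standalone sketch of that check follows; the method list mirrors the one in the diff, and since a TLS ClientHello begins with the handshake record byte 0x16 it can never match, which is how a TLS-configured listener spots plain-text clients and answers with the 403 sslRequiredErrMsg.

```
// Standalone sketch of the peek-based detection performed in handleConn:
// split the first peeked bytes on a space and compare the first token
// against the known HTTP methods.
package main

import (
	"fmt"
	"strings"
)

var httpMethods = []string{
	"GET", "HEAD", "POST", "PUT", "PATCH", "DELETE",
	"CONNECT", "OPTIONS", "TRACE", "PRI", // PRI is the HTTP/2 preface method
}

func looksLikeHTTP(peeked []byte) bool {
	token := strings.SplitN(string(peeked), " ", 2)[0]
	for _, m := range httpMethods {
		if token == m {
			return true
		}
	}
	return false
}

func main() {
	// First 8 bytes (len("CONNECT")+1, i.e. methodMaxLen) of a plain HTTP request line.
	fmt.Println(looksLikeHTTP([]byte("GET / HT"))) // true

	// First bytes of a TLS ClientHello: record type 0x16, version, length, ...
	fmt.Println(looksLikeHTTP([]byte{0x16, 0x03, 0x01, 0x00, 0xc8, 0x01, 0x00, 0x00})) // false
}
```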
+ */ + +package http + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/minio/minio-go/pkg/set" +) + +var serverPort uint32 = 60000 + +func getNextPort() string { + return strconv.Itoa(int(atomic.AddUint32(&serverPort, 1))) +} + +func getTLSCert() (tls.Certificate, error) { + keyPEMBlock := []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEApEkbPrT6wzcWK1W5atQiGptvuBsRdf8MCg4u6SN10QbslA5k +6BYRdZfFeRpwAwYyzkumug6+eBJatDZEd7+0FF86yxB7eMTSiHKRZ5Mi5ZyCFsez +dndknGBeK6I80s1jd5ZsLLuMKErvbNwSbfX+X6d2mBeYW8Scv9N+qYnNrHHHohvX +oxy1gZ18EhhogQhrD22zaqg/jtmOT8ImUiXzB1mKInt2LlSkoRYuBzepkDJrsE1L +/cyYZbtcO/ASDj+/qQAuQ66v9pNyJkIQ7bDOUyxaT5Hx9XvbqI1OqUVAdGLLi+eZ +IFguFyYd0lemwdN/IDvxftzegTO3cO0D28d1UQIDAQABAoIBAB42x8j3lerTNcOQ +h4JLM157WcedSs/NsVQkGaKM//0KbfYo04wPivR6jjngj9suh6eDKE2tqoAAuCfO +lzcCzca1YOW5yUuDv0iS8YT//XoHF7HC1pGiEaHk40zZEKCgX3u98XUkpPlAFtqJ +euY4SKkk7l24cS/ncACjj/b0PhxJoT/CncuaaJKqmCc+vdL4wj1UcrSNPZqRjDR/ +sh5DO0LblB0XrqVjpNxqxM60/IkbftB8YTnyGgtO2tbTPr8KdQ8DhHQniOp+WEPV +u/iXt0LLM7u62LzadkGab2NDWS3agnmdvw2ADtv5Tt8fZ7WnPqiOpNyD5Bv1a3/h +YBw5HsUCgYEA0Sfv6BiSAFEby2KusRoq5UeUjp/SfL7vwpO1KvXeiYkPBh2XYVq2 +azMnOw7Rz5ixFhtUtto2XhYdyvvr3dZu1fNHtxWo9ITBivqTGGRNwfiaQa58Bugo +gy7vCdIE/f6xE5LYIovBnES2vs/ZayMyhTX84SCWd0pTY0kdDA8ePGsCgYEAyRSA +OTzX43KUR1G/trpuM6VBc0W6YUNYzGRa1TcUxBP4K7DfKMpPGg6ulqypfoHmu8QD +L+z+iQmG9ySSuvScIW6u8LgkrTwZga8y2eb/A2FAVYY/bnelef1aMkis+bBX2OQ4 +QAg2uq+pkhpW1k5NSS9lVCPkj4e5Ur9RCm9fRDMCgYAf3CSIR03eLHy+Y37WzXSh +TmELxL6sb+1Xx2Y+cAuBCda3CMTpeIb3F2ivb1d4dvrqsikaXW0Qse/B3tQUC7kA +cDmJYwxEiwBsajUD7yuFE5hzzt9nse+R5BFXfp1yD1zr7V9tC7rnUfRAZqrozgjB +D/NAW9VvwGupYRbCon7plwKBgQCRPfeoYGRoa9ji8w+Rg3QaZeGyy8jmfGjlqg9a +NyEOyIXXuThYFFmyrqw5NZpwQJBTTDApK/xnK7SLS6WY2Rr1oydFxRzo7KJX5B7M ++md1H4gCvqeOuWmThgbij1AyQsgRaDehOM2fZ0cKu2/B+Gkm1c9RSWPMsPKR7JMz +AGNFtQKBgQCRCFIdGJHnvz35vJfLoihifCejBWtZbAnZoBHpF3xMCtV755J96tUf +k1Tv9hz6WfSkOSlwLq6eGZY2dCENJRW1ft1UelpFvCjbfrfLvoFFLs3gu0lfqXHi +CS6fjhn9Ahvz10yD6fd4ixRUjoJvULzI0Sxc1O95SYVF1lIAuVr9Hw== +-----END RSA PRIVATE KEY-----`) + certPEMBlock := []byte(`-----BEGIN CERTIFICATE----- +MIIDXTCCAkWgAwIBAgIJAKlqK5HKlo9MMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQwHhcNMTcwNjE5MTA0MzEyWhcNMjcwNjE3MTA0MzEyWjBF +MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 +ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEApEkbPrT6wzcWK1W5atQiGptvuBsRdf8MCg4u6SN10QbslA5k6BYRdZfF +eRpwAwYyzkumug6+eBJatDZEd7+0FF86yxB7eMTSiHKRZ5Mi5ZyCFsezdndknGBe +K6I80s1jd5ZsLLuMKErvbNwSbfX+X6d2mBeYW8Scv9N+qYnNrHHHohvXoxy1gZ18 +EhhogQhrD22zaqg/jtmOT8ImUiXzB1mKInt2LlSkoRYuBzepkDJrsE1L/cyYZbtc +O/ASDj+/qQAuQ66v9pNyJkIQ7bDOUyxaT5Hx9XvbqI1OqUVAdGLLi+eZIFguFyYd +0lemwdN/IDvxftzegTO3cO0D28d1UQIDAQABo1AwTjAdBgNVHQ4EFgQUqMVdMIA1 +68Dv+iwGugAaEGUSd0IwHwYDVR0jBBgwFoAUqMVdMIA168Dv+iwGugAaEGUSd0Iw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAjQVoqRv2HlE5PJIX/qk5 +oMOKZlHTyJP+s2HzOOVt+eCE/jNdfC7+8R/HcPldQs7p9GqH2F6hQ9aOtDhJVEaU +pjxCi4qKeZ1kWwqv8UMBXW92eHGysBvE2Gmm/B1JFl8S2GR5fBmheZVnYW893MoI +gp+bOoCcIuMJRqCra4vJgrOsQjgRElQvd2OlP8qQzInf/fRqO/AnZPwMkGr3+KZ0 +BKEOXtmSZaPs3xEsnvJd8wrTgA0NQK7v48E+gHSXzQtaHmOLqisRXlUOu2r1gNCJ +rr3DRiUP6V/10CZ/ImeSJ72k69VuTw9vq2HzB4x6pqxF2X7JQSLUCS2wfNN13N0d +9A== +-----END CERTIFICATE-----`) + + return tls.X509KeyPair(certPEMBlock, keyPEMBlock) +} + +func getTLSConfig(t *testing.T) *tls.Config { + tlsCert, err := getTLSCert() + if 
err != nil { + t.Fatalf("Unable to parse private/certificate data. %v\n", err) + } + tlsConfig := &tls.Config{ + PreferServerCipherSuites: true, + MinVersion: tls.VersionTLS12, + NextProtos: []string{"http/1.1", "h2"}, + } + tlsConfig.Certificates = append(tlsConfig.Certificates, tlsCert) + + return tlsConfig +} + +func getNonLoopBackIP(t *testing.T) string { + localIP4 := set.NewStringSet() + addrs, err := net.InterfaceAddrs() + if err != nil { + t.Fatalf("%s. Unable to get IP addresses of this host.", err) + } + + for _, addr := range addrs { + var ip net.IP + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + + if ip.To4() != nil { + localIP4.Add(ip.String()) + } + } + + // Filter ipList by IPs those do not start with '127.'. + nonLoopBackIPs := localIP4.FuncMatch(func(ip string, matchString string) bool { + return !strings.HasPrefix(ip, "127.") + }, "") + if len(nonLoopBackIPs) == 0 { + t.Fatalf("No non-loop back IP address found for this host") + } + nonLoopBackIP := nonLoopBackIPs.ToSlice()[0] + return nonLoopBackIP +} + +// Test getMethodMaxLen() +func TestGetMethodMaxLen(t *testing.T) { + l := getMethodMaxLen() + if l != (methodMaxLen - 1) { + t.Fatalf("expected: %v, got: %v", (methodMaxLen - 1), l) + } +} + +// Test isHTTPMethod() +func TestIsHTTPMethod(t *testing.T) { + testCases := []struct { + method string + expectedResult bool + }{ + {"", false}, + {"get", false}, + {"put", false}, + {"UPLOAD", false}, + {"OPTIONS", true}, + {"GET", true}, + {"HEAD", true}, + {"POST", true}, + {"PUT", true}, + {"DELETE", true}, + {"TRACE", true}, + {"CONNECT", true}, + {"PRI", true}, + } + + for _, testCase := range testCases { + result := isHTTPMethod(testCase.method) + if result != testCase.expectedResult { + t.Fatalf("expected: %v, got: %v", testCase.expectedResult, result) + } + } +} + +func TestNewHTTPListener(t *testing.T) { + errMsg := ": no such host" + if runtime.GOOS == "windows" { + errMsg = ": No such host is known." + } + + remoteAddrErrMsg := "listen tcp 93.184.216.34:9000: bind: cannot assign requested address" + if runtime.GOOS == "windows" { + remoteAddrErrMsg = "listen tcp 93.184.216.34:9000: bind: The requested address is not valid in its context." 
+ } + + tlsConfig := getTLSConfig(t) + + testCases := []struct { + serverAddrs []string + tlsConfig *tls.Config + tcpKeepAliveTimeout time.Duration + readTimeout time.Duration + writeTimeout time.Duration + updateBytesReadFunc func(int) + updateBytesWrittenFunc func(int) + errorLogFunc func(error, string, ...interface{}) + expectedErr error + }{ + {[]string{"93.184.216.34:9000"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, errors.New(remoteAddrErrMsg)}, + {[]string{"example.org:9000"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, errors.New(remoteAddrErrMsg)}, + {[]string{"unknown-host"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, errors.New("listen tcp: missing port in address unknown-host")}, + {[]string{"unknown-host:9000"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, errors.New("listen tcp: lookup unknown-host" + errMsg)}, + {[]string{"localhost:9000", "93.184.216.34:9000"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, errors.New(remoteAddrErrMsg)}, + {[]string{"localhost:9000", "unknown-host:9000"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, errors.New("listen tcp: lookup unknown-host" + errMsg)}, + {[]string{"localhost:" + getNextPort()}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, nil}, + {[]string{"localhost:" + getNextPort()}, tlsConfig, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, nil}, + } + + for _, testCase := range testCases { + listener, err := newHTTPListener( + testCase.serverAddrs, + testCase.tlsConfig, + testCase.tcpKeepAliveTimeout, + testCase.readTimeout, + testCase.writeTimeout, + testCase.updateBytesReadFunc, + testCase.updateBytesWrittenFunc, + testCase.errorLogFunc, + ) + + if testCase.expectedErr == nil { + if err != nil { + t.Fatalf("error: expected = , got = %v", err) + } + } else if err == nil { + t.Fatalf("error: expected = %v, got = ", testCase.expectedErr) + } else { + var match bool + if strings.HasSuffix(testCase.expectedErr.Error(), errMsg) { + match = strings.HasSuffix(err.Error(), errMsg) + } else { + match = (testCase.expectedErr.Error() == err.Error()) + } + if !match { + t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err) + } + } + + if err == nil { + listener.Close() + } + } +} + +func TestHTTPListenerStartClose(t *testing.T) { + tlsConfig := getTLSConfig(t) + nonLoopBackIP := getNonLoopBackIP(t) + var casePorts []string + for i := 0; i < 6; i++ { + casePorts = append(casePorts, getNextPort()) + } + + testCases := []struct { + serverAddrs []string + tlsConfig *tls.Config + }{ + {[]string{"localhost:" + casePorts[0]}, nil}, + {[]string{nonLoopBackIP + ":" + casePorts[1]}, nil}, + {[]string{"127.0.0.1:" + casePorts[2], nonLoopBackIP + ":" + casePorts[2]}, nil}, + {[]string{"localhost:" + casePorts[3]}, tlsConfig}, + {[]string{nonLoopBackIP + ":" + casePorts[4]}, tlsConfig}, + {[]string{"127.0.0.1:" + casePorts[5], nonLoopBackIP + ":" + casePorts[5]}, tlsConfig}, + } + + for i, testCase := range testCases { + listener, err := newHTTPListener( + testCase.serverAddrs, + testCase.tlsConfig, + time.Duration(0), + time.Duration(0), + time.Duration(0), + nil, + nil, + nil, + ) + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + for _, serverAddr := range testCase.serverAddrs { + conn, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatalf("Test %d: error: expected = 
, got = %v", i+1, err) + } + conn.Close() + } + + listener.Close() + } +} + +func TestHTTPListenerAddr(t *testing.T) { + tlsConfig := getTLSConfig(t) + nonLoopBackIP := getNonLoopBackIP(t) + var casePorts []string + for i := 0; i < 6; i++ { + casePorts = append(casePorts, getNextPort()) + } + + testCases := []struct { + serverAddrs []string + tlsConfig *tls.Config + expectedAddr string + }{ + {[]string{"localhost:" + casePorts[0]}, nil, "127.0.0.1:" + casePorts[0]}, + {[]string{nonLoopBackIP + ":" + casePorts[1]}, nil, nonLoopBackIP + ":" + casePorts[1]}, + {[]string{"127.0.0.1:" + casePorts[2], nonLoopBackIP + ":" + casePorts[2]}, nil, "0.0.0.0:" + casePorts[2]}, + {[]string{"localhost:" + casePorts[3]}, tlsConfig, "127.0.0.1:" + casePorts[3]}, + {[]string{nonLoopBackIP + ":" + casePorts[4]}, tlsConfig, nonLoopBackIP + ":" + casePorts[4]}, + {[]string{"127.0.0.1:" + casePorts[5], nonLoopBackIP + ":" + casePorts[5]}, tlsConfig, "0.0.0.0:" + casePorts[5]}, + } + + for i, testCase := range testCases { + listener, err := newHTTPListener( + testCase.serverAddrs, + testCase.tlsConfig, + time.Duration(0), + time.Duration(0), + time.Duration(0), + nil, + nil, + nil, + ) + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + addr := listener.Addr() + if addr.String() != testCase.expectedAddr { + t.Fatalf("Test %d: addr: expected = %v, got = %v", i+1, testCase.expectedAddr, addr) + } + + listener.Close() + } +} + +func TestHTTPListenerAddrs(t *testing.T) { + tlsConfig := getTLSConfig(t) + nonLoopBackIP := getNonLoopBackIP(t) + var casePorts []string + for i := 0; i < 6; i++ { + casePorts = append(casePorts, getNextPort()) + } + + testCases := []struct { + serverAddrs []string + tlsConfig *tls.Config + expectedAddrs set.StringSet + }{ + {[]string{"localhost:" + casePorts[0]}, nil, set.CreateStringSet("127.0.0.1:" + casePorts[0])}, + {[]string{nonLoopBackIP + ":" + casePorts[1]}, nil, set.CreateStringSet(nonLoopBackIP + ":" + casePorts[1])}, + {[]string{"127.0.0.1:" + casePorts[2], nonLoopBackIP + ":" + casePorts[2]}, nil, set.CreateStringSet("127.0.0.1:"+casePorts[2], nonLoopBackIP+":"+casePorts[2])}, + {[]string{"localhost:" + casePorts[3]}, tlsConfig, set.CreateStringSet("127.0.0.1:" + casePorts[3])}, + {[]string{nonLoopBackIP + ":" + casePorts[4]}, tlsConfig, set.CreateStringSet(nonLoopBackIP + ":" + casePorts[4])}, + {[]string{"127.0.0.1:" + casePorts[5], nonLoopBackIP + ":" + casePorts[5]}, tlsConfig, set.CreateStringSet("127.0.0.1:"+casePorts[5], nonLoopBackIP+":"+casePorts[5])}, + } + + for i, testCase := range testCases { + listener, err := newHTTPListener( + testCase.serverAddrs, + testCase.tlsConfig, + time.Duration(0), + time.Duration(0), + time.Duration(0), + nil, + nil, + nil, + ) + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + addrs := listener.Addrs() + addrSet := set.NewStringSet() + for _, addr := range addrs { + addrSet.Add(addr.String()) + } + + if !addrSet.Equals(testCase.expectedAddrs) { + t.Fatalf("Test %d: addr: expected = %v, got = %v", i+1, testCase.expectedAddrs, addrs) + } + + listener.Close() + } +} + +func TestHTTPListenerAccept(t *testing.T) { + tlsConfig := getTLSConfig(t) + nonLoopBackIP := getNonLoopBackIP(t) + var casePorts []string + for i := 0; i < 6; i++ { + casePorts = append(casePorts, getNextPort()) + } + + testCases := []struct { + serverAddrs []string + tlsConfig *tls.Config + request string + reply string + }{ + {[]string{"localhost:" + casePorts[0]}, nil, "GET / HTTP/1.0\n", "200 
OK\n"}, + {[]string{nonLoopBackIP + ":" + casePorts[1]}, nil, "POST / HTTP/1.0\n", "200 OK\n"}, + {[]string{"127.0.0.1:" + casePorts[2], nonLoopBackIP + ":" + casePorts[2]}, nil, "CONNECT \n", "200 OK\n"}, + {[]string{"localhost:" + casePorts[3]}, tlsConfig, "GET / HTTP/1.0\n", "200 OK\n"}, + {[]string{nonLoopBackIP + ":" + casePorts[4]}, tlsConfig, "POST / HTTP/1.0\n", "200 OK\n"}, + {[]string{"127.0.0.1:" + casePorts[5], nonLoopBackIP + ":" + casePorts[5]}, tlsConfig, "CONNECT \n", "200 OK\n"}, + } + + for i, testCase := range testCases { + listener, err := newHTTPListener( + testCase.serverAddrs, + testCase.tlsConfig, + time.Duration(0), + time.Duration(0), + time.Duration(0), + nil, + nil, + nil, + ) + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + for _, serverAddr := range testCase.serverAddrs { + var conn net.Conn + var err error + + if testCase.tlsConfig == nil { + conn, err = net.Dial("tcp", serverAddr) + } else { + conn, err = tls.Dial("tcp", serverAddr, &tls.Config{InsecureSkipVerify: true}) + } + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + if _, err = io.WriteString(conn, testCase.request); err != nil { + t.Fatalf("Test %d: request send: expected = , got = %v", i+1, err) + } + + serverConn, err := listener.Accept() + if err != nil { + t.Fatalf("Test %d: accept: expected = , got = %v", i+1, err) + } + + request, err := bufio.NewReader(serverConn).ReadString('\n') + if err != nil { + t.Fatalf("Test %d: request read: expected = , got = %v", i+1, err) + } + + if testCase.request != request { + t.Fatalf("Test %d: request: expected = %v, got = %v", i+1, testCase.request, request) + } + + if _, err = io.WriteString(serverConn, testCase.reply); err != nil { + t.Fatalf("Test %d: reply send: expected = , got = %v", i+1, err) + } + + reply, err := bufio.NewReader(conn).ReadString('\n') + if err != nil { + t.Fatalf("Test %d: reply read: expected = , got = %v", i+1, err) + } + + if testCase.reply != reply { + t.Fatalf("Test %d: reply: expected = %v, got = %v", i+1, testCase.reply, reply) + } + + serverConn.Close() + conn.Close() + } + + listener.Close() + } +} + +func TestHTTPListenerAcceptPeekError(t *testing.T) { + tlsConfig := getTLSConfig(t) + nonLoopBackIP := getNonLoopBackIP(t) + var casePorts []string + for i := 0; i < 2; i++ { + casePorts = append(casePorts, getNextPort()) + } + errorFunc := func(err error, template string, args ...interface{}) { + msg := fmt.Sprintf("error: %v. ", err) + msg += fmt.Sprintf(template, args...) 
+ fmt.Println(msg) + } + + testCases := []struct { + serverAddrs []string + tlsConfig *tls.Config + request string + }{ + {[]string{"127.0.0.1:" + casePorts[0], nonLoopBackIP + ":" + casePorts[0]}, nil, "CONN"}, + {[]string{"127.0.0.1:" + casePorts[1], nonLoopBackIP + ":" + casePorts[1]}, tlsConfig, "CONN"}, + } + + for i, testCase := range testCases { + listener, err := newHTTPListener( + testCase.serverAddrs, + testCase.tlsConfig, + time.Duration(0), + time.Duration(0), + time.Duration(0), + nil, + nil, + errorFunc, + ) + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + go func() { + serverConn, aerr := listener.Accept() + if aerr == nil { + t.Fatalf("Test %d: accept: expected = , got = ", i+1) + } + if serverConn != nil { + t.Fatalf("Test %d: accept: server expected = , got = %v", i+1, serverConn) + } + }() + + for _, serverAddr := range testCase.serverAddrs { + conn, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + if _, err = io.WriteString(conn, testCase.request); err != nil { + t.Fatalf("Test %d: request send: expected = , got = %v", i+1, err) + } + + conn.Close() + } + + listener.Close() + } +} + +func TestHTTPListenerAcceptTLSError(t *testing.T) { + tlsConfig := getTLSConfig(t) + nonLoopBackIP := getNonLoopBackIP(t) + var casePorts []string + for i := 0; i < 1; i++ { + casePorts = append(casePorts, getNextPort()) + } + errorFunc := func(err error, template string, args ...interface{}) { + msg := fmt.Sprintf("error: %v. ", err) + msg += fmt.Sprintf(template, args...) + fmt.Println(msg) + } + + testCases := []struct { + serverAddrs []string + tlsConfig *tls.Config + request string + }{ + {[]string{"127.0.0.1:" + casePorts[0], nonLoopBackIP + ":" + casePorts[0]}, tlsConfig, "GET / HTTP/1.0\n"}, + } + + for i, testCase := range testCases { + listener, err := newHTTPListener( + testCase.serverAddrs, + testCase.tlsConfig, + time.Duration(0), + time.Duration(0), + time.Duration(0), + nil, + nil, + errorFunc, + ) + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + for _, serverAddr := range testCase.serverAddrs { + conn, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + if _, err = io.WriteString(conn, testCase.request); err != nil { + t.Fatalf("Test %d: request send: expected = , got = %v", i+1, err) + } + + go func() { + serverConn, aerr := listener.Accept() + if aerr == nil { + t.Fatalf("Test %d: accept: expected = , got = ", i+1) + } + if serverConn != nil { + t.Fatalf("Test %d: accept: server expected = , got = %v", i+1, serverConn) + } + }() + + buf := make([]byte, len(sslRequiredErrMsg)) + n, err := io.ReadFull(conn, buf) + if err != nil { + t.Fatalf("Test %d: reply read: expected = got = %v", i+1, err) + } else if n != len(buf) { + t.Fatalf("Test %d: reply length: expected = %v got = %v", i+1, len(buf), n) + } else if !bytes.Equal(buf, sslRequiredErrMsg) { + t.Fatalf("Test %d: reply: expected = %v got = %v", i+1, string(sslRequiredErrMsg), string(buf)) + } + + conn.Close() + } + + listener.Close() + } +} + +func TestHTTPListenerAcceptError(t *testing.T) { + tlsConfig := getTLSConfig(t) + nonLoopBackIP := getNonLoopBackIP(t) + var casePorts []string + for i := 0; i < 3; i++ { + casePorts = append(casePorts, getNextPort()) + } + errorFunc := func(err error, template string, args ...interface{}) { + msg := fmt.Sprintf("error: %v. 
", err) + msg += fmt.Sprintf(template, args...) + fmt.Println(msg) + } + + testCases := []struct { + serverAddrs []string + tlsConfig *tls.Config + secureClient bool + request string + }{ + {[]string{"127.0.0.1:" + casePorts[0], nonLoopBackIP + ":" + casePorts[0]}, nil, false, "CONNECTION"}, + {[]string{"127.0.0.1:" + casePorts[1], nonLoopBackIP + ":" + casePorts[1]}, tlsConfig, false, "CONNECTION"}, + {[]string{"127.0.0.1:" + casePorts[2], nonLoopBackIP + ":" + casePorts[2]}, tlsConfig, true, "CONNECTION"}, + } + + for i, testCase := range testCases { + listener, err := newHTTPListener( + testCase.serverAddrs, + testCase.tlsConfig, + time.Duration(0), + time.Duration(0), + time.Duration(0), + nil, + nil, + errorFunc, + ) + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + for _, serverAddr := range testCase.serverAddrs { + var conn net.Conn + var err error + + if testCase.secureClient { + conn, err = tls.Dial("tcp", serverAddr, &tls.Config{InsecureSkipVerify: true}) + } else { + conn, err = net.Dial("tcp", serverAddr) + } + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + if _, err = io.WriteString(conn, testCase.request); err != nil { + t.Fatalf("Test %d: request send: expected = , got = %v", i+1, err) + } + + go func() { + serverConn, aerr := listener.Accept() + if aerr == nil { + t.Fatalf("Test %d: accept: expected = , got = ", i+1) + } + if serverConn != nil { + t.Fatalf("Test %d: accept: server expected = , got = %v", i+1, serverConn) + } + }() + + _, err = bufio.NewReader(conn).ReadString('\n') + if err == nil { + t.Fatalf("Test %d: reply read: expected = EOF got = ", i+1) + } else if err.Error() != "EOF" { + t.Fatalf("Test %d: reply read: expected = EOF got = %v", i+1, err) + } + + conn.Close() + } + + listener.Close() + } +} + +func TestHTTPListenerAcceptParallel(t *testing.T) { + tlsConfig := getTLSConfig(t) + nonLoopBackIP := getNonLoopBackIP(t) + case1Port := getNextPort() + case2Port := getNextPort() + + testCases := []struct { + serverAddrs []string + tlsConfig *tls.Config + reply string + }{ + {[]string{"127.0.0.1:" + case1Port, nonLoopBackIP + ":" + case1Port}, nil, "200 OK\n"}, + {[]string{"127.0.0.1:" + case2Port, nonLoopBackIP + ":" + case2Port}, tlsConfig, "200 OK\n"}, + } + + // As t.Fatalf() is not goroutine safe, use this closure. + fail := func(template string, args ...interface{}) { + fmt.Printf(template, args...) 
+ fmt.Println() + t.Fail() + } + + connect := func(i int, serverAddr string, secure bool, delay bool, request, reply string) { + var conn net.Conn + var err error + + if secure { + conn, err = tls.Dial("tcp", serverAddr, &tls.Config{InsecureSkipVerify: true}) + } else { + conn, err = net.Dial("tcp", serverAddr) + } + if err != nil { + fail("Test %d: error: expected = , got = %v", i+1, err) + } + + if delay { + if _, err = io.WriteString(conn, request[:3]); err != nil { + fail("Test %d: request send: expected = , got = %v", i+1, err) + } + time.Sleep(1 * time.Second) + if _, err = io.WriteString(conn, request[3:]); err != nil { + fail("Test %d: request send: expected = , got = %v", i+1, err) + } + } else { + if _, err = io.WriteString(conn, request); err != nil { + fail("Test %d: request send: expected = , got = %v", i+1, err) + } + } + + received, err := bufio.NewReader(conn).ReadString('\n') + if err != nil { + fail("Test %d: reply read: expected = , got = %v", i+1, err) + } + if received != reply { + fail("Test %d: reply: expected = %v, got = %v", i+1, reply, received) + } + + conn.Close() + } + + handleConnection := func(i int, wg *sync.WaitGroup, serverConn net.Conn, request, reply string) { + wg.Add(1) + defer wg.Done() + + received, err := bufio.NewReader(serverConn).ReadString('\n') + if err != nil { + fail("Test %d: request read: expected = , got = %v", i+1, err) + } + + if received != request { + fail("Test %d: request: expected = %v, got = %v", i+1, request, received) + } + + if _, err := io.WriteString(serverConn, reply); err != nil { + fail("Test %d: reply send: expected = , got = %v", i+1, err) + } + + serverConn.Close() + } + + for i, testCase := range testCases { + listener, err := newHTTPListener( + testCase.serverAddrs, + testCase.tlsConfig, + time.Duration(0), + time.Duration(0), + time.Duration(0), + nil, + nil, + nil, + ) + if err != nil { + t.Fatalf("Test %d: error: expected = , got = %v", i+1, err) + } + + for _, serverAddr := range testCase.serverAddrs { + go connect(i, serverAddr, testCase.tlsConfig != nil, true, "GET /1 HTTP/1.0\n", testCase.reply) + go connect(i, serverAddr, testCase.tlsConfig != nil, false, "GET /2 HTTP/1.0\n", testCase.reply) + + var wg sync.WaitGroup + + serverConn, err := listener.Accept() + if err != nil { + t.Fatalf("Test %d: accept: expected = , got = %v", i+1, err) + } + go handleConnection(i, &wg, serverConn, "GET /2 HTTP/1.0\n", testCase.reply) + + serverConn, err = listener.Accept() + if err != nil { + t.Fatalf("Test %d: accept: expected = , got = %v", i+1, err) + } + go handleConnection(i, &wg, serverConn, "GET /1 HTTP/1.0\n", testCase.reply) + + wg.Wait() + } + + listener.Close() + } +} diff --git a/pkg/http/server.go b/pkg/http/server.go new file mode 100644 index 000000000..5181e1f96 --- /dev/null +++ b/pkg/http/server.go @@ -0,0 +1,173 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
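The tests above repeatedly build listeners on several addresses at once; the mechanism that makes them behave like a single net.Listener is a channel fan-in, with one goroutine per TCP listener feeding a shared accept channel. A trimmed, standalone sketch of that pattern is below (the doneCh shutdown path and the HTTP/TLS peeking are omitted; the names are illustrative, not the package's own).

```
// Standalone sketch of the accept fan-in used by httpListener: each TCP
// listener pushes accepted connections into one shared channel, and a
// single Accept() drains it.
package main

import (
	"fmt"
	"net"
)

type acceptResult struct {
	conn net.Conn
	err  error
}

type multiListener struct {
	listeners []net.Listener
	acceptCh  chan acceptResult
}

func newMultiListener(addrs ...string) (*multiListener, error) {
	ml := &multiListener{acceptCh: make(chan acceptResult)}
	for _, addr := range addrs {
		l, err := net.Listen("tcp", addr)
		if err != nil {
			return nil, err
		}
		ml.listeners = append(ml.listeners, l)
		go func(l net.Listener) {
			for {
				conn, err := l.Accept()
				ml.acceptCh <- acceptResult{conn, err}
				if err != nil {
					return
				}
			}
		}(l)
	}
	return ml, nil
}

// Accept returns whichever listener produced a connection first.
func (ml *multiListener) Accept() (net.Conn, error) {
	r := <-ml.acceptCh
	return r.conn, r.err
}

func main() {
	ml, err := newMultiListener("127.0.0.1:0", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	for _, l := range ml.listeners {
		fmt.Println("listening on", l.Addr())
	}
	// With Addr() and Close() added to satisfy net.Listener, ml could be
	// handed straight to (&http.Server{}).Serve(ml).
}
```

Funnelling every accept through one channel keeps Accept() a plain blocking call for http.Server while still allowing any number of underlying sockets.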
+ */ + +package http + +import ( + "crypto/tls" + "errors" + "net/http" + "sync" + "sync/atomic" + "time" + + humanize "github.com/dustin/go-humanize" +) + +const ( + serverShutdownPoll = 500 * time.Millisecond + + // DefaultShutdownTimeout - default shutdown timeout used for graceful http server shutdown. + DefaultShutdownTimeout = 5 * time.Second + + // DefaultTCPKeepAliveTimeout - default TCP keep alive timeout for accepted connection. + DefaultTCPKeepAliveTimeout = 10 * time.Second + + // DefaultReadTimeout - default timout to read data from accepted connection. + DefaultReadTimeout = 30 * time.Second + + // DefaultWriteTimeout - default timout to write data to accepted connection. + DefaultWriteTimeout = 30 * time.Second + + // DefaultMaxHeaderBytes - default maximum HTTP header size in bytes. + DefaultMaxHeaderBytes = 1 * humanize.MiByte +) + +// Server - extended http.Server supports multiple addresses to serve and enhanced connection handling. +type Server struct { + http.Server + Addrs []string // addresses on which the server listens for new connection. + ShutdownTimeout time.Duration // timeout used for graceful server shutdown. + TCPKeepAliveTimeout time.Duration // timeout used for underneath TCP connection. + UpdateBytesReadFunc func(int) // function to be called to update bytes read in bufConn. + UpdateBytesWrittenFunc func(int) // function to be called to update bytes written in bufConn. + ErrorLogFunc func(error, string, ...interface{}) // function to be called on errors. + listenerMutex *sync.Mutex // to guard 'listener' field. + listener *httpListener // HTTP listener for all 'Addrs' field. + inShutdown uint32 // indicates whether the server is in shutdown or not + requestCount int32 // counter holds no. of request in process. +} + +// Start - start HTTP server +func (srv *Server) Start() (err error) { + // Take a copy of server fields. + tlsConfig := srv.TLSConfig + readTimeout := srv.ReadTimeout + writeTimeout := srv.WriteTimeout + handler := srv.Handler + + addrs := srv.Addrs + tcpKeepAliveTimeout := srv.TCPKeepAliveTimeout + updateBytesReadFunc := srv.UpdateBytesReadFunc + updateBytesWrittenFunc := srv.UpdateBytesWrittenFunc + errorLogFunc := srv.ErrorLogFunc + + // Create new HTTP listener. + var listener *httpListener + listener, err = newHTTPListener( + addrs, + tlsConfig, + tcpKeepAliveTimeout, + readTimeout, + writeTimeout, + updateBytesReadFunc, + updateBytesWrittenFunc, + errorLogFunc, + ) + if err != nil { + return err + } + + // Wrap given handler to do additional + // * return 503 (service unavailable) if the server in shutdown. + wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddInt32(&srv.requestCount, 1) + defer atomic.AddInt32(&srv.requestCount, -1) + + // If server is in shutdown, return 503 (service unavailable) + if atomic.LoadUint32(&srv.inShutdown) != 0 { + w.WriteHeader(http.StatusServiceUnavailable) + return + } + + // Handle request using passed handler. + handler.ServeHTTP(w, r) + }) + + srv.listenerMutex.Lock() + srv.Handler = wrappedHandler + srv.listener = listener + srv.listenerMutex.Unlock() + + // Start servicing with listener. + return srv.Server.Serve(listener) +} + +// Shutdown - shuts down HTTP server. +func (srv *Server) Shutdown() error { + if atomic.AddUint32(&srv.inShutdown, 1) > 1 { + // shutdown in progress + return errors.New("http server already in shutdown") + } + + // Close underneath HTTP listener. 
+ srv.listenerMutex.Lock() + err := srv.listener.Close() + srv.listenerMutex.Unlock() + + // Wait for opened connection to be closed up to Shutdown timeout. + shutdownTimeout := srv.ShutdownTimeout + shutdownTimer := time.NewTimer(shutdownTimeout) + ticker := time.NewTicker(serverShutdownPoll) + defer ticker.Stop() + for { + select { + case <-shutdownTimer.C: + return errors.New("timed out. some connections are still active. doing abnormal shutdown") + case <-ticker.C: + if atomic.LoadInt32(&srv.requestCount) <= 0 { + return err + } + } + } +} + +// NewServer - creates new HTTP server using given arguments. +func NewServer(addrs []string, handler http.Handler, certificate *tls.Certificate) *Server { + var tlsConfig *tls.Config + if certificate != nil { + tlsConfig = &tls.Config{ + PreferServerCipherSuites: true, + MinVersion: tls.VersionTLS12, + NextProtos: []string{"http/1.1", "h2"}, + } + tlsConfig.Certificates = append(tlsConfig.Certificates, *certificate) + } + + httpServer := &Server{ + Addrs: addrs, + ShutdownTimeout: DefaultShutdownTimeout, + TCPKeepAliveTimeout: DefaultTCPKeepAliveTimeout, + listenerMutex: &sync.Mutex{}, + } + httpServer.Handler = handler + httpServer.TLSConfig = tlsConfig + httpServer.ReadTimeout = DefaultReadTimeout + httpServer.WriteTimeout = DefaultWriteTimeout + httpServer.MaxHeaderBytes = DefaultMaxHeaderBytes + + return httpServer +} diff --git a/pkg/http/server_test.go b/pkg/http/server_test.go new file mode 100644 index 000000000..2b1e82ff5 --- /dev/null +++ b/pkg/http/server_test.go @@ -0,0 +1,99 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package http + +import ( + "crypto/tls" + "fmt" + "net/http" + "reflect" + "testing" +) + +func TestNewServer(t *testing.T) { + nonLoopBackIP := getNonLoopBackIP(t) + certificate, err := getTLSCert() + if err != nil { + t.Fatalf("Unable to parse private/certificate data. %v\n", err) + } + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, world") + }) + + testCases := []struct { + addrs []string + handler http.Handler + certificate *tls.Certificate + }{ + {[]string{"127.0.0.1:9000"}, handler, nil}, + {[]string{nonLoopBackIP + ":9000"}, handler, nil}, + {[]string{"127.0.0.1:9000", nonLoopBackIP + ":9000"}, handler, nil}, + {[]string{"127.0.0.1:9000"}, handler, &certificate}, + {[]string{nonLoopBackIP + ":9000"}, handler, &certificate}, + {[]string{"127.0.0.1:9000", nonLoopBackIP + ":9000"}, handler, &certificate}, + } + + for i, testCase := range testCases { + server := NewServer(testCase.addrs, testCase.handler, testCase.certificate) + if server == nil { + t.Fatalf("Case %v: server: expected: , got: ", (i + 1)) + } + + if !reflect.DeepEqual(server.Addrs, testCase.addrs) { + t.Fatalf("Case %v: server.Addrs: expected: %v, got: %v", (i + 1), testCase.addrs, server.Addrs) + } + + // Interfaces are not comparable even with reflection. 
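Putting the new package together, here is a hedged usage sketch of the exported surface added above — NewServer, Start and Shutdown — as a caller outside the package might wire it up. The import alias, port and handler are illustrative, and the import path assumes the package lives at pkg/http in this repository.

```
// Illustrative caller of the new pkg/http Server: construct with NewServer,
// run Start in a goroutine (it blocks serving the wrapped listener), make
// one request, then Shutdown for a graceful stop.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"time"

	xhttp "github.com/minio/minio/pkg/http" // assumed import path for pkg/http
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello from minio pkg/http")
	})

	// No certificate => plain HTTP; pass a *tls.Certificate to serve TLS.
	srv := xhttp.NewServer([]string{"127.0.0.1:9090"}, handler, nil)

	go func() {
		// Start blocks until the listener is closed by Shutdown().
		if err := srv.Start(); err != nil {
			log.Println("server stopped:", err)
		}
	}()

	time.Sleep(100 * time.Millisecond) // give the listener time to come up

	resp, err := http.Get("http://127.0.0.1:9090/")
	if err != nil {
		log.Fatal(err)
	}
	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Printf("%s", body)

	// Returns once in-flight requests drain or ShutdownTimeout elapses.
	if err := srv.Shutdown(); err != nil {
		log.Println("shutdown:", err)
	}
}
```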
+ // if !reflect.DeepEqual(server.Handler, testCase.handler) { + // t.Fatalf("Case %v: server.Handler: expected: %v, got: %v", (i + 1), testCase.handler, server.Handler) + // } + + if testCase.certificate == nil { + if server.TLSConfig != nil { + t.Fatalf("Case %v: server.TLSConfig: expected: , got: %v", (i + 1), server.TLSConfig) + } + } else { + if server.TLSConfig == nil { + t.Fatalf("Case %v: server.TLSConfig: expected: , got: ", (i + 1)) + } + } + + if server.ShutdownTimeout != DefaultShutdownTimeout { + t.Fatalf("Case %v: server.ShutdownTimeout: expected: %v, got: %v", (i + 1), DefaultShutdownTimeout, server.ShutdownTimeout) + } + + if server.TCPKeepAliveTimeout != DefaultTCPKeepAliveTimeout { + t.Fatalf("Case %v: server.TCPKeepAliveTimeout: expected: %v, got: %v", (i + 1), DefaultTCPKeepAliveTimeout, server.TCPKeepAliveTimeout) + } + + if server.listenerMutex == nil { + t.Fatalf("Case %v: server.listenerMutex: expected: , got: ", (i + 1)) + } + + if server.ReadTimeout != DefaultReadTimeout { + t.Fatalf("Case %v: server.ReadTimeout: expected: %v, got: %v", (i + 1), DefaultReadTimeout, server.ReadTimeout) + } + + if server.WriteTimeout != DefaultWriteTimeout { + t.Fatalf("Case %v: server.WriteTimeout: expected: %v, got: %v", (i + 1), DefaultWriteTimeout, server.WriteTimeout) + } + + if server.MaxHeaderBytes != DefaultMaxHeaderBytes { + t.Fatalf("Case %v: server.MaxHeaderBytes: expected: %v, got: %v", (i + 1), DefaultMaxHeaderBytes, server.MaxHeaderBytes) + } + } +} diff --git a/pkg/madmin/api.go b/pkg/madmin/api.go index ef010583b..713ea647b 100644 --- a/pkg/madmin/api.go +++ b/pkg/madmin/api.go @@ -446,7 +446,7 @@ func (c AdminClient) newRequest(method string, reqData requestData) (req *http.R } // Add signature version '4' authorization header. - req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location) + req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "", location) // Return request. return req, nil diff --git a/pkg/x/os/stat_windows_test.go b/pkg/x/os/stat_windows_test.go index 27ede7257..2e063096f 100644 --- a/pkg/x/os/stat_windows_test.go +++ b/pkg/x/os/stat_windows_test.go @@ -71,9 +71,6 @@ func sameFile(fi1, fi2 os1.FileInfo) bool { } func TestNetworkSymbolicLink(t *testing.T) { - // Skipping this test for now - appveyor builds are failing. - t.Skip() - dir, err := ioutil.TempDir("", "TestNetworkSymbolicLink") if err != nil { t.Fatal(err) diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 000000000..a4c5efd82 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 000000000..e708c031b --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,437 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + metaClient = &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + } + subscribeClient = &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + } +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + val, _, err := getETag(metaClient, suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag using the provided client. This func is otherwise equivalent to Get. +func getETag(client *http.Client, suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. 
To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := client.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := ctxhttp.Do(ctx, metaClient, req) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. 
Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := getETag(subscribeClient, suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return projID.get() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return projNum.get() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { + return getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { + var s []string + j, err := Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { + return instID.get() +} + +// InstanceName returns the current VM's instance ID string. 
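+//
+// A minimal usage sketch (illustrative only; assumes the process runs on
+// GCE and the usual imports, error handling shortened):
+//
+//	if metadata.OnGCE() {
+//		name, err := metadata.InstanceName()
+//		if err != nil {
+//			// handle the error
+//		}
+//		log.Printf("instance name: %s", name)
+//	}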
+func InstanceName() (string, error) { + host, err := Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { + zone, err := getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go new file mode 100644 index 000000000..8722ee883 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -0,0 +1,256 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iam supports the resource-specific operations of Google Cloud +// IAM (Identity and Access Management) for the Google Cloud Libraries. 
+// See https://cloud.google.com/iam for more about IAM. +// +// Users of the Google Cloud Libraries will typically not use this package +// directly. Instead they will begin with some resource that supports IAM, like +// a pubsub topic, and call its IAM method to get a Handle for that resource. +package iam + +import ( + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" +) + +// client abstracts the IAMPolicy API to allow multiple implementations. +type client interface { + Get(ctx context.Context, resource string) (*pb.Policy, error) + Set(ctx context.Context, resource string, p *pb.Policy) error + Test(ctx context.Context, resource string, perms []string) ([]string, error) +} + +// grpcClient implements client for the standard gRPC-based IAMPolicy service. +type grpcClient struct { + c pb.IAMPolicyClient +} + +func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { + proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) + if err != nil { + return nil, err + } + return proto, nil +} +func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { + _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ + Resource: resource, + Policy: p, + }) + return err +} + +func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { + res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ + Resource: resource, + Permissions: perms, + }) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// A Handle provides IAM operations for a resource. +type Handle struct { + c client + resource string +} + +// InternalNewHandle is for use by the Google Cloud Libraries only. +// +// InternalNewHandle returns a Handle for resource. +// The conn parameter refers to a server that must support the IAMPolicy service. +func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { + return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource) +} + +// InternalNewHandleClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleClient returns a Handle for resource using the given +// client implementation. +func InternalNewHandleClient(c client, resource string) *Handle { + return &Handle{ + c: c, + resource: resource, + } +} + +// Policy retrieves the IAM policy for the resource. +func (h *Handle) Policy(ctx context.Context) (*Policy, error) { + proto, err := h.c.Get(ctx, h.resource) + if err != nil { + return nil, err + } + return &Policy{InternalProto: proto}, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. +// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. +func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { + return h.c.Set(ctx, h.resource, policy.InternalProto) +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. +func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} + +// A RoleName is a name representing a collection of permissions. +type RoleName string + +// Common role names. 
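+//
+// These role names are typically passed to Policy.Add and Policy.Remove.
+// A hedged sketch of granting a role, assuming an existing *iam.Handle h
+// (the member string is a hypothetical example, error handling elided):
+//
+//	policy, err := h.Policy(ctx)
+//	if err != nil {
+//		// handle the error
+//	}
+//	policy.Add("user:alice@example.com", iam.Editor)
+//	if err := h.SetPolicy(ctx, policy); err != nil {
+//		// handle the error
+//	}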
+const ( + Owner RoleName = "roles/owner" + Editor RoleName = "roles/editor" + Viewer RoleName = "roles/viewer" +) + +const ( + // AllUsers is a special member that denotes all users, even unauthenticated ones. + AllUsers = "allUsers" + + // AllAuthenticatedUsers is a special member that denotes all authenticated users. + AllAuthenticatedUsers = "allAuthenticatedUsers" +) + +// A Policy is a list of Bindings representing roles +// granted to members. +// +// The zero Policy is a valid policy with no bindings. +type Policy struct { + // TODO(jba): when type aliases are available, put Policy into an internal package + // and provide an exported alias here. + + // This field is exported for use by the Google Cloud Libraries only. + // It may become unexported in a future release. + InternalProto *pb.Policy +} + +// Members returns the list of members with the supplied role. +// The return value should not be modified. Use Add and Remove +// to modify the members of a role. +func (p *Policy) Members(r RoleName) []string { + b := p.binding(r) + if b == nil { + return nil + } + return b.Members +} + +// HasRole reports whether member has role r. +func (p *Policy) HasRole(member string, r RoleName) bool { + return memberIndex(member, p.binding(r)) >= 0 +} + +// Add adds member member to role r if it is not already present. +// A new binding is created if there is no binding for the role. +func (p *Policy) Add(member string, r RoleName) { + b := p.binding(r) + if b == nil { + if p.InternalProto == nil { + p.InternalProto = &pb.Policy{} + } + p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ + Role: string(r), + Members: []string{member}, + }) + return + } + if memberIndex(member, b) < 0 { + b.Members = append(b.Members, member) + return + } +} + +// Remove removes member from role r if it is present. +func (p *Policy) Remove(member string, r RoleName) { + bi := p.bindingIndex(r) + if bi < 0 { + return + } + bindings := p.InternalProto.Bindings + b := bindings[bi] + mi := memberIndex(member, b) + if mi < 0 { + return + } + // Order doesn't matter for bindings or members, so to remove, move the last item + // into the removed spot and shrink the slice. + if len(b.Members) == 1 { + // Remove binding. + last := len(bindings) - 1 + bindings[bi] = bindings[last] + bindings[last] = nil + p.InternalProto.Bindings = bindings[:last] + return + } + // Remove member. + // TODO(jba): worry about multiple copies of m? + last := len(b.Members) - 1 + b.Members[mi] = b.Members[last] + b.Members[last] = "" + b.Members = b.Members[:last] +} + +// Roles returns the names of all the roles that appear in the Policy. +func (p *Policy) Roles() []RoleName { + if p.InternalProto == nil { + return nil + } + var rns []RoleName + for _, b := range p.InternalProto.Bindings { + rns = append(rns, RoleName(b.Role)) + } + return rns +} + +// binding returns the Binding for the suppied role, or nil if there isn't one. +func (p *Policy) binding(r RoleName) *pb.Binding { + i := p.bindingIndex(r) + if i < 0 { + return nil + } + return p.InternalProto.Bindings[i] +} + +func (p *Policy) bindingIndex(r RoleName) int { + if p.InternalProto == nil { + return -1 + } + for i, b := range p.InternalProto.Bindings { + if b.Role == string(r) { + return i + } + } + return -1 +} + +// memberIndex returns the index of m in b's Members, or -1 if not found. 
+func memberIndex(m string, b *pb.Binding) int { + if b == nil { + return -1 + } + for i, mm := range b.Members { + if mm == m { + return i + } + } + return -1 +} diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go new file mode 100644 index 000000000..f9102f365 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/optional/optional.go @@ -0,0 +1,94 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package optional provides versions of primitive types that can +// be nil. These are useful in methods that update some of an API object's +// fields. +package optional + +import ( + "fmt" + "strings" +) + +type ( + // Bool is either a bool or nil. + Bool interface{} + + // String is either a string or nil. + String interface{} + + // Int is either an int or nil. + Int interface{} + + // Uint is either a uint or nil. + Uint interface{} + + // Float64 is either a float64 or nil. + Float64 interface{} +) + +// ToBool returns its argument as a bool. +// It panics if its argument is nil or not a bool. +func ToBool(v Bool) bool { + x, ok := v.(bool) + if !ok { + doPanic("Bool", v) + } + return x +} + +// ToString returns its argument as a string. +// It panics if its argument is nil or not a string. +func ToString(v String) string { + x, ok := v.(string) + if !ok { + doPanic("String", v) + } + return x +} + +// ToInt returns its argument as an int. +// It panics if its argument is nil or not an int. +func ToInt(v Int) int { + x, ok := v.(int) + if !ok { + doPanic("Int", v) + } + return x +} + +// ToUint returns its argument as a uint. +// It panics if its argument is nil or not a uint. +func ToUint(v Uint) uint { + x, ok := v.(uint) + if !ok { + doPanic("Uint", v) + } + return x +} + +// ToFloat64 returns its argument as a float64. +// It panics if its argument is nil or not a float64. +func ToFloat64(v Float64) float64 { + x, ok := v.(float64) + if !ok { + doPanic("Float64", v) + } + return x +} + +func doPanic(capType string, v interface{}) { + panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v)) +} diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go new file mode 100644 index 000000000..f554fbf8f --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry.go @@ -0,0 +1,56 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + "time" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" +) + +// Retry calls the supplied function f repeatedly according to the provided +// backoff parameters. It returns when one of the following occurs: +// When f's first return value is true, Retry immediately returns with f's second +// return value. +// When the provided context is done, Retry returns with an error that +// includes both ctx.Error() and the last error returned by f. +func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { + return retry(ctx, bo, f, gax.Sleep) +} + +func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), + sleep func(context.Context, time.Duration) error) error { + var lastErr error + for { + stop, err := f() + if stop { + return err + } + // Remember the last "real" error from f. + if err != nil && err != context.Canceled && err != context.DeadlineExceeded { + lastErr = err + } + p := bo.Pause() + if cerr := sleep(ctx, p); cerr != nil { + if lastErr != nil { + return fmt.Errorf("%v; last function err: %v", cerr, lastErr) + } + return cerr + } + } +} diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh new file mode 100755 index 000000000..fecf1f03f --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/update_version.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +today=$(date +%Y%m%d) + +sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE + diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 000000000..33f1cdb60 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. +package version + +import ( + "runtime" + "strings" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. +const Repo = "20170404" + +// Go returns the Go runtime version. The returned string +// has no whitespace. 
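+//
+// A few hedged examples of the normalization performed by goVer below;
+// the inputs mirror typical runtime.Version() values and the outputs
+// follow from the code, not from separate documentation:
+//
+//	goVer("go1.8")                   // "1.8.0"
+//	goVer("go1.8.1")                 // "1.8.1"
+//	goVer("devel +abc123 Tue Jan 1") // "abc123"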
+func Go() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return strings.IndexRune("0123456789.", r) < 0 +} diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go new file mode 100644 index 000000000..041e54132 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -0,0 +1,241 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "fmt" + + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +// ACLRole is the level of access to grant. +type ACLRole string + +const ( + RoleOwner ACLRole = "OWNER" + RoleReader ACLRole = "READER" + RoleWriter ACLRole = "WRITER" +) + +// ACLEntity refers to a user or group. +// They are sometimes referred to as grantees. +// +// It could be in the form of: +// "user-", "user-", "group-", "group-", +// "domain-" and "project-team-". +// +// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. +type ACLEntity string + +const ( + AllUsers ACLEntity = "allUsers" + AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" +) + +// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket. +type ACLRule struct { + Entity ACLEntity + Role ACLRole +} + +// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. +type ACLHandle struct { + c *Client + bucket string + object string + isDefault bool +} + +// Delete permanently deletes the ACL entry for the given entity. +func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error { + if a.object != "" { + return a.objectDelete(ctx, entity) + } + if a.isDefault { + return a.bucketDefaultDelete(ctx, entity) + } + return a.bucketDelete(ctx, entity) +} + +// Set sets the permission level for the given entity. +func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error { + if a.object != "" { + return a.objectSet(ctx, entity, role) + } + if a.isDefault { + return a.bucketDefaultSet(ctx, entity, role) + } + return a.bucketSet(ctx, entity, role) +} + +// List retrieves ACL entries. 
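+//
+// A short usage sketch, mirroring the pattern in the package
+// documentation (object handle obj and ctx assumed to exist,
+// error handling elided):
+//
+//	rules, err := obj.ACL().List(ctx)
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, rule := range rules {
+//		fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
+//	}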
+func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) { + if a.object != "" { + return a.objectList(ctx) + } + if a.isDefault { + return a.bucketDefaultList(ctx) + } + return a.bucketList(ctx) +} + +func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx) + setClientHeader(req.Header()) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err) + } + return toACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx) + setClientHeader(req.Header()) + _, err := req.Do() + return err + }) + if err != nil { + return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { + err := runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx) + setClientHeader(req.Header()) + return req.Do() + }) + if err != nil { + return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.BucketAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx) + setClientHeader(req.Header()) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err) + } + r := make([]ACLRule, len(acls.Items)) + for i, v := range acls.Items { + r[i].Entity = ACLEntity(v.Entity) + r[i].Role = ACLRole(v.Role) + } + return r, nil +} + +func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.BucketAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx) + setClientHeader(req.Header()) + _, err := req.Do() + return err + }) + if err != nil { + return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { + err := runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx) + setClientHeader(req.Header()) + return req.Do() + }) + if err != nil { + return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx) + setClientHeader(req.Header()) + acls, err 
= req.Do() + return err + }) + if err != nil { + return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err) + } + return toACLRules(acls.Items), nil +} + +func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx) + setClientHeader(req.Header()) + _, err := req.Do() + return err + }) + if err != nil { + return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err) + } + return nil +} + +func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { + err := runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx) + setClientHeader(req.Header()) + return req.Do() + }) + if err != nil { + return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err) + } + return nil +} + +func toACLRules(items []*raw.ObjectAccessControl) []ACLRule { + r := make([]ACLRule, 0, len(items)) + for _, item := range items { + r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)}) + } + return r +} diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go new file mode 100644 index 000000000..deed5b23b --- /dev/null +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -0,0 +1,337 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + raw "google.golang.org/api/storage/v1" +) + +// Create creates the Bucket in the project. +// If attrs is nil the API defaults will be used. +func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error { + var bkt *raw.Bucket + if attrs != nil { + bkt = attrs.toRawBucket() + } else { + bkt = &raw.Bucket{} + } + bkt.Name = b.name + req := b.c.raw.Buckets.Insert(projectID, bkt) + setClientHeader(req.Header()) + return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err }) +} + +// Delete deletes the Bucket. +func (b *BucketHandle) Delete(ctx context.Context) error { + req := b.c.raw.Buckets.Delete(b.name) + setClientHeader(req.Header()) + return runWithRetry(ctx, func() error { return req.Context(ctx).Do() }) +} + +// ACL returns an ACLHandle, which provides access to the bucket's access control list. +// This controls who can list, create or overwrite the objects in a bucket. +// This call does not perform any network operations. 
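+//
+// A brief hedged sketch granting public read access on a bucket
+// (the bucket name is hypothetical, error handling elided):
+//
+//	acl := client.Bucket("my-bucket").ACL()
+//	if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
+//		// handle the error
+//	}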
+func (b *BucketHandle) ACL() *ACLHandle { + return &b.acl +} + +// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. +// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. +// This call does not perform any network operations. +func (b *BucketHandle) DefaultObjectACL() *ACLHandle { + return &b.defaultObjectACL +} + +// Object returns an ObjectHandle, which provides operations on the named object. +// This call does not perform any network operations. +// +// name must consist entirely of valid UTF-8-encoded runes. The full specification +// for valid object names can be found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (b *BucketHandle) Object(name string) *ObjectHandle { + return &ObjectHandle{ + c: b.c, + bucket: b.name, + object: name, + acl: ACLHandle{ + c: b.c, + bucket: b.name, + object: name, + }, + gen: -1, + } +} + +// Attrs returns the metadata for the bucket. +func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { + req := b.c.raw.Buckets.Get(b.name).Projection("full") + setClientHeader(req.Header()) + var resp *raw.Bucket + var err error + err = runWithRetry(ctx, func() error { + resp, err = req.Context(ctx).Do() + return err + }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp), nil +} + +// BucketAttrs represents the metadata for a Google Cloud Storage bucket. +type BucketAttrs struct { + // Name is the name of the bucket. + Name string + + // ACL is the list of access control rules on the bucket. + ACL []ACLRule + + // DefaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + DefaultObjectACL []ACLRule + + // Location is the location of the bucket. It defaults to "US". + Location string + + // MetaGeneration is the metadata generation of the bucket. + MetaGeneration int64 + + // StorageClass is the default storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "MULTI_REGIONAL", + // "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and + // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which + // is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on + // the bucket's location settings. + StorageClass string + + // Created is the creation time of the bucket. + Created time.Time + + // VersioningEnabled reports whether this bucket has versioning enabled. + // This field is read-only. + VersioningEnabled bool +} + +func newBucket(b *raw.Bucket) *BucketAttrs { + if b == nil { + return nil + } + bucket := &BucketAttrs{ + Name: b.Name, + Location: b.Location, + MetaGeneration: b.Metageneration, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, + } + acl := make([]ACLRule, len(b.Acl)) + for i, rule := range b.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.ACL = acl + objACL := make([]ACLRule, len(b.DefaultObjectAcl)) + for i, rule := range b.DefaultObjectAcl { + objACL[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.DefaultObjectACL = objACL + return bucket +} + +// toRawBucket copies the editable attribute from b to the raw library's Bucket type. 
+func (b *BucketAttrs) toRawBucket() *raw.Bucket { + var acl []*raw.BucketAccessControl + if len(b.ACL) > 0 { + acl = make([]*raw.BucketAccessControl, len(b.ACL)) + for i, rule := range b.ACL { + acl[i] = &raw.BucketAccessControl{ + Entity: string(rule.Entity), + Role: string(rule.Role), + } + } + } + dACL := toRawObjectACL(b.DefaultObjectACL) + return &raw.Bucket{ + Name: b.Name, + DefaultObjectAcl: dACL, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: acl, + } +} + +// Objects returns an iterator over the objects in the bucket that match the Query q. +// If q is nil, no filtering is done. +func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { + it := &ObjectIterator{ + ctx: ctx, + bucket: b, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + if q != nil { + it.query = *q + } + return it +} + +// An ObjectIterator is an iterator over ObjectAttrs. +type ObjectIterator struct { + ctx context.Context + bucket *BucketHandle + query Query + pageInfo *iterator.PageInfo + nextFunc func() error + items []*ObjectAttrs +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +// +// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will +// have a non-empty Prefix field, and a zero value for all other fields. These +// represent prefixes. +func (it *ObjectIterator) Next() (*ObjectAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.bucket.c.raw.Objects.List(it.bucket.name) + setClientHeader(req.Header()) + req.Projection("full") + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.Versions(it.query.Versions) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Objects + var err error + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + err = ErrBucketNotExist + } + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil +} + +// TODO(jbd): Add storage.buckets.update. + +// Buckets returns an iterator over the buckets in the project. You may +// optionally set the iterator's Prefix field to restrict the list to buckets +// whose names begin with the prefix. By default, all buckets in the project +// are returned. +func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { + it := &BucketIterator{ + ctx: ctx, + client: c, + projectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + return it +} + +// A BucketIterator is an iterator over BucketAttrs. 
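+//
+// A hedged iteration sketch (client, ctx and projectID assumed to exist;
+// error handling shortened):
+//
+//	it := client.Buckets(ctx, projectID)
+//	for {
+//		battrs, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// handle the error
+//			break
+//		}
+//		fmt.Println(battrs.Name)
+//	}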
+type BucketIterator struct { + // Prefix restricts the iterator to buckets whose names begin with it. + Prefix string + + ctx context.Context + client *Client + projectID string + buckets []*BucketAttrs + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +func (it *BucketIterator) Next() (*BucketAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + b := it.buckets[0] + it.buckets = it.buckets[1:] + return b, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.client.raw.Buckets.List(it.projectID) + setClientHeader(req.Header()) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Buckets + var err error + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + return "", err + } + for _, item := range resp.Items { + it.buckets = append(it.buckets, newBucket(item)) + } + return resp.NextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go new file mode 100644 index 000000000..16e28e382 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -0,0 +1,189 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "fmt" + + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +// CopierFrom creates a Copier that can copy src to dst. +// You can immediately call Run on the returned Copier, or +// you can configure it first. +func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { + return &Copier{dst: dst, src: src} +} + +// A Copier copies a source object to a destination. +type Copier struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Copier. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + // RewriteToken can be set before calling Run to resume a copy + // operation. After Run returns a non-nil error, RewriteToken will + // have been updated to contain the value needed to resume the copy. + RewriteToken string + + // ProgressFunc can be used to monitor the progress of a multi-RPC copy + // operation. 
If ProgressFunc is not nil and CopyFrom requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then + // ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far and the total size in bytes of the source object. + // + // ProgressFunc is intended to make upload progress available to the + // application. For example, the implementation of ProgressFunc may update + // a progress bar in the application's UI, or log the result of + // float64(copiedBytes)/float64(totalBytes). + // + // ProgressFunc should return quickly without blocking. + ProgressFunc func(copiedBytes, totalBytes uint64) + + dst, src *ObjectHandle +} + +// Run performs the copy. +func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) { + if err := c.src.validate(); err != nil { + return nil, err + } + if err := c.dst.validate(); err != nil { + return nil, err + } + // Convert destination attributes to raw form, omitting the bucket. + // If the bucket is included but name or content-type aren't, the service + // returns a 400 with "Required" as the only message. Omitting the bucket + // does not cause any problems. + rawObject := c.ObjectAttrs.toRawObject("") + for { + res, err := c.callRewrite(ctx, c.src, rawObject) + if err != nil { + return nil, err + } + if c.ProgressFunc != nil { + c.ProgressFunc(res.TotalBytesRewritten, res.ObjectSize) + } + if res.Done { // Finished successfully. + return newObject(res.Resource), nil + } + } + return nil, nil +} + +func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw.Object) (*raw.RewriteResponse, error) { + call := c.dst.c.raw.Objects.Rewrite(src.bucket, src.object, c.dst.bucket, c.dst.object, rawObj) + + call.Context(ctx).Projection("full") + if c.RewriteToken != "" { + call.RewriteToken(c.RewriteToken) + } + if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { + return nil, err + } + var res *raw.RewriteResponse + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) + if err != nil { + return nil, err + } + c.RewriteToken = res.RewriteToken + return res, nil +} + +// ComposerFrom creates a Composer that can compose srcs into dst. +// You can immediately call Run on the returned Composer, or you can +// configure it first. +// +// The encryption key for the destination object will be used to decrypt all +// source objects and encrypt the destination object. It is an error +// to specify an encryption key for any of the source objects. +func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { + return &Composer{dst: dst, srcs: srcs} +} + +// A Composer composes source objects into a destination object. +type Composer struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Composer. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + dst *ObjectHandle + srcs []*ObjectHandle +} + +// Run performs the compose operation. 
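+//
+// A hedged sketch composing two source objects into a destination object
+// in the same bucket (all names hypothetical, error handling elided):
+//
+//	bkt := client.Bucket("my-bucket")
+//	composer := bkt.Object("combined").ComposerFrom(bkt.Object("part1"), bkt.Object("part2"))
+//	attrs, err := composer.Run(ctx)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = attrs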
+func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) { + if err := c.dst.validate(); err != nil { + return nil, err + } + if len(c.srcs) == 0 { + return nil, errors.New("storage: at least one source object must be specified") + } + + req := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. + req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) + for _, src := range c.srcs { + if err := src.validate(); err != nil { + return nil, err + } + if src.bucket != c.dst.bucket { + return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) + } + if src.encryptionKey != nil { + return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) + } + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.object, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + req.SourceObjects = append(req.SourceObjects, srcObj) + } + + call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) + if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if err != nil { + return nil, err + } + return newObject(obj), nil +} diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go new file mode 100644 index 000000000..951391f54 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -0,0 +1,161 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package storage provides an easy way to work with Google Cloud Storage. +Google Cloud Storage stores data in named objects, which are grouped into buckets. + +More information about Google Cloud Storage is available at +https://cloud.google.com/storage/docs. + +All of the methods of this package use exponential backoff to retry calls +that fail with certain errors, as described in +https://cloud.google.com/storage/docs/exponential-backoff. + +Note: This package is in beta. Some backwards-incompatible changes may occur. + + +Creating a Client + +To start working with this package, create a client: + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + +Buckets + +A Google Cloud Storage bucket is a collection of objects. To work with a +bucket, make a bucket handle: + + bkt := client.Bucket(bucketName) + +A handle is a reference to a bucket. You can have a handle even if the +bucket doesn't exist yet. 
To create a bucket in Google Cloud Storage, +call Create on the handle: + + if err := bkt.Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } + +Note that although buckets are associated with projects, bucket names are +global across all projects. + +Each bucket has associated metadata, represented in this package by +BucketAttrs. The third argument to BucketHandle.Create allows you to set +the intial BucketAttrs of a bucket. To retrieve a bucket's attributes, use +Attrs: + + attrs, err := bkt.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", + attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) + +Objects + +An object holds arbitrary data as a sequence of bytes, like a file. You +refer to objects using a handle, just as with buckets. You can use the +standard Go io.Reader and io.Writer interfaces to read and write +object data: + + obj := bkt.Object("data") + // Write something to obj. + // w implements io.Writer. + w := obj.NewWriter(ctx) + // Write some text to obj. This will overwrite whatever is there. + if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { + // TODO: Handle error. + } + // Close, just like writing a file. + if err := w.Close(); err != nil { + // TODO: Handle error. + } + + // Read it back. + r, err := obj.NewReader(ctx) + if err != nil { + // TODO: Handle error. + } + defer r.Close() + if _, err := io.Copy(os.Stdout, r); err != nil { + // TODO: Handle error. + } + // Prints "This object contains text." + +Objects also have attributes, which you can fetch with Attrs: + + objAttrs, err := obj.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("object %s has size %d and can be read using %s\n", + objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) + +ACLs + +Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of +ACLRules, each of which specifies the role of a user, group or project. ACLs +are suitable for fine-grained control, but you may prefer using IAM to control +access at the project level (see +https://cloud.google.com/storage/docs/access-control/iam). + +To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: + + acls, err := obj.ACL().List(ctx) + if err != nil { + // TODO: Handle error. + } + for _, rule := range acls { + fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) + } + +You can also set and delete ACLs. + +Conditions + +Every object has a generation and a metageneration. The generation changes +whenever the content changes, and the metageneration changes whenever the +metadata changes. Conditions let you check these values before an operation; +the operation only executes if the conditions match. You can use conditions to +prevent race conditions in read-modify-write operations. + +For example, say you've read an object's metadata into objAttrs. Now +you want to write to that object, but only if its contents haven't changed +since you read it. Here is how to express that: + + w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) + // Proceed with writing as above. + +Signed URLs + +You can obtain a URL that lets anyone read or write an object for a limited time. +You don't need to create a client to do this. See the documentation of +SignedURL for details. + + url, err := storage.SignedURL(bucketName, "shared-object", opts) + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println(url) + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/go17.go b/vendor/cloud.google.com/go/storage/go17.go new file mode 100644 index 000000000..982db4e1a --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go17.go @@ -0,0 +1,26 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package storage + +import ( + "context" + "net/http" +) + +func withContext(r *http.Request, ctx context.Context) *http.Request { + return r.WithContext(ctx) +} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go new file mode 100644 index 000000000..6607d8cc2 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -0,0 +1,108 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "cloud.google.com/go/iam" + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// IAM provides access to IAM access control for the bucket. +func (b *BucketHandle) IAM() *iam.Handle { + return iam.InternalNewHandleClient(&iamClient{raw: b.c.raw}, b.name) +} + +// iamClient implements the iam.client interface. 
+type iamClient struct { + raw *raw.Service +} + +func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) { + req := c.raw.Buckets.GetIamPolicy(resource) + setClientHeader(req.Header()) + var rp *raw.Policy + var err error + err = runWithRetry(ctx, func() error { + rp, err = req.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return iamFromStoragePolicy(rp), nil +} + +func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error { + rp := iamToStoragePolicy(p) + req := c.raw.Buckets.SetIamPolicy(resource, rp) + setClientHeader(req.Header()) + return runWithRetry(ctx, func() error { + _, err := req.Context(ctx).Do() + return err + }) +} + +func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { + req := c.raw.Buckets.TestIamPermissions(resource, perms) + setClientHeader(req.Header()) + var res *raw.TestIamPermissionsResponse + var err error + err = runWithRetry(ctx, func() error { + res, err = req.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { + return &raw.Policy{ + Bindings: iamToStorageBindings(ip.Bindings), + Etag: string(ip.Etag), + } +} + +func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { + var rbs []*raw.PolicyBindings + for _, ib := range ibs { + rbs = append(rbs, &raw.PolicyBindings{ + Role: ib.Role, + Members: ib.Members, + }) + } + return rbs +} + +func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { + return &iampb.Policy{ + Bindings: iamFromStorageBindings(rp.Bindings), + Etag: []byte(rp.Etag), + } +} + +func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { + var ibs []*iampb.Binding + for _, rb := range rbs { + ibs = append(ibs, &iampb.Binding{ + Role: rb.Role, + Members: rb.Members, + }) + } + return ibs +} diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go new file mode 100644 index 000000000..e8fc924ea --- /dev/null +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -0,0 +1,43 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "cloud.google.com/go/internal" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" +) + +// runWithRetry calls the function until it returns nil or a non-retryable error, or +// the context is done. +func runWithRetry(ctx context.Context, call func() error) error { + return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + err = call() + if err == nil { + return true, nil + } + e, ok := err.(*googleapi.Error) + if !ok { + return true, err + } + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. 
+ if e.Code == 429 || (e.Code >= 500 && e.Code < 600) { + return false, nil + } + return true, err + }) +} diff --git a/vendor/cloud.google.com/go/storage/not_go17.go b/vendor/cloud.google.com/go/storage/not_go17.go new file mode 100644 index 000000000..1f6f7ae95 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/not_go17.go @@ -0,0 +1,26 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.7 + +package storage + +import ( + "net/http" +) + +func withContext(r *http.Request, _ interface{}) *http.Request { + // In Go 1.6 and below, ignore the context. + return r +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go new file mode 100644 index 000000000..aa103c175 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -0,0 +1,74 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "fmt" + "hash/crc32" + "io" +) + +var crc32cTable = crc32.MakeTable(crc32.Castagnoli) + +// Reader reads a Cloud Storage object. +// It implements io.Reader. +type Reader struct { + body io.ReadCloser + remain, size int64 + contentType string + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc +} + +// Close closes the Reader. It must be called when done reading. +func (r *Reader) Close() error { + return r.body.Close() +} + +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.body.Read(p) + if r.remain != -1 { + r.remain -= int64(n) + } + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) + // Check CRC here. It would be natural to check it in Close, but + // everybody defers Close on the assumption that it doesn't return + // anything worth looking at. + if r.remain == 0 && r.gotCRC != r.wantCRC { + return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", + r.gotCRC, r.wantCRC) + } + } + return n, err +} + +// Size returns the size of the object in bytes. +// The returned value is always the same and is not affected by +// calls to Read or Close. +func (r *Reader) Size() int64 { + return r.size +} + +// Remain returns the number of bytes left to read, or -1 if unknown. +func (r *Reader) Remain() int64 { + return r.remain +} + +// ContentType returns the content type of the object. 
+func (r *Reader) ContentType() string { + return r.contentType +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go new file mode 100644 index 000000000..6be544e0b --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -0,0 +1,1136 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/api/option" + "google.golang.org/api/transport" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/version" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +var ( + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + ErrObjectNotExist = errors.New("storage: object doesn't exist") +) + +const userAgent = "gcloud-golang-storage/20151204" + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. + ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope +) + +var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) + +func setClientHeader(headers http.Header) { + headers.Set("x-goog-api-client", xGoogHeader) +} + +// Client is a client for interacting with Google Cloud Storage. +// +// Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +type Client struct { + hc *http.Client + raw *raw.Service +} + +// NewClient creates a new Google Cloud Storage client. +// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(ScopeFullControl), + option.WithUserAgent(userAgent), + } + opts = append(o, opts...) + hc, ep, err := transport.NewHTTPClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + rawService, err := raw.New(hc) + if err != nil { + return nil, fmt.Errorf("storage client: %v", err) + } + if ep != "" { + rawService.BasePath = ep + } + return &Client{ + hc: hc, + raw: rawService, + }, nil +} + +// Close closes the Client. +// +// Close need not be called at program exit. +func (c *Client) Close() error { + c.hc = nil + return nil +} + +// BucketHandle provides operations on a Google Cloud Storage bucket. +// Use Client.Bucket to get a handle. 
+type BucketHandle struct { + acl ACLHandle + defaultObjectACL ACLHandle + + c *Client + name string +} + +// Bucket returns a BucketHandle, which provides operations on the named bucket. +// This call does not perform any network operations. +// +// The supplied name must contain only lowercase letters, numbers, dashes, +// underscores, and dots. The full specification for valid bucket names can be +// found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (c *Client) Bucket(name string) *BucketHandle { + return &BucketHandle{ + c: c, + name: name, + acl: ACLHandle{ + c: c, + bucket: name, + }, + defaultObjectACL: ACLHandle{ + c: c, + bucket: name, + isDefault: true, + }, + } +} + +// SignedURLOptions allows you to restrict access to the signed URL. +type SignedURLOptions struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. + PrivateKey []byte + + // SignBytes is a function for implementing custom signing. + // If your application is running on Google App Engine, you can use appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func([]byte) ([]byte, error) + + // Method is the HTTP method to be used with the signed URL. + // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. + // Required. + Method string + + // Expires is the expiration time on the signed URL. It must be + // a datetime in the future. + // Required. + Expires time.Time + + // ContentType is the content type header the client must provide + // to use the generated signed URL. + // Optional. + ContentType string + + // Headers is a list of extension headers the client must provide + // in order to use the generated signed URL. + // Optional. + Headers []string + + // MD5 is the base64 encoded MD5 checksum of the file. + // If provided, the client should provide the exact value on the request + // header in order to use the signed URL. + // Optional. + MD5 string +} + +// SignedURL returns a URL for the specified object. Signed URLs allow +// users access to a restricted resource for a limited time without having a +// Google account or signing in. For more information about the signed +// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
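+//
+// For example, a minimal illustrative sketch (the service account email and the
+// pemKey bytes below are placeholders, not values supplied by this package):
+//
+//	url, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{
+//		GoogleAccessID: "xxx@developer.gserviceaccount.com",
+//		PrivateKey:     pemKey,
+//		Method:         "GET",
+//		Expires:        time.Now().Add(15 * time.Minute),
+//	})
+//	if err != nil {
+//		// TODO: Handle error.
+//	}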
+func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { + if opts == nil { + return "", errors.New("storage: missing required SignedURLOptions") + } + if opts.GoogleAccessID == "" { + return "", errors.New("storage: missing required GoogleAccessID") + } + if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { + return "", errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + if opts.Method == "" { + return "", errors.New("storage: missing required method option") + } + if opts.Expires.IsZero() { + return "", errors.New("storage: missing required expires option") + } + if opts.MD5 != "" { + md5, err := base64.StdEncoding.DecodeString(opts.MD5) + if err != nil || len(md5) != 16 { + return "", errors.New("storage: invalid MD5 checksum") + } + } + + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } + + u := &url.URL{ + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + fmt.Fprintf(buf, "%s\n", opts.MD5) + fmt.Fprintf(buf, "%s\n", opts.ContentType) + fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) + if len(opts.Headers) > 0 { + fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n")) + } + fmt.Fprintf(buf, "%s", u.String()) + + b, err := signBytes(buf.Bytes()) + if err != nil { + return "", err + } + encoded := base64.StdEncoding.EncodeToString(b) + u.Scheme = "https" + u.Host = "storage.googleapis.com" + q := u.Query() + q.Set("GoogleAccessId", opts.GoogleAccessID) + q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) + q.Set("Signature", string(encoded)) + u.RawQuery = q.Encode() + return u.String(), nil +} + +// ObjectHandle provides operations on an object in a Google Cloud Storage bucket. +// Use BucketHandle.Object to get a handle. +type ObjectHandle struct { + c *Client + bucket string + object string + acl ACLHandle + gen int64 // a negative value indicates latest + conds *Conditions + encryptionKey []byte // AES-256 key +} + +// ACL provides access to the object's access control list. +// This controls who can read and write this object. +// This call does not perform any network operations. +func (o *ObjectHandle) ACL() *ACLHandle { + return &o.acl +} + +// Generation returns a new ObjectHandle that operates on a specific generation +// of the object. +// By default, the handle operates on the latest generation. Not +// all operations work when given a specific generation; check the API +// endpoints at https://cloud.google.com/storage/docs/json_api/ for details. +func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { + o2 := *o + o2.gen = gen + return &o2 +} + +// If returns a new ObjectHandle that applies a set of preconditions. +// Preconditions already set on the ObjectHandle are ignored. +// Operations on the new handle will only occur if the preconditions are +// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions +// for more details. +func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { + o2 := *o + o2.conds = &conds + return &o2 +} + +// Key returns a new ObjectHandle that uses the supplied encryption +// key to encrypt and decrypt the object's contents. +// +// Encryption key must be a 32-byte AES-256 key. 
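+//
+// For example, a rough sketch of writing with a customer-supplied key (the key
+// here comes from crypto/rand; how you obtain and store the key is up to you):
+//
+//	key := make([]byte, 32)
+//	if _, err := rand.Read(key); err != nil {
+//		// TODO: Handle error.
+//	}
+//	w := obj.Key(key).NewWriter(ctx)
+//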
+// See https://cloud.google.com/storage/docs/encryption for details. +func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { + o2 := *o + o2.encryptionKey = encryptionKey + return &o2 +} + +// Attrs returns meta information about the object. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { + if err := o.validate(); err != nil { + return nil, err + } + call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Update updates an object with the provided attributes. +// All zero-value attributes are ignored. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) { + if err := o.validate(); err != nil { + return nil, err + } + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + forceSendFields = append(forceSendFields, "ContentType") + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage it's an error to send the empty string. + // Instead we send a null. + if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentEncoding") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. + nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here.
+ forceSendFields = append(forceSendFields, "Acl") + } + rawObj := attrs.toRawObject(o.bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) + if err := applyConds("Update", o.gen, o.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// ObjectAttrsToUpdate is used to update the attributes of an object. +// Only fields set to non-nil values will be updated. +// Set a field to its zero value to delete it. +// +// For example, to change ContentType and delete ContentEncoding and +// Metadata, use +// ObjectAttrsToUpdate{ +// ContentType: "text/html", +// ContentEncoding: "", +// Metadata: map[string]string{}, +// } +type ObjectAttrsToUpdate struct { + ContentType optional.String + ContentLanguage optional.String + ContentEncoding optional.String + ContentDisposition optional.String + CacheControl optional.String + Metadata map[string]string // set to map[string]string{} to delete + ACL []ACLRule +} + +// Delete deletes the single specified object. +func (o *ObjectHandle) Delete(ctx context.Context) error { + if err := o.validate(); err != nil { + return err + } + call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) + if err := applyConds("Delete", o.gen, o.conds, call); err != nil { + return err + } + setClientHeader(call.Header()) + err := runWithRetry(ctx, func() error { return call.Do() }) + switch e := err.(type) { + case nil: + return nil + case *googleapi.Error: + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err +} + +// NewReader creates a new Reader to read the contents of the +// object. +// ErrObjectNotExist will be returned if the object is not found. +// +// The caller must call Close on the returned Reader when done reading. +func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { + return o.NewRangeReader(ctx, 0, -1) +} + +// NewRangeReader reads part of an object, reading at most length bytes +// starting at the given offset. If length is negative, the object is read +// until the end. 
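+//
+// For example, to read only the first 64 KiB of an object (an illustrative
+// sketch; it assumes the caller imports io/ioutil):
+//
+//	r, err := obj.NewRangeReader(ctx, 0, 64*1024)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	defer r.Close()
+//	head, err := ioutil.ReadAll(r)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}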
+func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) { + if err := o.validate(); err != nil { + return nil, err + } + if offset < 0 { + return nil, fmt.Errorf("storage: invalid offset %d < 0", offset) + } + if o.conds != nil { + if err := o.conds.validate("NewRangeReader"); err != nil { + return nil, err + } + } + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), + RawQuery: conditionsQuery(o.gen, o.conds), + } + verb := "GET" + if length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + req = withContext(req, ctx) + if length < 0 && offset > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } else if length > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } + if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { + return nil, err + } + var res *http.Response + err = runWithRetry(ctx, func() error { + res, err = o.c.hc.Do(req) + if err != nil { + return err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + return nil + }) + if err != nil { + return nil, err + } + + var size int64 // total size of object, even if a range was requested. + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + } else { + size = res.ContentLength + } + + remain := res.ContentLength + body := res.Body + if length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var ( + checkCRC bool + crc uint32 + ) + // Even if there is a CRC header, we can't compute the hash on partial data. + if remain == size { + crc, checkCRC = parseCRC32c(res) + } + return &Reader{ + body: body, + size: size, + remain: remain, + contentType: res.Header.Get("Content-Type"), + wantCRC: crc, + checkCRC: checkCRC, + }, nil +} + +func parseCRC32c(res *http.Response) (uint32, bool) { + const prefix = "crc32c=" + for _, spec := range res.Header["X-Goog-Hash"] { + if strings.HasPrefix(spec, prefix) { + c, err := decodeUint32(spec[len(prefix):]) + if err == nil { + return c, true + } + } + } + return 0, false +} + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +// NewWriter returns a storage Writer that writes to the GCS object +// associated with this ObjectHandle. +// +// A new object will be created unless an object with this name already exists. +// Otherwise any previous object with the same name will be replaced. +// The object will not be available (and any previous object will remain) +// until Close has been called. +// +// Attributes can be set on the object by modifying the returned Writer's +// ObjectAttrs field before the first call to Write. 
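+// For example, an illustrative sketch (the attribute values are arbitrary):
+//
+//	w := obj.NewWriter(ctx)
+//	w.ContentType = "text/plain"
+//	w.CacheControl = "public, max-age=3600"
+//	if _, err := w.Write([]byte("hello world")); err != nil {
+//		// TODO: Handle error.
+//	}
+//	if err := w.Close(); err != nil {
+//		// TODO: Handle error.
+//	}
+//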
If no ContentType +// attribute is specified, the content type will be automatically sniffed +// using net/http.DetectContentType. +// +// It is the caller's responsibility to call Close when writing is done. +func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { + return &Writer{ + ctx: ctx, + o: o, + donec: make(chan struct{}), + ObjectAttrs: ObjectAttrs{Name: o.object}, + ChunkSize: googleapi.DefaultUploadChunkSize, + } +} + +func (o *ObjectHandle) validate() error { + if o.bucket == "" { + return errors.New("storage: bucket name is empty") + } + if o.object == "" { + return errors.New("storage: object name is empty") + } + if !utf8.ValidString(o.object) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) + } + return nil +} + +// parseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. +func parseKey(key []byte) (*rsa.PrivateKey, error) { + if block, _ := pem.Decode(key); block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, err + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("oauth2: private key is invalid") + } + return parsed, nil +} + +func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl { + var acl []*raw.ObjectAccessControl + if len(oldACL) > 0 { + acl = make([]*raw.ObjectAccessControl, len(oldACL)) + for i, rule := range oldACL { + acl[i] = &raw.ObjectAccessControl{ + Entity: string(rule.Entity), + Role: string(rule.Role), + } + } + } + return acl +} + +// toRawObject copies the editable attributes from o to the raw library's Object type. +func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { + acl := toRawObjectACL(o.ACL) + return &raw.Object{ + Bucket: bucket, + Name: o.Name, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ContentDisposition: o.ContentDisposition, + StorageClass: o.StorageClass, + Acl: acl, + Metadata: o.Metadata, + } +} + +// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. +type ObjectAttrs struct { + // Bucket is the name of the bucket containing this GCS object. + // This field is read-only. + Bucket string + + // Name is the name of the object within the bucket. + // This field is read-only. + Name string + + // ContentType is the MIME type of the object's content. + ContentType string + + // ContentLanguage is the content language of the object's content. + ContentLanguage string + + // CacheControl is the Cache-Control header to be sent in the response + // headers when serving the object data. + CacheControl string + + // ACL is the list of access control rules for the object. + ACL []ACLRule + + // Owner is the owner of the object. This field is read-only. + // + // If non-zero, it is in the form of "user-". + Owner string + + // Size is the length of the object's content. This field is read-only. + Size int64 + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // ContentDisposition is the optional Content-Disposition header of the object + // sent in the response headers. 
+ ContentDisposition string + + // MD5 is the MD5 hash of the object's content. This field is read-only. + MD5 []byte + + // CRC32C is the CRC32 checksum of the object's content using + // the Castagnoli93 polynomial. This field is read-only. + CRC32C uint32 + + // MediaLink is a URL to the object's content. This field is read-only. + MediaLink string + + // Metadata represents user-provided metadata, in key/value pairs. + // It can be nil if no metadata is provided. + Metadata map[string]string + + // Generation is the generation number of the object's content. + // This field is read-only. + Generation int64 + + // Metageneration is the version of the metadata for this + // object at this generation. This field is used for preconditions + // and for detecting changes in metadata. A metageneration number + // is only meaningful in the context of a particular generation + // of a particular object. This field is read-only. + Metageneration int64 + + // StorageClass is the storage class of the object. + // This value defines how objects in the bucket are stored and + // determines the SLA and the cost of storage. Typical values are + // "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" + // and "DURABLE_REDUCED_AVAILABILITY". + // It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL" + // or "REGIONAL" depending on the bucket's location settings. + StorageClass string + + // Created is the time the object was created. This field is read-only. + Created time.Time + + // Deleted is the time the object was deleted. + // If not deleted, it is the zero value. This field is read-only. + Deleted time.Time + + // Updated is the creation or modification time of the object. + // For buckets with versioning enabled, changing an object's + // metadata does not change this property. This field is read-only. + Updated time.Time + + // CustomerKeySHA256 is the base64-encoded SHA-256 hash of the + // customer-supplied encryption key for the object. It is empty if there is + // no customer-supplied encryption key. + // See https://cloud.google.com/storage/docs/encryption for more about + // encryption in Google Cloud Storage. + CustomerKeySHA256 string + + // Prefix is set only for ObjectAttrs which represent synthetic "directory + // entries" when iterating over buckets using Query.Delimiter. See + // ObjectIterator.Next. When set, no other fields in ObjectAttrs will be + // populated. + Prefix string +} + +// convertTime converts a time in RFC3339 format to time.Time. +// If any error occurs in parsing, the zero-value time.Time is silently returned.
+func convertTime(t string) time.Time { + var r time.Time + if t != "" { + r, _ = time.Parse(time.RFC3339, t) + } + return r +} + +func newObject(o *raw.Object) *ObjectAttrs { + if o == nil { + return nil + } + acl := make([]ACLRule, len(o.Acl)) + for i, rule := range o.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + owner := "" + if o.Owner != nil { + owner = o.Owner.Entity + } + md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) + crc32c, _ := decodeUint32(o.Crc32c) + var sha256 string + if o.CustomerEncryption != nil { + sha256 = o.CustomerEncryption.KeySha256 + } + return &ObjectAttrs{ + Bucket: o.Bucket, + Name: o.Name, + ContentType: o.ContentType, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ACL: acl, + Owner: owner, + ContentEncoding: o.ContentEncoding, + Size: int64(o.Size), + MD5: md5, + CRC32C: crc32c, + MediaLink: o.MediaLink, + Metadata: o.Metadata, + Generation: o.Generation, + Metageneration: o.Metageneration, + StorageClass: o.StorageClass, + CustomerKeySHA256: sha256, + Created: convertTime(o.TimeCreated), + Deleted: convertTime(o.TimeDeleted), + Updated: convertTime(o.Updated), + } +} + +// Decode a uint32 encoded in Base64 in big-endian byte order. +func decodeUint32(b64 string) (uint32, error) { + d, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return 0, err + } + if len(d) != 4 { + return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d) + } + return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil +} + +// Encode a uint32 as Base64 in big-endian byte order. +func encodeUint32(u uint32) string { + b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)} + return base64.StdEncoding.EncodeToString(b) +} + +// Query represents a query to filter objects from a bucket. +type Query struct { + // Delimiter returns results in a directory-like fashion. + // Results will contain only objects whose names, aside from the + // prefix, do not contain delimiter. Objects whose names, + // aside from the prefix, contain delimiter will have their name, + // truncated after the delimiter, returned in prefixes. + // Duplicate prefixes are omitted. + // Optional. + Delimiter string + + // Prefix is the prefix filter to query objects + // whose names begin with this prefix. + // Optional. + Prefix string + + // Versions indicates whether multiple versions of the same + // object will be included in the results. + Versions bool +} + +// contentTyper implements ContentTyper to enable an +// io.ReadCloser to specify its MIME type. +type contentTyper struct { + io.Reader + t string +} + +func (c *contentTyper) ContentType() string { + return c.t +} + +// Conditions constrain methods to act on specific generations of +// resources. +// +// The zero value is an empty set of constraints. Not all conditions or +// combinations of conditions are applicable to all methods. +// See https://cloud.google.com/storage/docs/generations-preconditions +// for details on how these operate. +type Conditions struct { + // Generation constraints. + // At most one of the following can be set to a non-zero value. + + // GenerationMatch specifies that the object must have the given generation + // for the operation to occur. + // If GenerationMatch is zero, it has no effect. + // Use DoesNotExist to specify that the object does not exist in the bucket. 
+ GenerationMatch int64 + + // GenerationNotMatch specifies that the object must not have the given + // generation for the operation to occur. + // If GenerationNotMatch is zero, it has no effect. + GenerationNotMatch int64 + + // DoesNotExist specifies that the object must not exist in the bucket for + // the operation to occur. + // If DoesNotExist is false, it has no effect. + DoesNotExist bool + + // Metadata generation constraints. + // At most one of the following can be set to a non-zero value. + + // MetagenerationMatch specifies that the object must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the object must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *Conditions) validate(method string) error { + if *c == (Conditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if !c.isGenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for generation", method) + } + if !c.isMetagenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +func (c *Conditions) isGenerationValid() bool { + n := 0 + if c.GenerationMatch != 0 { + n++ + } + if c.GenerationNotMatch != 0 { + n++ + } + if c.DoesNotExist { + n++ + } + return n <= 1 +} + +func (c *Conditions) isMetagenerationValid() bool { + return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0 +} + +// applyConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
+func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { + cval := reflect.ValueOf(call) + if gen >= 0 { + if !setConditionField(cval, "Generation", gen) { + return fmt.Errorf("storage: %s: generation not supported", method) + } + } + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) { + return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) + } + case conds.GenerationNotMatch != 0: + if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) { + return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) + } + case conds.DoesNotExist: + if !setConditionField(cval, "IfGenerationMatch", int64(0)) { + return fmt.Errorf("storage: %s: DoesNotExist not supported", method) + } + } + switch { + case conds.MetagenerationMatch != 0: + if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { + if gen >= 0 { + call.SourceGeneration(gen) + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) + } + return nil +} + +// setConditionField sets a field on a *raw.WhateverCall. +// We can't use anonymous interfaces because the return type is +// different, since the field setters are builders. +func setConditionField(call reflect.Value, name string, value interface{}) bool { + m := call.MethodByName(name) + if !m.IsValid() { + return false + } + m.Call([]reflect.Value{reflect.ValueOf(value)}) + return true +} + +// conditionsQuery returns the generation and conditions as a URL query +// string suitable for URL.RawQuery. It assumes that the conditions +// have been validated. +func conditionsQuery(gen int64, conds *Conditions) string { + // URL escapes are elided because integer strings are URL-safe. + var buf []byte + + appendParam := func(s string, n int64) { + if len(buf) > 0 { + buf = append(buf, '&') + } + buf = append(buf, s...) 
+ buf = strconv.AppendInt(buf, n, 10) + } + + if gen >= 0 { + appendParam("generation=", gen) + } + if conds == nil { + return string(buf) + } + switch { + case conds.GenerationMatch != 0: + appendParam("ifGenerationMatch=", conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch) + case conds.DoesNotExist: + appendParam("ifGenerationMatch=", 0) + } + switch { + case conds.MetagenerationMatch != 0: + appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch) + } + return string(buf) +} + +// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods +// that modifyCall searches for by name. +type composeSourceObj struct { + src *raw.ComposeRequestSourceObjects +} + +func (c composeSourceObj) Generation(gen int64) { + c.src.Generation = gen +} + +func (c composeSourceObj) IfGenerationMatch(gen int64) { + // It's safe to overwrite ObjectPreconditions, since its only field is + // IfGenerationMatch. + c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: gen, + } +} + +func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error { + if key == nil { + return nil + } + // TODO(jbd): Ask the API team to return a more user-friendly error + // and avoid doing this check at the client level. + if len(key) != 32 { + return errors.New("storage: not a 32-byte AES-256 key") + } + var cs string + if copySource { + cs = "copy-source-" + } + headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256") + headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) + keyHash := sha256.Sum256(key) + headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) + return nil +} + +// TODO(jbd): Add storage.objects.watch. diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go new file mode 100644 index 000000000..21ffdb91c --- /dev/null +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -0,0 +1,164 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "unicode/utf8" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +// A Writer writes a Cloud Storage object. +type Writer struct { + // ObjectAttrs are optional attributes to set on the object. Any attributes + // must be initialized before the first Write call. Nil or zero-valued + // attributes are ignored. + ObjectAttrs + + // SendCRC specifies whether to transmit a CRC32C field. It should be set + // to true in addition to setting the Writer's CRC32C field, because zero + // is a valid CRC and normally a zero would not be transmitted. 
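+	//
+	// For example, an illustrative sketch (assumes the caller imports hash/crc32
+	// and has the full object contents in a []byte named data):
+	//
+	//	w := obj.NewWriter(ctx)
+	//	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
+	//	w.SendCRC32C = true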
+ SendCRC32C bool + + // ChunkSize controls the maximum number of bytes of the object that the + // Writer will attempt to send to the server in a single request. Objects + // smaller than the size will be sent in a single request, while larger + // objects will be split over multiple requests. The size will be rounded up + // to the nearest multiple of 256K. If zero, chunking will be disabled and + // the object will be uploaded in a single request. + // + // ChunkSize will default to a reasonable value. Any custom configuration + // must be done before the first Write call. + ChunkSize int + + ctx context.Context + o *ObjectHandle + + opened bool + pw *io.PipeWriter + + donec chan struct{} // closed after err and obj are set. + err error + obj *ObjectAttrs +} + +func (w *Writer) open() error { + attrs := w.ObjectAttrs + // Check the developer didn't change the object Name (this is unfortunate, but + // we don't want to store an object under the wrong name). + if attrs.Name != w.o.object { + return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) + } + if !utf8.ValidString(attrs.Name) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) + } + pr, pw := io.Pipe() + w.pw = pw + w.opened = true + + if w.ChunkSize < 0 { + return errors.New("storage: Writer.ChunkSize must be non-negative") + } + mediaOpts := []googleapi.MediaOption{ + googleapi.ChunkSize(w.ChunkSize), + } + if c := attrs.ContentType; c != "" { + mediaOpts = append(mediaOpts, googleapi.ContentType(c)) + } + + go func() { + defer close(w.donec) + + rawObj := attrs.toRawObject(w.o.bucket) + if w.SendCRC32C { + rawObj.Crc32c = encodeUint32(attrs.CRC32C) + } + if w.MD5 != nil { + rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5) + } + call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj). + Media(pr, mediaOpts...). + Projection("full"). + Context(w.ctx) + if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { + w.err = err + pr.CloseWithError(w.err) + return + } + var resp *raw.Object + err := applyConds("NewWriter", w.o.gen, w.o.conds, call) + if err == nil { + setClientHeader(call.Header()) + resp, err = call.Do() + } + if err != nil { + w.err = err + pr.CloseWithError(w.err) + return + } + w.obj = newObject(resp) + }() + return nil +} + +// Write appends to w. It implements the io.Writer interface. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if !w.opened { + if err := w.open(); err != nil { + return 0, err + } + } + return w.pw.Write(p) +} + +// Close completes the write operation and flushes any buffered data. +// If Close doesn't return an error, metadata about the written object +// can be retrieved by calling Attrs. +func (w *Writer) Close() error { + if !w.opened { + if err := w.open(); err != nil { + return err + } + } + if err := w.pw.Close(); err != nil { + return err + } + <-w.donec + return w.err +} + +// CloseWithError aborts the write operation with the provided error. +// CloseWithError always returns nil. +func (w *Writer) CloseWithError(err error) error { + if !w.opened { + return nil + } + return w.pw.CloseWithError(err) +} + +// Attrs returns metadata about a successfully-written object. +// It's only valid to call it after Close returns nil.
+func (w *Writer) Attrs() *ObjectAttrs { + return w.obj +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md new file mode 100644 index 000000000..9791dc603 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md @@ -0,0 +1,56 @@ +Contributing to Paho +==================== + +Thanks for your interest in this project. + +Project description: +-------------------- + +The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT). +Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community. + +- https://projects.eclipse.org/projects/technology.paho + +Developer resources: +-------------------- + +Information regarding source code management, builds, coding standards, and more. + +- https://projects.eclipse.org/projects/technology.paho/developer + +Contributor License Agreement: +------------------------------ + +Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA). + +- http://www.eclipse.org/legal/CLA.php + +Contributing Code: +------------------ + +The Go client is developed in Github, see their documentation on the process of forking and pull requests; https://help.github.com/categories/collaborating-on-projects-using-pull-requests/ + +Git commit messages should follow the style described here; + +http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html + +Contact: +-------- + +Contact the project developers via the project's "dev" list. + +- https://dev.eclipse.org/mailman/listinfo/paho-dev + +Search for bugs: +---------------- + +This project uses Github issues to track ongoing development and issues. + +- https://github.com/eclipse/paho.mqtt.golang/issues + +Create a new bug: +----------------- + +Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome! + +- https://github.com/eclipse/paho.mqtt.golang/issues diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION b/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION new file mode 100644 index 000000000..34e49731d --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION @@ -0,0 +1,15 @@ + + +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + Neither the name of the Eclipse Foundation, Inc. 
nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE new file mode 100644 index 000000000..aa7cc810f --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE @@ -0,0 +1,87 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and + +b) in the case of each subsequent Contributor: + +i) changes to the Program, and + +ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. + +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. 
This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. + +c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. + +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. + +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and + +b) its license agreement: + +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; + +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; + +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and + +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. + +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and + +b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained within the Program. + +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. 
Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. 
+ +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. \ No newline at end of file diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/README.md b/vendor/github.com/eclipse/paho.mqtt.golang/README.md new file mode 100644 index 000000000..660caa80a --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/README.md @@ -0,0 +1,62 @@ +Eclipse Paho MQTT Go client +=========================== + + +This repository contains the source code for the [Eclipse Paho](http://eclipse.org/paho) MQTT Go client library. + +This code builds a library which enable applications to connect to an [MQTT](http://mqtt.org) broker to publish messages, and to subscribe to topics and receive published messages. + +This library supports a fully asynchronous mode of operation. 
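The vendored README above notes that the library supports a fully asynchronous mode of operation, and the Client interface added in client.go later in this diff exposes that through Tokens returned by Connect, Publish and Subscribe. The sketch below is not part of the vendored files; it assumes the usual NewClientOptions, AddBroker and SetClientID helpers from the library's options.go (not shown in this hunk), and the broker URL, client ID and topic are placeholders.

```
package main

import (
	"fmt"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	// NewClientOptions/AddBroker/SetClientID come from options.go (not shown
	// in this hunk); the broker URL and client ID are placeholders.
	opts := mqtt.NewClientOptions().AddBroker("tcp://localhost:1883").SetClientID("example-client")
	c := mqtt.NewClient(opts)

	// Every operation returns a Token; Wait blocks until it completes.
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}

	// The callback runs for each message delivered on the subscription.
	c.Subscribe("example/topic", 1, func(client mqtt.Client, msg mqtt.Message) {
		fmt.Printf("received %s on %s\n", msg.Payload(), msg.Topic())
	}).Wait()

	// Publish is asynchronous too; waiting on the token confirms hand-off
	// to the broker at the requested QoS.
	c.Publish("example/topic", 1, false, "hello").Wait()

	time.Sleep(time.Second)
	c.Disconnect(250)
}
```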
+ + +Installation and Build +---------------------- + +This client is designed to work with the standard Go tools, so installation is as easy as: + +``` +go get github.com/eclipse/paho.mqtt.golang +``` + +The client depends on Google's [websockets](https://godoc.org/golang.org/x/net/websocket) package, +also easily installed with the command: + +``` +go get golang.org/x/net/websocket +``` + + +Usage and API +------------- + +Detailed API documentation is available by using to godoc tool, or can be browsed online +using the [godoc.org](http://godoc.org/github.com/eclipse/paho.mqtt.golang) service. + +Make use of the library by importing it in your Go client source code. For example, +``` +import "github.com/eclipse/paho.mqtt.golang" +``` + +Samples are available in the `cmd` directory for reference. + + +Runtime tracing +--------------- + +Tracing is enabled by assigning logs (from the Go log package) to the logging endpoints, ERROR, CRITICAL, WARN and DEBUG + + +Reporting bugs +-------------- + +Please report bugs by raising issues for this project in github https://github.com/eclipse/paho.mqtt.golang/issues + + +More information +---------------- + +Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev). + +General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt). + +There is much more information available via the [MQTT community site](http://mqtt.org). diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/about.html b/vendor/github.com/eclipse/paho.mqtt.golang/about.html new file mode 100644 index 000000000..b183f417a --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/about.html @@ -0,0 +1,41 @@ + + + +About + + +

+About This Content
+
+December 9, 2013
+
+License
+
+The Eclipse Foundation makes available all content in this plug-in ("Content"). Unless otherwise
+indicated below, the Content is provided to you under the terms and conditions of the
+Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL").
+A copy of the EPL is available at
+http://www.eclipse.org/legal/epl-v10.html
+and a copy of the EDL is available at
+http://www.eclipse.org/org/documents/edl-v10.php.
+For purposes of the EPL, "Program" will mean the Content.
+
+If you did not receive this Content directly from the Eclipse Foundation, the Content is
+being redistributed by another party ("Redistributor") and different terms and conditions may
+apply to your use of any object code in the Content. Check the Redistributor's license that was
+provided with the Content. If no such license exists, contact the Redistributor. Unless otherwise
+indicated below, the terms and conditions of the EPL still apply to any source code in the Content
+and such source code may be obtained at http://www.eclipse.org.
+
+Third Party Content
+
+The Content includes items that have been sourced from third parties as set out below. If you
+did not receive this Content directly from the Eclipse Foundation, the following is provided
+for informational purposes only, and you should look to the Redistributor's license for
+terms and conditions of use.
+
+None

+ + + + diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/client.go b/vendor/github.com/eclipse/paho.mqtt.golang/client.go new file mode 100644 index 000000000..242344bf8 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/client.go @@ -0,0 +1,611 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +// Package mqtt provides an MQTT v3.1.1 client library. +package mqtt + +import ( + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +type connStatus uint + +const ( + disconnected connStatus = iota + connecting + reconnecting + connected +) + +// Client is the interface definition for a Client as used by this +// library, the interface is primarily to allow mocking tests. +// +// It is an MQTT v3.1.1 client for communicating +// with an MQTT server using non-blocking methods that allow work +// to be done in the background. +// An application may connect to an MQTT server using: +// A plain TCP socket +// A secure SSL/TLS socket +// A websocket +// To enable ensured message delivery at Quality of Service (QoS) levels +// described in the MQTT spec, a message persistence mechanism must be +// used. This is done by providing a type which implements the Store +// interface. For convenience, FileStore and MemoryStore are provided +// implementations that should be sufficient for most use cases. More +// information can be found in their respective documentation. +// Numerous connection options may be specified by configuring a +// and then supplying a ClientOptions type. +type Client interface { + IsConnected() bool + Connect() Token + Disconnect(quiesce uint) + Publish(topic string, qos byte, retained bool, payload interface{}) Token + Subscribe(topic string, qos byte, callback MessageHandler) Token + SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token + Unsubscribe(topics ...string) Token + AddRoute(topic string, callback MessageHandler) +} + +// client implements the Client interface +type client struct { + sync.RWMutex + messageIds + conn net.Conn + ibound chan packets.ControlPacket + obound chan *PacketAndToken + oboundP chan *PacketAndToken + msgRouter *router + stopRouter chan bool + incomingPubChan chan *packets.PublishPacket + errors chan error + stop chan struct{} + persist Store + options ClientOptions + pingResp chan struct{} + packetResp chan struct{} + keepaliveReset chan struct{} + status connStatus + workers sync.WaitGroup +} + +// NewClient will create an MQTT v3.1.1 client with all of the options specified +// in the provided ClientOptions. The client must have the Connect method called +// on it before it may be used. This is to make sure resources (such as a net +// connection) are created before the application is actually ready. 
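The Client interface documentation above lists plain TCP, SSL/TLS and websocket transports and notes that assured QoS delivery requires a Store implementation (FileStore or MemoryStore), while the vendored README mentions that tracing is enabled by assigning Go log instances to the ERROR, CRITICAL, WARN and DEBUG endpoints. The following is a hedged sketch of wiring those pieces together; the ClientOptions setters come from the library's options.go (not shown in this section), and the broker URLs, client ID and store path are placeholders.

```
package main

import (
	"log"
	"os"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	// Route library errors to stderr, per the README's "Runtime tracing" note.
	mqtt.ERROR = log.New(os.Stderr, "[mqtt] ", 0)

	// Setters below live in options.go, not in this hunk; URLs and the
	// store path are placeholders.
	opts := mqtt.NewClientOptions().
		AddBroker("tcps://broker.example.com:8883"). // "ssl"/"tls"/"tcps" dial TLS in net.go
		AddBroker("ws://broker.example.com:80").     // "ws"/"wss" use the websocket transport
		SetClientID("durable-client").
		SetCleanSession(false).
		// FileStore (filestore.go below) keeps in-flight QoS 1/2 packets on
		// disk; MemoryStore is what NewClient falls back to when no Store is set.
		SetStore(mqtt.NewFileStore("/tmp/mqtt-store"))

	c := mqtt.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		log.Fatal(token.Error())
	}
	defer c.Disconnect(250)
}
```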
+func NewClient(o *ClientOptions) Client { + c := &client{} + c.options = *o + + if c.options.Store == nil { + c.options.Store = NewMemoryStore() + } + switch c.options.ProtocolVersion { + case 3, 4: + c.options.protocolVersionExplicit = true + default: + c.options.ProtocolVersion = 4 + c.options.protocolVersionExplicit = false + } + c.persist = c.options.Store + c.status = disconnected + c.messageIds = messageIds{index: make(map[uint16]Token)} + c.msgRouter, c.stopRouter = newRouter() + c.msgRouter.setDefaultHandler(c.options.DefaultPublishHander) + if !c.options.AutoReconnect { + c.options.MessageChannelDepth = 0 + } + return c +} + +func (c *client) AddRoute(topic string, callback MessageHandler) { + if callback != nil { + c.msgRouter.addRoute(topic, callback) + } +} + +// IsConnected returns a bool signifying whether +// the client is connected or not. +func (c *client) IsConnected() bool { + c.RLock() + defer c.RUnlock() + switch { + case c.status == connected: + return true + case c.options.AutoReconnect && c.status > disconnected: + return true + default: + return false + } +} + +func (c *client) connectionStatus() connStatus { + c.RLock() + defer c.RUnlock() + return c.status +} + +func (c *client) setConnected(status connStatus) { + c.Lock() + defer c.Unlock() + c.status = status +} + +//ErrNotConnected is the error returned from function calls that are +//made when the client is not connected to a broker +var ErrNotConnected = errors.New("Not Connected") + +// Connect will create a connection to the message broker +// If clean session is false, then a slice will +// be returned containing Receipts for all messages +// that were in-flight at the last disconnect. +// If clean session is true, then any existing client +// state will be removed. 
+func (c *client) Connect() Token { + var err error + t := newToken(packets.Connect).(*ConnectToken) + DEBUG.Println(CLI, "Connect()") + + go func() { + c.persist.Open() + + c.setConnected(connecting) + var rc byte + cm := newConnectMsgFromOptions(&c.options) + + for _, broker := range c.options.Servers { + CONN: + DEBUG.Println(CLI, "about to write new connect msg") + c.conn, err = openConnection(broker, &c.options.TLSConfig, c.options.ConnectTimeout) + if err == nil { + DEBUG.Println(CLI, "socket connected to broker") + switch c.options.ProtocolVersion { + case 3: + DEBUG.Println(CLI, "Using MQTT 3.1 protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 3 + default: + DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") + c.options.ProtocolVersion = 4 + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 4 + } + cm.Write(c.conn) + + rc = c.connect() + if rc != packets.Accepted { + if c.conn != nil { + c.conn.Close() + c.conn = nil + } + //if the protocol version was explicitly set don't do any fallback + if c.options.protocolVersionExplicit { + ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc]) + continue + } + if c.options.ProtocolVersion == 4 { + DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol") + c.options.ProtocolVersion = 3 + goto CONN + } + } + break + } else { + ERROR.Println(CLI, err.Error()) + WARN.Println(CLI, "failed to connect to broker, trying next") + rc = packets.ErrNetworkError + } + } + + if c.conn == nil { + ERROR.Println(CLI, "Failed to connect to a broker") + t.returnCode = rc + if rc != packets.ErrNetworkError { + t.err = packets.ConnErrors[rc] + } else { + t.err = fmt.Errorf("%s : %s", packets.ConnErrors[rc], err) + } + c.setConnected(disconnected) + c.persist.Close() + t.flowComplete() + return + } + + if c.options.KeepAlive != 0 { + c.workers.Add(1) + go keepalive(c) + } + + c.obound = make(chan *PacketAndToken, c.options.MessageChannelDepth) + c.oboundP = make(chan *PacketAndToken, c.options.MessageChannelDepth) + c.ibound = make(chan packets.ControlPacket) + c.errors = make(chan error, 1) + c.stop = make(chan struct{}) + c.pingResp = make(chan struct{}, 1) + c.packetResp = make(chan struct{}, 1) + c.keepaliveReset = make(chan struct{}, 1) + + c.incomingPubChan = make(chan *packets.PublishPacket, c.options.MessageChannelDepth) + c.msgRouter.matchAndDispatch(c.incomingPubChan, c.options.Order, c) + + c.setConnected(connected) + DEBUG.Println(CLI, "client is connected") + if c.options.OnConnect != nil { + go c.options.OnConnect(c) + } + + // Take care of any messages in the store + //var leftovers []Receipt + if c.options.CleanSession == false { + //leftovers = c.resume() + } else { + c.persist.Reset() + } + + go errorWatch(c) + + // Do not start incoming until resume has completed + c.workers.Add(3) + go alllogic(c) + go outgoing(c) + go incoming(c) + + DEBUG.Println(CLI, "exit startClient") + t.flowComplete() + }() + return t +} + +// internal function used to reconnect the client when it loses its connection +func (c *client) reconnect() { + DEBUG.Println(CLI, "enter reconnect") + var ( + err error + + rc = byte(1) + sleep = time.Duration(1 * time.Second) + ) + + for rc != 0 && c.status != disconnected { + cm := newConnectMsgFromOptions(&c.options) + + for _, broker := range c.options.Servers { + CONN: + DEBUG.Println(CLI, "about to write new connect msg") + c.conn, err = openConnection(broker, &c.options.TLSConfig, c.options.ConnectTimeout) + if err == nil { + DEBUG.Println(CLI, 
"socket connected to broker") + switch c.options.ProtocolVersion { + case 3: + DEBUG.Println(CLI, "Using MQTT 3.1 protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 3 + default: + DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") + c.options.ProtocolVersion = 4 + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 4 + } + cm.Write(c.conn) + + rc = c.connect() + if rc != packets.Accepted { + c.conn.Close() + c.conn = nil + //if the protocol version was explicitly set don't do any fallback + if c.options.protocolVersionExplicit { + ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not Accepted, but rather", packets.ConnackReturnCodes[rc]) + continue + } + if c.options.ProtocolVersion == 4 { + DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol") + c.options.ProtocolVersion = 3 + goto CONN + } + } + break + } else { + ERROR.Println(CLI, err.Error()) + WARN.Println(CLI, "failed to connect to broker, trying next") + rc = packets.ErrNetworkError + } + } + if rc != 0 { + DEBUG.Println(CLI, "Reconnect failed, sleeping for", int(sleep.Seconds()), "seconds") + time.Sleep(sleep) + if sleep < c.options.MaxReconnectInterval { + sleep *= 2 + } + + if sleep > c.options.MaxReconnectInterval { + sleep = c.options.MaxReconnectInterval + } + } + } + // Disconnect() must have been called while we were trying to reconnect. + if c.status == disconnected { + DEBUG.Println(CLI, "Client moved to disconnected state while reconnecting, abandoning reconnect") + return + } + + if c.options.KeepAlive != 0 { + c.workers.Add(1) + go keepalive(c) + } + + c.stop = make(chan struct{}) + + c.setConnected(connected) + DEBUG.Println(CLI, "client is reconnected") + if c.options.OnConnect != nil { + go c.options.OnConnect(c) + } + + go errorWatch(c) + + c.workers.Add(3) + go alllogic(c) + go outgoing(c) + go incoming(c) +} + +// This function is only used for receiving a connack +// when the connection is first started. +// This prevents receiving incoming data while resume +// is in progress if clean session is false. +func (c *client) connect() byte { + DEBUG.Println(NET, "connect started") + + ca, err := packets.ReadPacket(c.conn) + if err != nil { + ERROR.Println(NET, "connect got error", err) + return packets.ErrNetworkError + } + if ca == nil { + ERROR.Println(NET, "received nil packet") + return packets.ErrNetworkError + } + + msg, ok := ca.(*packets.ConnackPacket) + if !ok { + ERROR.Println(NET, "received msg that was not CONNACK") + return packets.ErrNetworkError + } + + DEBUG.Println(NET, "received connack") + return msg.ReturnCode +} + +// Disconnect will end the connection with the server, but not before waiting +// the specified number of milliseconds to wait for existing work to be +// completed. +func (c *client) Disconnect(quiesce uint) { + if c.status == connected { + DEBUG.Println(CLI, "disconnecting") + c.setConnected(disconnected) + + dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket) + dt := newToken(packets.Disconnect) + c.oboundP <- &PacketAndToken{p: dm, t: dt} + + // wait for work to finish, or quiesce time consumed + dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond) + } else { + WARN.Println(CLI, "Disconnect() called but not connected (disconnected/reconnecting)") + c.setConnected(disconnected) + } + + c.disconnect() +} + +// ForceDisconnect will end the connection with the mqtt broker immediately. 
+func (c *client) forceDisconnect() { + if !c.IsConnected() { + WARN.Println(CLI, "already disconnected") + return + } + c.setConnected(disconnected) + c.conn.Close() + DEBUG.Println(CLI, "forcefully disconnecting") + c.disconnect() +} + +func (c *client) internalConnLost(err error) { + // Only do anything if this was called and we are still "connected" + // forceDisconnect can cause incoming/outgoing/alllogic to end with + // error from closing the socket but state will be "disconnected" + if c.IsConnected() { + c.closeStop() + c.conn.Close() + c.workers.Wait() + if c.options.CleanSession { + c.messageIds.cleanUp() + } + if c.options.AutoReconnect { + c.setConnected(reconnecting) + go c.reconnect() + } else { + c.setConnected(disconnected) + } + if c.options.OnConnectionLost != nil { + go c.options.OnConnectionLost(c, err) + } + } +} + +func (c *client) closeStop() { + c.Lock() + defer c.Unlock() + select { + case <-c.stop: + DEBUG.Println("In disconnect and stop channel is already closed") + default: + close(c.stop) + } +} + +func (c *client) closeConn() { + c.Lock() + defer c.Unlock() + if c.conn != nil { + c.conn.Close() + } +} + +func (c *client) disconnect() { + c.closeStop() + c.closeConn() + c.workers.Wait() + close(c.stopRouter) + DEBUG.Println(CLI, "disconnected") + c.persist.Close() +} + +// Publish will publish a message with the specified QoS and content +// to the specified topic. +// Returns a token to track delivery of the message to the broker +func (c *client) Publish(topic string, qos byte, retained bool, payload interface{}) Token { + token := newToken(packets.Publish).(*PublishToken) + DEBUG.Println(CLI, "enter Publish") + switch { + case !c.IsConnected(): + token.err = ErrNotConnected + token.flowComplete() + return token + case c.connectionStatus() == reconnecting && qos == 0: + token.flowComplete() + return token + } + pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pub.Qos = qos + pub.TopicName = topic + pub.Retain = retained + switch payload.(type) { + case string: + pub.Payload = []byte(payload.(string)) + case []byte: + pub.Payload = payload.([]byte) + default: + token.err = errors.New("Unknown payload type") + token.flowComplete() + return token + } + + DEBUG.Println(CLI, "sending publish message, topic:", topic) + if pub.Qos != 0 && pub.MessageID == 0 { + pub.MessageID = c.getID(token) + token.messageID = pub.MessageID + } + persistOutbound(c.persist, pub) + c.obound <- &PacketAndToken{p: pub, t: token} + return token +} + +// Subscribe starts a new subscription. Provide a MessageHandler to be executed when +// a message is published on the topic provided. +func (c *client) Subscribe(topic string, qos byte, callback MessageHandler) Token { + token := newToken(packets.Subscribe).(*SubscribeToken) + DEBUG.Println(CLI, "enter Subscribe") + if !c.IsConnected() { + token.err = ErrNotConnected + token.flowComplete() + return token + } + sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) + if err := validateTopicAndQos(topic, qos); err != nil { + token.err = err + return token + } + sub.Topics = append(sub.Topics, topic) + sub.Qoss = append(sub.Qoss, qos) + DEBUG.Println(CLI, sub.String()) + + if callback != nil { + c.msgRouter.addRoute(topic, callback) + } + + token.subs = append(token.subs, topic) + c.oboundP <- &PacketAndToken{p: sub, t: token} + DEBUG.Println(CLI, "exit Subscribe") + return token +} + +// SubscribeMultiple starts a new subscription for multiple topics. 
Provide a MessageHandler to +// be executed when a message is published on one of the topics provided. +func (c *client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token { + var err error + token := newToken(packets.Subscribe).(*SubscribeToken) + DEBUG.Println(CLI, "enter SubscribeMultiple") + if !c.IsConnected() { + token.err = ErrNotConnected + token.flowComplete() + return token + } + sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) + if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil { + token.err = err + return token + } + + if callback != nil { + for topic := range filters { + c.msgRouter.addRoute(topic, callback) + } + } + token.subs = make([]string, len(sub.Topics)) + copy(token.subs, sub.Topics) + c.oboundP <- &PacketAndToken{p: sub, t: token} + DEBUG.Println(CLI, "exit SubscribeMultiple") + return token +} + +// Unsubscribe will end the subscription from each of the topics provided. +// Messages published to those topics from other clients will no longer be +// received. +func (c *client) Unsubscribe(topics ...string) Token { + token := newToken(packets.Unsubscribe).(*UnsubscribeToken) + DEBUG.Println(CLI, "enter Unsubscribe") + if !c.IsConnected() { + token.err = ErrNotConnected + token.flowComplete() + return token + } + unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket) + unsub.Topics = make([]string, len(topics)) + copy(unsub.Topics, topics) + + c.oboundP <- &PacketAndToken{p: unsub, t: token} + for _, topic := range topics { + c.msgRouter.deleteRoute(topic) + } + + DEBUG.Println(CLI, "exit Unsubscribe") + return token +} + +func (c *client) OptionsReader() ClientOptionsReader { + r := ClientOptionsReader{options: &c.options} + return r +} + +//DefaultConnectionLostHandler is a definition of a function that simply +//reports to the DEBUG log the reason for the client losing a connection. +func DefaultConnectionLostHandler(client Client, reason error) { + DEBUG.Println("Connection lost:", reason.Error()) +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/components.go b/vendor/github.com/eclipse/paho.mqtt.golang/components.go new file mode 100644 index 000000000..01f5fafdf --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/components.go @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +type component string + +// Component names for debug output +const ( + NET component = "[net] " + PNG component = "[pinger] " + CLI component = "[client] " + DEC component = "[decode] " + MES component = "[message] " + STR component = "[store] " + MID component = "[msgids] " + TST component = "[test] " + STA component = "[state] " + ERR component = "[error] " +) diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 new file mode 100644 index 000000000..cf989f145 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 @@ -0,0 +1,15 @@ + +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 new file mode 100644 index 000000000..79e486c3d --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 @@ -0,0 +1,70 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and +b) in the case of each subsequent Contributor: +i) changes to the Program, and +ii) additions to the Program; +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. 
+b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. +c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and +b) its license agreement: +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and +b) a copy of this Agreement must be included with each copy of the Program. +Contributors may not remove or alter any copyright notices contained within the Program. + +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. 
While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. 
GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go new file mode 100644 index 000000000..daf3a9e40 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "io/ioutil" + "os" + "path" + "sync" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +const ( + msgExt = ".msg" + tmpExt = ".tmp" + corruptExt = ".CORRUPT" +) + +// FileStore implements the store interface using the filesystem to provide +// true persistence, even across client failure. This is designed to use a +// single directory per running client. If you are running multiple clients +// on the same filesystem, you will need to be careful to specify unique +// store directories for each. +type FileStore struct { + sync.RWMutex + directory string + opened bool +} + +// NewFileStore will create a new FileStore which stores its messages in the +// directory provided. +func NewFileStore(directory string) *FileStore { + store := &FileStore{ + directory: directory, + opened: false, + } + return store +} + +// Open will allow the FileStore to be used. +func (store *FileStore) Open() { + store.Lock() + defer store.Unlock() + // if no store directory was specified in ClientOpts, by default use the + // current working directory + if store.directory == "" { + store.directory, _ = os.Getwd() + } + + // if store dir exists, great, otherwise, create it + if !exists(store.directory) { + perms := os.FileMode(0770) + merr := os.MkdirAll(store.directory, perms) + chkerr(merr) + } + store.opened = true + DEBUG.Println(STR, "store is opened at", store.directory) +} + +// Close will disallow the FileStore from being used. +func (store *FileStore) Close() { + store.Lock() + defer store.Unlock() + store.opened = false + DEBUG.Println(STR, "store is closed") +} + +// Put will put a message into the store, associated with the provided +// key value. +func (store *FileStore) Put(key string, m packets.ControlPacket) { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to use file store, but not open") + return + } + full := fullpath(store.directory, key) + write(store.directory, key, m) + if !exists(full) { + ERROR.Println(STR, "file not created:", full) + } +} + +// Get will retrieve a message from the store, the one associated with +// the provided key value. +func (store *FileStore) Get(key string) packets.ControlPacket { + store.RLock() + defer store.RUnlock() + if !store.opened { + ERROR.Println(STR, "Trying to use file store, but not open") + return nil + } + filepath := fullpath(store.directory, key) + if !exists(filepath) { + return nil + } + mfile, oerr := os.Open(filepath) + chkerr(oerr) + msg, rerr := packets.ReadPacket(mfile) + chkerr(mfile.Close()) + + // Message was unreadable, return nil + if rerr != nil { + newpath := corruptpath(store.directory, key) + WARN.Println(STR, "corrupted file detected:", rerr.Error(), "archived at:", newpath) + os.Rename(filepath, newpath) + return nil + } + return msg +} + +// All will provide a list of all of the keys associated with messages +// currenly residing in the FileStore. +func (store *FileStore) All() []string { + store.RLock() + defer store.RUnlock() + return store.all() +} + +// Del will remove the persisted message associated with the provided +// key from the FileStore. 
+func (store *FileStore) Del(key string) { + store.Lock() + defer store.Unlock() + store.del(key) +} + +// Reset will remove all persisted messages from the FileStore. +func (store *FileStore) Reset() { + store.Lock() + defer store.Unlock() + WARN.Println(STR, "FileStore Reset") + for _, key := range store.all() { + store.del(key) + } +} + +// lockless +func (store *FileStore) all() []string { + if !store.opened { + ERROR.Println(STR, "Trying to use file store, but not open") + return nil + } + keys := []string{} + files, rderr := ioutil.ReadDir(store.directory) + chkerr(rderr) + for _, f := range files { + DEBUG.Println(STR, "file in All():", f.Name()) + name := f.Name() + if name[len(name)-4:len(name)] != msgExt { + DEBUG.Println(STR, "skipping file, doesn't have right extension: ", name) + continue + } + key := name[0 : len(name)-4] // remove file extension + keys = append(keys, key) + } + return keys +} + +// lockless +func (store *FileStore) del(key string) { + if !store.opened { + ERROR.Println(STR, "Trying to use file store, but not open") + return + } + DEBUG.Println(STR, "store del filepath:", store.directory) + DEBUG.Println(STR, "store delete key:", key) + filepath := fullpath(store.directory, key) + DEBUG.Println(STR, "path of deletion:", filepath) + if !exists(filepath) { + WARN.Println(STR, "store could not delete key:", key) + return + } + rerr := os.Remove(filepath) + chkerr(rerr) + DEBUG.Println(STR, "del msg:", key) + if exists(filepath) { + ERROR.Println(STR, "file not deleted:", filepath) + } +} + +func fullpath(store string, key string) string { + p := path.Join(store, key+msgExt) + return p +} + +func tmppath(store string, key string) string { + p := path.Join(store, key+tmpExt) + return p +} + +func corruptpath(store string, key string) string { + p := path.Join(store, key+corruptExt) + return p +} + +// create file called "X.[messageid].tmp" located in the store +// the contents of the file is the bytes of the message, then +// rename it to "X.[messageid].msg", overwriting any existing +// message with the same id +// X will be 'i' for inbound messages, and O for outbound messages +func write(store, key string, m packets.ControlPacket) { + temppath := tmppath(store, key) + f, err := os.Create(temppath) + chkerr(err) + werr := m.Write(f) + chkerr(werr) + cerr := f.Close() + chkerr(cerr) + rerr := os.Rename(temppath, fullpath(store, key)) + chkerr(rerr) +} + +func exists(file string) bool { + if _, err := os.Stat(file); err != nil { + if os.IsNotExist(err) { + return false + } + chkerr(err) + } + return true +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go new file mode 100644 index 000000000..d3bfe084d --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "sync" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +// MemoryStore implements the store interface to provide a "persistence" +// mechanism wholly stored in memory. This is only useful for +// as long as the client instance exists. 
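FileStore above and MemoryStore below both implement the store interface the client uses to persist in-flight QoS 1 and 2 packets; the interface itself is declared in the library's store.go, which is outside this section. A minimal sketch that drives a MemoryStore directly through the methods shown here; the "o.<message id>" key format follows the inbound/outbound naming convention described in the filestore.go write() comment above.

```
package main

import (
	"fmt"

	mqtt "github.com/eclipse/paho.mqtt.golang"
	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	// Normally the client manages the store itself; this only exercises the
	// methods defined in memstore.go. Keys use the "o.<id>" outbound
	// convention noted in filestore.go.
	store := mqtt.NewMemoryStore()
	store.Open()

	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.TopicName = "example/topic"
	pub.Payload = []byte("hello")
	pub.Qos = 1
	pub.MessageID = 1

	store.Put("o.1", pub)
	fmt.Println("keys in store:", store.All())

	if m, ok := store.Get("o.1").(*packets.PublishPacket); ok {
		fmt.Println("restored publish for topic:", m.TopicName)
	}

	store.Del("o.1")
	store.Close()
}
```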
+type MemoryStore struct { + sync.RWMutex + messages map[string]packets.ControlPacket + opened bool +} + +// NewMemoryStore returns a pointer to a new instance of +// MemoryStore, the instance is not initialized and ready to +// use until Open() has been called on it. +func NewMemoryStore() *MemoryStore { + store := &MemoryStore{ + messages: make(map[string]packets.ControlPacket), + opened: false, + } + return store +} + +// Open initializes a MemoryStore instance. +func (store *MemoryStore) Open() { + store.Lock() + defer store.Unlock() + store.opened = true + DEBUG.Println(STR, "memorystore initialized") +} + +// Put takes a key and a pointer to a Message and stores the +// message. +func (store *MemoryStore) Put(key string, message packets.ControlPacket) { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return + } + store.messages[key] = message +} + +// Get takes a key and looks in the store for a matching Message +// returning either the Message pointer or nil. +func (store *MemoryStore) Get(key string) packets.ControlPacket { + store.RLock() + defer store.RUnlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return nil + } + mid := mIDFromKey(key) + m := store.messages[key] + if m == nil { + CRITICAL.Println(STR, "memorystore get: message", mid, "not found") + } else { + DEBUG.Println(STR, "memorystore get: message", mid, "found") + } + return m +} + +// All returns a slice of strings containing all the keys currently +// in the MemoryStore. +func (store *MemoryStore) All() []string { + store.RLock() + defer store.RUnlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return nil + } + keys := []string{} + for k := range store.messages { + keys = append(keys, k) + } + return keys +} + +// Del takes a key, searches the MemoryStore and if the key is found +// deletes the Message pointer associated with it. +func (store *MemoryStore) Del(key string) { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return + } + mid := mIDFromKey(key) + m := store.messages[key] + if m == nil { + WARN.Println(STR, "memorystore del: message", mid, "not found") + } else { + store.messages[key] = nil + DEBUG.Println(STR, "memorystore del: message", mid, "was deleted") + } +} + +// Close will disallow modifications to the state of the store. +func (store *MemoryStore) Close() { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to close memory store, but not open") + return + } + store.opened = false + DEBUG.Println(STR, "memorystore closed") +} + +// Reset eliminates all persisted message data in the store. +func (store *MemoryStore) Reset() { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to reset memory store, but not open") + } + store.messages = make(map[string]packets.ControlPacket) + WARN.Println(STR, "memorystore wiped") +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/message.go b/vendor/github.com/eclipse/paho.mqtt.golang/message.go new file mode 100644 index 000000000..b1b716488 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/message.go @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "github.com/eclipse/paho.mqtt.golang/packets" +) + +// Message defines the externals that a message implementation must support +// these are received messages that are passed to the callbacks, not internal +// messages +type Message interface { + Duplicate() bool + Qos() byte + Retained() bool + Topic() string + MessageID() uint16 + Payload() []byte +} + +type message struct { + duplicate bool + qos byte + retained bool + topic string + messageID uint16 + payload []byte +} + +func (m *message) Duplicate() bool { + return m.duplicate +} + +func (m *message) Qos() byte { + return m.qos +} + +func (m *message) Retained() bool { + return m.retained +} + +func (m *message) Topic() string { + return m.topic +} + +func (m *message) MessageID() uint16 { + return m.messageID +} + +func (m *message) Payload() []byte { + return m.payload +} + +func messageFromPublish(p *packets.PublishPacket) Message { + return &message{ + duplicate: p.Dup, + qos: p.Qos, + retained: p.Retain, + topic: p.TopicName, + messageID: p.MessageID, + payload: p.Payload, + } +} + +func newConnectMsgFromOptions(options *ClientOptions) *packets.ConnectPacket { + m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket) + + m.CleanSession = options.CleanSession + m.WillFlag = options.WillEnabled + m.WillRetain = options.WillRetained + m.ClientIdentifier = options.ClientID + + if options.WillEnabled { + m.WillQos = options.WillQos + m.WillTopic = options.WillTopic + m.WillMessage = options.WillPayload + } + + if options.Username != "" { + m.UsernameFlag = true + m.Username = options.Username + //mustn't have password without user as well + if options.Password != "" { + m.PasswordFlag = true + m.Password = []byte(options.Password) + } + } + + m.Keepalive = uint16(options.KeepAlive.Seconds()) + + return m +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go new file mode 100644 index 000000000..9f4d0441c --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "fmt" + "sync" +) + +// MId is 16 bit message id as specified by the MQTT spec. +// In general, these values should not be depended upon by +// the client application. 
+type MId uint16 + +type messageIds struct { + sync.RWMutex + index map[uint16]Token +} + +const ( + midMin uint16 = 1 + midMax uint16 = 65535 +) + +func (mids *messageIds) cleanUp() { + mids.Lock() + for _, token := range mids.index { + switch t := token.(type) { + case *PublishToken: + t.err = fmt.Errorf("Connection lost before Publish completed") + case *SubscribeToken: + t.err = fmt.Errorf("Connection lost before Subscribe completed") + case *UnsubscribeToken: + t.err = fmt.Errorf("Connection lost before Unsubscribe completed") + } + token.flowComplete() + } + mids.index = make(map[uint16]Token) + mids.Unlock() +} + +func (mids *messageIds) freeID(id uint16) { + mids.Lock() + delete(mids.index, id) + mids.Unlock() +} + +func (mids *messageIds) getID(t Token) uint16 { + mids.Lock() + defer mids.Unlock() + for i := midMin; i < midMax; i++ { + if _, ok := mids.index[i]; !ok { + mids.index[i] = t + return i + } + } + return 0 +} + +func (mids *messageIds) getToken(id uint16) Token { + mids.RLock() + defer mids.RUnlock() + if token, ok := mids.index[id]; ok { + return token + } + return nil +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/net.go b/vendor/github.com/eclipse/paho.mqtt.golang/net.go new file mode 100644 index 000000000..9b3c0b334 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/net.go @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "reflect" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" + "golang.org/x/net/websocket" +) + +func signalError(c chan<- error, err error) { + select { + case c <- err: + default: + } +} + +func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration) (net.Conn, error) { + switch uri.Scheme { + case "ws": + conn, err := websocket.Dial(uri.String(), "mqtt", fmt.Sprintf("http://%s", uri.Host)) + if err != nil { + return nil, err + } + conn.PayloadType = websocket.BinaryFrame + return conn, err + case "wss": + config, _ := websocket.NewConfig(uri.String(), fmt.Sprintf("https://%s", uri.Host)) + config.Protocol = []string{"mqtt"} + config.TlsConfig = tlsc + conn, err := websocket.DialConfig(config) + if err != nil { + return nil, err + } + conn.PayloadType = websocket.BinaryFrame + return conn, err + case "tcp": + conn, err := net.DialTimeout("tcp", uri.Host, timeout) + if err != nil { + return nil, err + } + return conn, nil + case "ssl": + fallthrough + case "tls": + fallthrough + case "tcps": + conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc) + if err != nil { + return nil, err + } + return conn, nil + } + return nil, errors.New("Unknown protocol") +} + +// actually read incoming messages off the wire +// send Message object into ibound channel +func incoming(c *client) { + var err error + var cp packets.ControlPacket + + defer c.workers.Done() + + DEBUG.Println(NET, "incoming started") + + for { + if cp, err = packets.ReadPacket(c.conn); err != nil { + break + } + DEBUG.Println(NET, "Received Message") + select { + case c.ibound <- cp: + // Notify keepalive logic that we recently received a packet + if c.options.KeepAlive != 0 { + 
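+				// keepalive() treats this signal as proof of a live connection
+				// and resets its receive-activity timer.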
c.packetResp <- struct{}{} + } + case <-c.stop: + // This avoids a deadlock should a message arrive while shutting down. + // In that case the "reader" of c.ibound might already be gone + WARN.Println(NET, "incoming dropped a received message during shutdown") + break + } + } + // We received an error on read. + // If disconnect is in progress, swallow error and return + select { + case <-c.stop: + DEBUG.Println(NET, "incoming stopped") + return + // Not trying to disconnect, send the error to the errors channel + default: + ERROR.Println(NET, "incoming stopped with error", err) + signalError(c.errors, err) + return + } +} + +// receive a Message object on obound, and then +// actually send outgoing message to the wire +func outgoing(c *client) { + defer c.workers.Done() + DEBUG.Println(NET, "outgoing started") + + for { + DEBUG.Println(NET, "outgoing waiting for an outbound message") + select { + case <-c.stop: + DEBUG.Println(NET, "outgoing stopped") + return + case pub := <-c.obound: + msg := pub.p.(*packets.PublishPacket) + + if c.options.WriteTimeout > 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.options.WriteTimeout)) + } + + if err := msg.Write(c.conn); err != nil { + ERROR.Println(NET, "outgoing stopped with error", err) + signalError(c.errors, err) + return + } + + if c.options.WriteTimeout > 0 { + // If we successfully wrote, we don't want the timeout to happen during an idle period + // so we reset it to infinite. + c.conn.SetWriteDeadline(time.Time{}) + } + + if msg.Qos == 0 { + pub.t.flowComplete() + } + DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID) + case msg := <-c.oboundP: + switch msg.p.(type) { + case *packets.SubscribePacket: + msg.p.(*packets.SubscribePacket).MessageID = c.getID(msg.t) + case *packets.UnsubscribePacket: + msg.p.(*packets.UnsubscribePacket).MessageID = c.getID(msg.t) + } + DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p)) + if err := msg.p.Write(c.conn); err != nil { + ERROR.Println(NET, "outgoing stopped with error", err) + signalError(c.errors, err) + return + } + switch msg.p.(type) { + case *packets.DisconnectPacket: + msg.t.(*DisconnectToken).flowComplete() + DEBUG.Println(NET, "outbound wrote disconnect, stopping") + return + } + } + // Reset ping timer after sending control packet. 
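+			// The send is non-blocking (see the default case below); if the
+			// keepalive goroutine has not yet drained the previous signal, the
+			// reset is skipped rather than stalling the outgoing loop.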
+ if c.options.KeepAlive != 0 { + select { + case c.keepaliveReset <- struct{}{}: + default: + DEBUG.Println(NET, "couldn't send keepalive signal in outbound as channel full") + } + } + } +} + +// receive Message objects on ibound +// store messages if necessary +// send replies on obound +// delete messages from store if necessary +func alllogic(c *client) { + defer c.workers.Done() + DEBUG.Println(NET, "logic started") + + for { + DEBUG.Println(NET, "logic waiting for msg on ibound") + + select { + case msg := <-c.ibound: + DEBUG.Println(NET, "logic got msg on ibound") + persistInbound(c.persist, msg) + switch m := msg.(type) { + case *packets.PingrespPacket: + DEBUG.Println(NET, "received pingresp") + c.pingResp <- struct{}{} + case *packets.SubackPacket: + DEBUG.Println(NET, "received suback, id:", m.MessageID) + token := c.getToken(m.MessageID).(*SubscribeToken) + DEBUG.Println(NET, "granted qoss", m.ReturnCodes) + for i, qos := range m.ReturnCodes { + token.subResult[token.subs[i]] = qos + } + token.flowComplete() + c.freeID(m.MessageID) + case *packets.UnsubackPacket: + DEBUG.Println(NET, "received unsuback, id:", m.MessageID) + token := c.getToken(m.MessageID).(*UnsubscribeToken) + token.flowComplete() + c.freeID(m.MessageID) + case *packets.PublishPacket: + DEBUG.Println(NET, "received publish, msgId:", m.MessageID) + DEBUG.Println(NET, "putting msg on onPubChan") + switch m.Qos { + case 2: + c.incomingPubChan <- m + DEBUG.Println(NET, "done putting msg on incomingPubChan") + pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket) + pr.MessageID = m.MessageID + DEBUG.Println(NET, "putting pubrec msg on obound") + c.oboundP <- &PacketAndToken{p: pr, t: nil} + DEBUG.Println(NET, "done putting pubrec msg on obound") + case 1: + c.incomingPubChan <- m + DEBUG.Println(NET, "done putting msg on incomingPubChan") + pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket) + pa.MessageID = m.MessageID + DEBUG.Println(NET, "putting puback msg on obound") + c.oboundP <- &PacketAndToken{p: pa, t: nil} + DEBUG.Println(NET, "done putting puback msg on obound") + case 0: + c.incomingPubChan <- m + DEBUG.Println(NET, "done putting msg on incomingPubChan") + } + case *packets.PubackPacket: + DEBUG.Println(NET, "received puback, id:", m.MessageID) + // c.receipts.get(msg.MsgId()) <- Receipt{} + // c.receipts.end(msg.MsgId()) + c.getToken(m.MessageID).flowComplete() + c.freeID(m.MessageID) + case *packets.PubrecPacket: + DEBUG.Println(NET, "received pubrec, id:", m.MessageID) + prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket) + prel.MessageID = m.MessageID + select { + case c.oboundP <- &PacketAndToken{p: prel, t: nil}: + case <-time.After(time.Second): + } + case *packets.PubrelPacket: + DEBUG.Println(NET, "received pubrel, id:", m.MessageID) + pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket) + pc.MessageID = m.MessageID + select { + case c.oboundP <- &PacketAndToken{p: pc, t: nil}: + case <-time.After(time.Second): + } + case *packets.PubcompPacket: + DEBUG.Println(NET, "received pubcomp, id:", m.MessageID) + c.getToken(m.MessageID).flowComplete() + c.freeID(m.MessageID) + } + case <-c.stop: + WARN.Println(NET, "logic stopped") + return + } + } +} + +func errorWatch(c *client) { + select { + case <-c.stop: + WARN.Println(NET, "errorWatch stopped") + return + case err := <-c.errors: + ERROR.Println(NET, "error triggered, stopping") + c.internalConnLost(err) + return + } +} diff --git 
a/vendor/github.com/eclipse/paho.mqtt.golang/notice.html b/vendor/github.com/eclipse/paho.mqtt.golang/notice.html new file mode 100644 index 000000000..f19c483b9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/notice.html @@ -0,0 +1,108 @@
[notice.html adds the standard Eclipse Foundation Software User Agreement, dated February 1, 2011, that ships with the vendored library. It covers Usage Of Content, Applicable Licenses (the Eclipse Public License v1.0, plus per-module "about.html" and "license.html" terms), Use of Provisioning Technology (p2 and the Eclipse Update Manager, including the Installable Software Agreement), and Cryptography export restrictions, and notes that Java and all Java-based trademarks are trademarks of Oracle Corporation in the United States, other countries, or both.]
+ + diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/oops.go b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go new file mode 100644 index 000000000..39630d7f2 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +func chkerr(e error) { + if e != nil { + panic(e) + } +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options.go b/vendor/github.com/eclipse/paho.mqtt.golang/options.go new file mode 100644 index 000000000..16129ceb4 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/options.go @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "crypto/tls" + "net/url" + "time" +) + +// MessageHandler is a callback type which can be set to be +// executed upon the arrival of messages published to topics +// to which the client is subscribed. +type MessageHandler func(Client, Message) + +// ConnectionLostHandler is a callback type which can be set to be +// executed upon an unintended disconnection from the MQTT broker. +// Disconnects caused by calling Disconnect or ForceDisconnect will +// not cause an OnConnectionLost callback to execute. +type ConnectionLostHandler func(Client, error) + +// OnConnectHandler is a callback that is called when the client +// state changes from unconnected/disconnected to connected. Both +// at initial connection and on reconnection +type OnConnectHandler func(Client) + +// ClientOptions contains configurable options for an Client. +type ClientOptions struct { + Servers []*url.URL + ClientID string + Username string + Password string + CleanSession bool + Order bool + WillEnabled bool + WillTopic string + WillPayload []byte + WillQos byte + WillRetained bool + ProtocolVersion uint + protocolVersionExplicit bool + TLSConfig tls.Config + KeepAlive time.Duration + PingTimeout time.Duration + ConnectTimeout time.Duration + MaxReconnectInterval time.Duration + AutoReconnect bool + Store Store + DefaultPublishHander MessageHandler + OnConnect OnConnectHandler + OnConnectionLost ConnectionLostHandler + WriteTimeout time.Duration + MessageChannelDepth uint +} + +// NewClientOptions will create a new ClientClientOptions type with some +// default values. 
+// Port: 1883
+// CleanSession: True
+// Order: True
+// KeepAlive: 30 (seconds)
+// ConnectTimeout: 30 (seconds)
+// MaxReconnectInterval: 10 (minutes)
+// AutoReconnect: True
+func NewClientOptions() *ClientOptions {
+	o := &ClientOptions{
+		Servers:                 nil,
+		ClientID:                "",
+		Username:                "",
+		Password:                "",
+		CleanSession:            true,
+		Order:                   true,
+		WillEnabled:             false,
+		WillTopic:               "",
+		WillPayload:             nil,
+		WillQos:                 0,
+		WillRetained:            false,
+		ProtocolVersion:         0,
+		protocolVersionExplicit: false,
+		TLSConfig:               tls.Config{},
+		KeepAlive:               30 * time.Second,
+		PingTimeout:             10 * time.Second,
+		ConnectTimeout:          30 * time.Second,
+		MaxReconnectInterval:    10 * time.Minute,
+		AutoReconnect:           true,
+		Store:                   nil,
+		OnConnect:               nil,
+		OnConnectionLost:        DefaultConnectionLostHandler,
+		WriteTimeout:            0, // 0 represents timeout disabled
+		MessageChannelDepth:     100,
+	}
+	return o
+}
+
+// AddBroker adds a broker URI to the list of brokers to be used. The format should be
+// scheme://host:port
+// where "scheme" is one of "tcp", "ssl", or "ws", "host" is the IP address (or hostname)
+// and "port" is the port on which the broker is accepting connections.
+func (o *ClientOptions) AddBroker(server string) *ClientOptions {
+	brokerURI, err := url.Parse(server)
+	if err == nil {
+		o.Servers = append(o.Servers, brokerURI)
+	}
+	return o
+}
+
+// SetClientID will set the client id to be used by this client when
+// connecting to the MQTT broker. According to the MQTT v3.1 specification,
+// a client id must be no longer than 23 characters.
+func (o *ClientOptions) SetClientID(id string) *ClientOptions {
+	o.ClientID = id
+	return o
+}
+
+// SetUsername will set the username to be used by this client when connecting
+// to the MQTT broker. Note: without the use of SSL/TLS, this information will
+// be sent in plaintext across the wire.
+func (o *ClientOptions) SetUsername(u string) *ClientOptions {
+	o.Username = u
+	return o
+}
+
+// SetPassword will set the password to be used by this client when connecting
+// to the MQTT broker. Note: without the use of SSL/TLS, this information will
+// be sent in plaintext across the wire.
+func (o *ClientOptions) SetPassword(p string) *ClientOptions {
+	o.Password = p
+	return o
+}
+
+// SetCleanSession will set the "clean session" flag in the connect message
+// when this client connects to an MQTT broker. By setting this flag, you are
+// indicating that no messages saved by the broker for this client should be
+// delivered. Any messages that were queued by this client before it previously
+// disconnected, but were never sent, will also not be sent after connecting to
+// the broker.
+func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions {
+	o.CleanSession = clean
+	return o
+}
+
+// SetOrderMatters will set the message routing to guarantee order within
+// each QoS level. By default, this value is true. If set to false,
+// this flag indicates that messages can be delivered asynchronously
+// from the client to the application and possibly arrive out of order.
+func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions {
+	o.Order = order
+	return o
+}
+
+// SetTLSConfig will set an SSL/TLS configuration to be used when connecting
+// to an MQTT broker. Please read the official Go documentation for more
+// information.
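+// A minimal illustrative sketch of supplying a custom root CA (the file name
+// and the opts variable are assumptions, not part of this API):
+//
+//	pool := x509.NewCertPool()
+//	if pem, err := ioutil.ReadFile("ca.pem"); err == nil {
+//		pool.AppendCertsFromPEM(pem)
+//	}
+//	opts.SetTLSConfig(&tls.Config{RootCAs: pool})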
+func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions { + o.TLSConfig = *t + return o +} + +// SetStore will set the implementation of the Store interface +// used to provide message persistence in cases where QoS levels +// QoS_ONE or QoS_TWO are used. If no store is provided, then the +// client will use MemoryStore by default. +func (o *ClientOptions) SetStore(s Store) *ClientOptions { + o.Store = s + return o +} + +// SetKeepAlive will set the amount of time (in seconds) that the client +// should wait before sending a PING request to the broker. This will +// allow the client to know that a connection has not been lost with the +// server. +func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions { + o.KeepAlive = k + return o +} + +// SetPingTimeout will set the amount of time (in seconds) that the client +// will wait after sending a PING request to the broker, before deciding +// that the connection has been lost. Default is 10 seconds. +func (o *ClientOptions) SetPingTimeout(k time.Duration) *ClientOptions { + o.PingTimeout = k + return o +} + +// SetProtocolVersion sets the MQTT version to be used to connect to the +// broker. Legitimate values are currently 3 - MQTT 3.1 or 4 - MQTT 3.1.1 +func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions { + if pv >= 3 && pv <= 4 { + o.ProtocolVersion = pv + o.protocolVersionExplicit = true + } + return o +} + +// UnsetWill will cause any set will message to be disregarded. +func (o *ClientOptions) UnsetWill() *ClientOptions { + o.WillEnabled = false + return o +} + +// SetWill accepts a string will message to be set. When the client connects, +// it will give this will message to the broker, which will then publish the +// provided payload (the will) to any clients that are subscribed to the provided +// topic. +func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions { + o.SetBinaryWill(topic, []byte(payload), qos, retained) + return o +} + +// SetBinaryWill accepts a []byte will message to be set. When the client connects, +// it will give this will message to the broker, which will then publish the +// provided payload (the will) to any clients that are subscribed to the provided +// topic. +func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions { + o.WillEnabled = true + o.WillTopic = topic + o.WillPayload = payload + o.WillQos = qos + o.WillRetained = retained + return o +} + +// SetDefaultPublishHandler sets the MessageHandler that will be called when a message +// is received that does not match any known subscriptions. +func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions { + o.DefaultPublishHander = defaultHandler + return o +} + +// SetOnConnectHandler sets the function to be called when the client is connected. Both +// at initial connection time and upon automatic reconnect. +func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions { + o.OnConnect = onConn + return o +} + +// SetConnectionLostHandler will set the OnConnectionLost callback to be executed +// in the case where the client unexpectedly loses connection with the MQTT broker. +func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions { + o.OnConnectionLost = onLost + return o +} + +// SetWriteTimeout puts a limit on how long a mqtt publish should block until it unblocks with a +// timeout error. A duration of 0 never times out. 
The default set by NewClientOptions is 0 (writes never time out).
+func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions {
+	o.WriteTimeout = t
+	return o
+}
+
+// SetConnectTimeout limits how long the client will wait when trying to open a connection
+// to an MQTT server before timing out and failing the attempt. A duration of 0 never times out.
+// Default 30 seconds. Currently only operational on TCP/TLS connections.
+func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions {
+	o.ConnectTimeout = t
+	return o
+}
+
+// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts
+// when the connection is lost.
+func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions {
+	o.MaxReconnectInterval = t
+	return o
+}
+
+// SetAutoReconnect sets whether the automatic reconnection logic should be used
+// when the connection is lost. Even if disabled, the ConnectionLostHandler is still
+// called.
+func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions {
+	o.AutoReconnect = a
+	return o
+}
+
+// SetMessageChannelDepth sets the size of the internal queue that holds messages while the
+// client is temporarily offline, allowing the application to publish when the client is
+// reconnecting. This setting is only valid if AutoReconnect is set to true; it is otherwise
+// ignored.
+func (o *ClientOptions) SetMessageChannelDepth(s uint) *ClientOptions {
+	o.MessageChannelDepth = s
+	return o
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go
new file mode 100644
index 000000000..85157d82e
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"crypto/tls"
+	"net/url"
+	"time"
+)
+
+// ClientOptionsReader provides an interface for reading ClientOptions after the client has been initialized.
+type ClientOptionsReader struct { + options *ClientOptions +} + +func (r *ClientOptionsReader) Servers() []*url.URL { + s := make([]*url.URL, len(r.options.Servers)) + + for i, u := range r.options.Servers { + nu := *u + s[i] = &nu + } + + return s +} + +func (r *ClientOptionsReader) ClientID() string { + s := r.options.ClientID + return s +} + +func (r *ClientOptionsReader) Username() string { + s := r.options.Username + return s +} + +func (r *ClientOptionsReader) Password() string { + s := r.options.Password + return s +} + +func (r *ClientOptionsReader) CleanSession() bool { + s := r.options.CleanSession + return s +} + +func (r *ClientOptionsReader) Order() bool { + s := r.options.Order + return s +} + +func (r *ClientOptionsReader) WillEnabled() bool { + s := r.options.WillEnabled + return s +} + +func (r *ClientOptionsReader) WillTopic() string { + s := r.options.WillTopic + return s +} + +func (r *ClientOptionsReader) WillPayload() []byte { + s := r.options.WillPayload + return s +} + +func (r *ClientOptionsReader) WillQos() byte { + s := r.options.WillQos + return s +} + +func (r *ClientOptionsReader) WillRetained() bool { + s := r.options.WillRetained + return s +} + +func (r *ClientOptionsReader) ProtocolVersion() uint { + s := r.options.ProtocolVersion + return s +} + +func (r *ClientOptionsReader) TLSConfig() tls.Config { + s := r.options.TLSConfig + return s +} + +func (r *ClientOptionsReader) KeepAlive() time.Duration { + s := r.options.KeepAlive + return s +} + +func (r *ClientOptionsReader) PingTimeout() time.Duration { + s := r.options.PingTimeout + return s +} + +func (r *ClientOptionsReader) ConnectTimeout() time.Duration { + s := r.options.ConnectTimeout + return s +} + +func (r *ClientOptionsReader) MaxReconnectInterval() time.Duration { + s := r.options.MaxReconnectInterval + return s +} + +func (r *ClientOptionsReader) AutoReconnect() bool { + s := r.options.AutoReconnect + return s +} + +func (r *ClientOptionsReader) WriteTimeout() time.Duration { + s := r.options.WriteTimeout + return s +} + +func (r *ClientOptionsReader) MessageChannelDepth() uint { + s := r.options.MessageChannelDepth + return s +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go new file mode 100644 index 000000000..a512ace0f --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go @@ -0,0 +1,51 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//ConnackPacket is an internal representation of the fields of the +//Connack MQTT packet +type ConnackPacket struct { + FixedHeader + SessionPresent bool + ReturnCode byte +} + +func (ca *ConnackPacket) String() string { + str := fmt.Sprintf("%s", ca.FixedHeader) + str += " " + str += fmt.Sprintf("sessionpresent: %t returncode: %d", ca.SessionPresent, ca.ReturnCode) + return str +} + +func (ca *ConnackPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.WriteByte(boolToByte(ca.SessionPresent)) + body.WriteByte(ca.ReturnCode) + ca.FixedHeader.RemainingLength = 2 + packet := ca.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (ca *ConnackPacket) Unpack(b io.Reader) error { + ca.SessionPresent = 1&decodeByte(b) > 0 + ca.ReturnCode = decodeByte(b) + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket 
+func (ca *ConnackPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go new file mode 100644 index 000000000..378f0ed58 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go @@ -0,0 +1,122 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//ConnectPacket is an internal representation of the fields of the +//Connect MQTT packet +type ConnectPacket struct { + FixedHeader + ProtocolName string + ProtocolVersion byte + CleanSession bool + WillFlag bool + WillQos byte + WillRetain bool + UsernameFlag bool + PasswordFlag bool + ReservedBit byte + Keepalive uint16 + + ClientIdentifier string + WillTopic string + WillMessage []byte + Username string + Password []byte +} + +func (c *ConnectPacket) String() string { + str := fmt.Sprintf("%s", c.FixedHeader) + str += " " + str += fmt.Sprintf("protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalive: %d clientId: %s willtopic: %s willmessage: %s Username: %s Password: %s", c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.Keepalive, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password) + return str +} + +func (c *ConnectPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeString(c.ProtocolName)) + body.WriteByte(c.ProtocolVersion) + body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7) + body.Write(encodeUint16(c.Keepalive)) + body.Write(encodeString(c.ClientIdentifier)) + if c.WillFlag { + body.Write(encodeString(c.WillTopic)) + body.Write(encodeBytes(c.WillMessage)) + } + if c.UsernameFlag { + body.Write(encodeString(c.Username)) + } + if c.PasswordFlag { + body.Write(encodeBytes(c.Password)) + } + c.FixedHeader.RemainingLength = body.Len() + packet := c.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (c *ConnectPacket) Unpack(b io.Reader) error { + c.ProtocolName = decodeString(b) + c.ProtocolVersion = decodeByte(b) + options := decodeByte(b) + c.ReservedBit = 1 & options + c.CleanSession = 1&(options>>1) > 0 + c.WillFlag = 1&(options>>2) > 0 + c.WillQos = 3 & (options >> 3) + c.WillRetain = 1&(options>>5) > 0 + c.PasswordFlag = 1&(options>>6) > 0 + c.UsernameFlag = 1&(options>>7) > 0 + c.Keepalive = decodeUint16(b) + c.ClientIdentifier = decodeString(b) + if c.WillFlag { + c.WillTopic = decodeString(b) + c.WillMessage = decodeBytes(b) + } + if c.UsernameFlag { + c.Username = decodeString(b) + } + if c.PasswordFlag { + c.Password = decodeBytes(b) + } + + return nil +} + +//Validate performs validation of the fields of a Connect packet +func (c *ConnectPacket) Validate() byte { + if c.PasswordFlag && !c.UsernameFlag { + return ErrRefusedBadUsernameOrPassword + } + if c.ReservedBit != 0 { + //Bad reserved bit + return ErrProtocolViolation + } + if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) { + //Mismatched or unsupported protocol version + return ErrRefusedBadProtocolVersion + } + if c.ProtocolName != "MQIsdp" && 
c.ProtocolName != "MQTT" { + //Bad protocol name + return ErrProtocolViolation + } + if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 { + //Bad size field + return ErrProtocolViolation + } + return Accepted +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (c *ConnectPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go new file mode 100644 index 000000000..e5c186920 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go @@ -0,0 +1,36 @@ +package packets + +import ( + "fmt" + "io" +) + +//DisconnectPacket is an internal representation of the fields of the +//Disconnect MQTT packet +type DisconnectPacket struct { + FixedHeader +} + +func (d *DisconnectPacket) String() string { + str := fmt.Sprintf("%s", d.FixedHeader) + return str +} + +func (d *DisconnectPacket) Write(w io.Writer) error { + packet := d.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (d *DisconnectPacket) Unpack(b io.Reader) error { + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (d *DisconnectPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go new file mode 100644 index 000000000..cbc194a26 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go @@ -0,0 +1,322 @@ +package packets + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" +) + +//ControlPacket defines the interface for structs intended to hold +//decoded MQTT packets, either from being read or before being +//written +type ControlPacket interface { + Write(io.Writer) error + Unpack(io.Reader) error + String() string + Details() Details +} + +//PacketNames maps the constants for each of the MQTT packet types +//to a string representation of their name. 
+var PacketNames = map[uint8]string{ + 1: "CONNECT", + 2: "CONNACK", + 3: "PUBLISH", + 4: "PUBACK", + 5: "PUBREC", + 6: "PUBREL", + 7: "PUBCOMP", + 8: "SUBSCRIBE", + 9: "SUBACK", + 10: "UNSUBSCRIBE", + 11: "UNSUBACK", + 12: "PINGREQ", + 13: "PINGRESP", + 14: "DISCONNECT", +} + +//Below are the constants assigned to each of the MQTT packet types +const ( + Connect = 1 + Connack = 2 + Publish = 3 + Puback = 4 + Pubrec = 5 + Pubrel = 6 + Pubcomp = 7 + Subscribe = 8 + Suback = 9 + Unsubscribe = 10 + Unsuback = 11 + Pingreq = 12 + Pingresp = 13 + Disconnect = 14 +) + +//Below are the const definitions for error codes returned by +//Connect() +const ( + Accepted = 0x00 + ErrRefusedBadProtocolVersion = 0x01 + ErrRefusedIDRejected = 0x02 + ErrRefusedServerUnavailable = 0x03 + ErrRefusedBadUsernameOrPassword = 0x04 + ErrRefusedNotAuthorised = 0x05 + ErrNetworkError = 0xFE + ErrProtocolViolation = 0xFF +) + +//ConnackReturnCodes is a map of the error codes constants for Connect() +//to a string representation of the error +var ConnackReturnCodes = map[uint8]string{ + 0: "Connection Accepted", + 1: "Connection Refused: Bad Protocol Version", + 2: "Connection Refused: Client Identifier Rejected", + 3: "Connection Refused: Server Unavailable", + 4: "Connection Refused: Username or Password in unknown format", + 5: "Connection Refused: Not Authorised", + 254: "Connection Error", + 255: "Connection Refused: Protocol Violation", +} + +//ConnErrors is a map of the errors codes constants for Connect() +//to a Go error +var ConnErrors = map[byte]error{ + Accepted: nil, + ErrRefusedBadProtocolVersion: errors.New("Unnacceptable protocol version"), + ErrRefusedIDRejected: errors.New("Identifier rejected"), + ErrRefusedServerUnavailable: errors.New("Server Unavailable"), + ErrRefusedBadUsernameOrPassword: errors.New("Bad user name or password"), + ErrRefusedNotAuthorised: errors.New("Not Authorized"), + ErrNetworkError: errors.New("Network Error"), + ErrProtocolViolation: errors.New("Protocol Violation"), +} + +//ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts +//to read an MQTT packet from the stream. It returns a ControlPacket +//representing the decoded MQTT packet and an error. One of these returns will +//always be nil, a nil ControlPacket indicating an error occurred. +func ReadPacket(r io.Reader) (cp ControlPacket, err error) { + var fh FixedHeader + b := make([]byte, 1) + + _, err = io.ReadFull(r, b) + if err != nil { + return nil, err + } + fh.unpack(b[0], r) + cp = NewControlPacketWithHeader(fh) + if cp == nil { + return nil, errors.New("Bad data from client") + } + packetBytes := make([]byte, fh.RemainingLength) + _, err = io.ReadFull(r, packetBytes) + if err != nil { + return nil, err + } + err = cp.Unpack(bytes.NewBuffer(packetBytes)) + return cp, err +} + +//NewControlPacket is used to create a new ControlPacket of the type specified +//by packetType, this is usually done by reference to the packet type constants +//defined in packets.go. The newly created ControlPacket is empty and a pointer +//is returned. 
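+//Callers typically assert the concrete type, as the client code in this
+//library does, for example:
+//
+//	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)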
+func NewControlPacket(packetType byte) (cp ControlPacket) { + switch packetType { + case Connect: + cp = &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}} + case Connack: + cp = &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}} + case Disconnect: + cp = &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}} + case Publish: + cp = &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}} + case Puback: + cp = &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}} + case Pubrec: + cp = &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}} + case Pubrel: + cp = &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}} + case Pubcomp: + cp = &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}} + case Subscribe: + cp = &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}} + case Suback: + cp = &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}} + case Unsubscribe: + cp = &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}} + case Unsuback: + cp = &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}} + case Pingreq: + cp = &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}} + case Pingresp: + cp = &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}} + default: + return nil + } + return cp +} + +//NewControlPacketWithHeader is used to create a new ControlPacket of the type +//specified within the FixedHeader that is passed to the function. +//The newly created ControlPacket is empty and a pointer is returned. +func NewControlPacketWithHeader(fh FixedHeader) (cp ControlPacket) { + switch fh.MessageType { + case Connect: + cp = &ConnectPacket{FixedHeader: fh} + case Connack: + cp = &ConnackPacket{FixedHeader: fh} + case Disconnect: + cp = &DisconnectPacket{FixedHeader: fh} + case Publish: + cp = &PublishPacket{FixedHeader: fh} + case Puback: + cp = &PubackPacket{FixedHeader: fh} + case Pubrec: + cp = &PubrecPacket{FixedHeader: fh} + case Pubrel: + cp = &PubrelPacket{FixedHeader: fh} + case Pubcomp: + cp = &PubcompPacket{FixedHeader: fh} + case Subscribe: + cp = &SubscribePacket{FixedHeader: fh} + case Suback: + cp = &SubackPacket{FixedHeader: fh} + case Unsubscribe: + cp = &UnsubscribePacket{FixedHeader: fh} + case Unsuback: + cp = &UnsubackPacket{FixedHeader: fh} + case Pingreq: + cp = &PingreqPacket{FixedHeader: fh} + case Pingresp: + cp = &PingrespPacket{FixedHeader: fh} + default: + return nil + } + return cp +} + +//Details struct returned by the Details() function called on +//ControlPackets to present details of the Qos and MessageID +//of the ControlPacket +type Details struct { + Qos byte + MessageID uint16 +} + +//FixedHeader is a struct to hold the decoded information from +//the fixed header of an MQTT ControlPacket +type FixedHeader struct { + MessageType byte + Dup bool + Qos byte + Retain bool + RemainingLength int +} + +func (fh FixedHeader) String() string { + return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength) +} + +func boolToByte(b bool) byte { + switch b { + case true: + return 1 + default: + return 0 + } +} + +func (fh *FixedHeader) pack() bytes.Buffer { + var header bytes.Buffer + header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain)) + header.Write(encodeLength(fh.RemainingLength)) + return header +} + +func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) { + 
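+	// Byte 1 of the fixed header packs the packet type into bits 7-4, the DUP
+	// flag into bit 3, the QoS level into bits 2-1 and the RETAIN flag into bit 0.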
fh.MessageType = typeAndFlags >> 4 + fh.Dup = (typeAndFlags>>3)&0x01 > 0 + fh.Qos = (typeAndFlags >> 1) & 0x03 + fh.Retain = typeAndFlags&0x01 > 0 + fh.RemainingLength = decodeLength(r) +} + +func decodeByte(b io.Reader) byte { + num := make([]byte, 1) + b.Read(num) + return num[0] +} + +func decodeUint16(b io.Reader) uint16 { + num := make([]byte, 2) + b.Read(num) + return binary.BigEndian.Uint16(num) +} + +func encodeUint16(num uint16) []byte { + bytes := make([]byte, 2) + binary.BigEndian.PutUint16(bytes, num) + return bytes +} + +func encodeString(field string) []byte { + fieldLength := make([]byte, 2) + binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) + return append(fieldLength, []byte(field)...) +} + +func decodeString(b io.Reader) string { + fieldLength := decodeUint16(b) + field := make([]byte, fieldLength) + b.Read(field) + return string(field) +} + +func decodeBytes(b io.Reader) []byte { + fieldLength := decodeUint16(b) + field := make([]byte, fieldLength) + b.Read(field) + return field +} + +func encodeBytes(field []byte) []byte { + fieldLength := make([]byte, 2) + binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) + return append(fieldLength, field...) +} + +func encodeLength(length int) []byte { + var encLength []byte + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + encLength = append(encLength, digit) + if length == 0 { + break + } + } + return encLength +} + +func decodeLength(r io.Reader) int { + var rLength uint32 + var multiplier uint32 + b := make([]byte, 1) + for multiplier < 27 { //fix: Infinite '(digit & 128) == 1' will cause the dead loop + io.ReadFull(r, b) + digit := b[0] + rLength |= uint32(digit&127) << multiplier + if (digit & 128) == 0 { + break + } + multiplier += 7 + } + return int(rLength) +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go new file mode 100644 index 000000000..5c3e88f94 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go @@ -0,0 +1,36 @@ +package packets + +import ( + "fmt" + "io" +) + +//PingreqPacket is an internal representation of the fields of the +//Pingreq MQTT packet +type PingreqPacket struct { + FixedHeader +} + +func (pr *PingreqPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + return str +} + +func (pr *PingreqPacket) Write(w io.Writer) error { + packet := pr.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PingreqPacket) Unpack(b io.Reader) error { + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PingreqPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go new file mode 100644 index 000000000..39ebc001e --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go @@ -0,0 +1,36 @@ +package packets + +import ( + "fmt" + "io" +) + +//PingrespPacket is an internal representation of the fields of the +//Pingresp MQTT packet +type PingrespPacket struct { + FixedHeader +} + +func (pr *PingrespPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + return str +} + +func (pr *PingrespPacket) Write(w io.Writer) error { + packet := pr.FixedHeader.pack() + 
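+	// A PINGRESP has no variable header or payload; only the two-byte fixed
+	// header is written.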
_, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PingrespPacket) Unpack(b io.Reader) error { + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PingrespPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go new file mode 100644 index 000000000..e30402cd4 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go @@ -0,0 +1,44 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubackPacket is an internal representation of the fields of the +//Puback MQTT packet +type PubackPacket struct { + FixedHeader + MessageID uint16 +} + +func (pa *PubackPacket) String() string { + str := fmt.Sprintf("%s", pa.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pa.MessageID) + return str +} + +func (pa *PubackPacket) Write(w io.Writer) error { + var err error + pa.FixedHeader.RemainingLength = 2 + packet := pa.FixedHeader.pack() + packet.Write(encodeUint16(pa.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pa *PubackPacket) Unpack(b io.Reader) error { + pa.MessageID = decodeUint16(b) + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pa *PubackPacket) Details() Details { + return Details{Qos: pa.Qos, MessageID: pa.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go new file mode 100644 index 000000000..fb994ae77 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go @@ -0,0 +1,44 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubcompPacket is an internal representation of the fields of the +//Pubcomp MQTT packet +type PubcompPacket struct { + FixedHeader + MessageID uint16 +} + +func (pc *PubcompPacket) String() string { + str := fmt.Sprintf("%s", pc.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pc.MessageID) + return str +} + +func (pc *PubcompPacket) Write(w io.Writer) error { + var err error + pc.FixedHeader.RemainingLength = 2 + packet := pc.FixedHeader.pack() + packet.Write(encodeUint16(pc.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pc *PubcompPacket) Unpack(b io.Reader) error { + pc.MessageID = decodeUint16(b) + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pc *PubcompPacket) Details() Details { + return Details{Qos: pc.Qos, MessageID: pc.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go new file mode 100644 index 000000000..b660ef4c5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go @@ -0,0 +1,80 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//PublishPacket is an internal representation of the fields of the +//Publish MQTT packet +type PublishPacket struct { + FixedHeader + TopicName string + MessageID uint16 + Payload []byte +} + +func (p *PublishPacket) String() string { + str := 
fmt.Sprintf("%s", p.FixedHeader) + str += " " + str += fmt.Sprintf("topicName: %s MessageID: %d", p.TopicName, p.MessageID) + str += " " + str += fmt.Sprintf("payload: %s", string(p.Payload)) + return str +} + +func (p *PublishPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeString(p.TopicName)) + if p.Qos > 0 { + body.Write(encodeUint16(p.MessageID)) + } + p.FixedHeader.RemainingLength = body.Len() + len(p.Payload) + packet := p.FixedHeader.pack() + packet.Write(body.Bytes()) + packet.Write(p.Payload) + _, err = w.Write(packet.Bytes()) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (p *PublishPacket) Unpack(b io.Reader) error { + var payloadLength = p.FixedHeader.RemainingLength + p.TopicName = decodeString(b) + if p.Qos > 0 { + p.MessageID = decodeUint16(b) + payloadLength -= len(p.TopicName) + 4 + } else { + payloadLength -= len(p.TopicName) + 2 + } + if payloadLength < 0 { + return fmt.Errorf("Error upacking publish, payload length < 0") + } + p.Payload = make([]byte, payloadLength) + _, err := b.Read(p.Payload) + + return err +} + +//Copy creates a new PublishPacket with the same topic and payload +//but an empty fixed header, useful for when you want to deliver +//a message with different properties such as Qos but the same +//content +func (p *PublishPacket) Copy() *PublishPacket { + newP := NewControlPacket(Publish).(*PublishPacket) + newP.TopicName = p.TopicName + newP.Payload = p.Payload + + return newP +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (p *PublishPacket) Details() Details { + return Details{Qos: p.Qos, MessageID: p.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go new file mode 100644 index 000000000..9874e6413 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go @@ -0,0 +1,44 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubrecPacket is an internal representation of the fields of the +//Pubrec MQTT packet +type PubrecPacket struct { + FixedHeader + MessageID uint16 +} + +func (pr *PubrecPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pr.MessageID) + return str +} + +func (pr *PubrecPacket) Write(w io.Writer) error { + var err error + pr.FixedHeader.RemainingLength = 2 + packet := pr.FixedHeader.pack() + packet.Write(encodeUint16(pr.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PubrecPacket) Unpack(b io.Reader) error { + pr.MessageID = decodeUint16(b) + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PubrecPacket) Details() Details { + return Details{Qos: pr.Qos, MessageID: pr.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go new file mode 100644 index 000000000..a7ecce754 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go @@ -0,0 +1,44 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubrelPacket is an internal representation of the fields of the +//Pubrel MQTT packet +type PubrelPacket struct { + FixedHeader + MessageID uint16 +} + +func (pr *PubrelPacket) String() 
string { + str := fmt.Sprintf("%s", pr.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pr.MessageID) + return str +} + +func (pr *PubrelPacket) Write(w io.Writer) error { + var err error + pr.FixedHeader.RemainingLength = 2 + packet := pr.FixedHeader.pack() + packet.Write(encodeUint16(pr.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PubrelPacket) Unpack(b io.Reader) error { + pr.MessageID = decodeUint16(b) + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PubrelPacket) Details() Details { + return Details{Qos: pr.Qos, MessageID: pr.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go new file mode 100644 index 000000000..557a7dbe7 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go @@ -0,0 +1,52 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//SubackPacket is an internal representation of the fields of the +//Suback MQTT packet +type SubackPacket struct { + FixedHeader + MessageID uint16 + ReturnCodes []byte +} + +func (sa *SubackPacket) String() string { + str := fmt.Sprintf("%s", sa.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", sa.MessageID) + return str +} + +func (sa *SubackPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + body.Write(encodeUint16(sa.MessageID)) + body.Write(sa.ReturnCodes) + sa.FixedHeader.RemainingLength = body.Len() + packet := sa.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (sa *SubackPacket) Unpack(b io.Reader) error { + var qosBuffer bytes.Buffer + sa.MessageID = decodeUint16(b) + qosBuffer.ReadFrom(b) + sa.ReturnCodes = qosBuffer.Bytes() + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (sa *SubackPacket) Details() Details { + return Details{Qos: 0, MessageID: sa.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go new file mode 100644 index 000000000..c418ef0fb --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go @@ -0,0 +1,62 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//SubscribePacket is an internal representation of the fields of the +//Subscribe MQTT packet +type SubscribePacket struct { + FixedHeader + MessageID uint16 + Topics []string + Qoss []byte +} + +func (s *SubscribePacket) String() string { + str := fmt.Sprintf("%s", s.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d topics: %s", s.MessageID, s.Topics) + return str +} + +func (s *SubscribePacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeUint16(s.MessageID)) + for i, topic := range s.Topics { + body.Write(encodeString(topic)) + body.WriteByte(s.Qoss[i]) + } + s.FixedHeader.RemainingLength = body.Len() + packet := s.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (s *SubscribePacket) Unpack(b io.Reader) error { + s.MessageID = decodeUint16(b) + 
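+	// After the two-byte packet identifier, the payload is a sequence of
+	// UTF-8 encoded topic filters, each followed by a one-byte requested QoS.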
payloadLength := s.FixedHeader.RemainingLength - 2 + for payloadLength > 0 { + topic := decodeString(b) + s.Topics = append(s.Topics, topic) + qos := decodeByte(b) + s.Qoss = append(s.Qoss, qos) + payloadLength -= 2 + len(topic) + 1 //2 bytes of string length, plus string, plus 1 byte for Qos + } + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (s *SubscribePacket) Details() Details { + return Details{Qos: 1, MessageID: s.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go new file mode 100644 index 000000000..b3b91ce3c --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go @@ -0,0 +1,44 @@ +package packets + +import ( + "fmt" + "io" +) + +//UnsubackPacket is an internal representation of the fields of the +//Unsuback MQTT packet +type UnsubackPacket struct { + FixedHeader + MessageID uint16 +} + +func (ua *UnsubackPacket) String() string { + str := fmt.Sprintf("%s", ua.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", ua.MessageID) + return str +} + +func (ua *UnsubackPacket) Write(w io.Writer) error { + var err error + ua.FixedHeader.RemainingLength = 2 + packet := ua.FixedHeader.pack() + packet.Write(encodeUint16(ua.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (ua *UnsubackPacket) Unpack(b io.Reader) error { + ua.MessageID = decodeUint16(b) + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (ua *UnsubackPacket) Details() Details { + return Details{Qos: 0, MessageID: ua.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go new file mode 100644 index 000000000..dc6a89ec5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go @@ -0,0 +1,55 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//UnsubscribePacket is an internal representation of the fields of the +//Unsubscribe MQTT packet +type UnsubscribePacket struct { + FixedHeader + MessageID uint16 + Topics []string +} + +func (u *UnsubscribePacket) String() string { + str := fmt.Sprintf("%s", u.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", u.MessageID) + return str +} + +func (u *UnsubscribePacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + body.Write(encodeUint16(u.MessageID)) + for _, topic := range u.Topics { + body.Write(encodeString(topic)) + } + u.FixedHeader.RemainingLength = body.Len() + packet := u.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (u *UnsubscribePacket) Unpack(b io.Reader) error { + u.MessageID = decodeUint16(b) + var topic string + for topic = decodeString(b); topic != ""; topic = decodeString(b) { + u.Topics = append(u.Topics, topic) + } + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (u *UnsubscribePacket) Details() Details { + return Details{Qos: 1, MessageID: u.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/ping.go b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go new file mode 100644 index 
000000000..2558db2d8 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "errors" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +func keepalive(c *client) { + DEBUG.Println(PNG, "keepalive starting") + + receiveInterval := c.options.KeepAlive + (1 * time.Second) + pingTimer := timer{Timer: time.NewTimer(c.options.KeepAlive)} + receiveTimer := timer{Timer: time.NewTimer(receiveInterval)} + pingRespTimer := timer{Timer: time.NewTimer(c.options.PingTimeout)} + + pingRespTimer.Stop() + + for { + select { + case <-c.stop: + DEBUG.Println(PNG, "keepalive stopped") + c.workers.Done() + return + case <-pingTimer.C: + sendPing(&pingTimer, &pingRespTimer, c) + case <-c.keepaliveReset: + DEBUG.Println(NET, "resetting ping timer") + pingTimer.Reset(c.options.KeepAlive) + case <-c.pingResp: + DEBUG.Println(NET, "resetting ping timeout timer") + pingRespTimer.Stop() + pingTimer.Reset(c.options.KeepAlive) + receiveTimer.Reset(receiveInterval) + case <-c.packetResp: + DEBUG.Println(NET, "resetting receive timer") + receiveTimer.Reset(receiveInterval) + case <-receiveTimer.C: + receiveTimer.SetRead(true) + receiveTimer.Reset(receiveInterval) + sendPing(&pingTimer, &pingRespTimer, c) + case <-pingRespTimer.C: + pingRespTimer.SetRead(true) + CRITICAL.Println(PNG, "pingresp not received, disconnecting") + c.workers.Done() + c.internalConnLost(errors.New("pingresp not received, disconnecting")) + pingTimer.Stop() + return + } + } +} + +type timer struct { + *time.Timer + readFrom bool +} + +func (t *timer) SetRead(v bool) { + t.readFrom = v +} + +func (t *timer) Stop() bool { + defer t.SetRead(true) + + if !t.Timer.Stop() && !t.readFrom { + <-t.C + return false + } + return true +} + +func (t *timer) Reset(d time.Duration) bool { + defer t.SetRead(false) + t.Stop() + return t.Timer.Reset(d) +} + +func sendPing(pt *timer, rt *timer, c *client) { + pt.SetRead(true) + DEBUG.Println(PNG, "keepalive sending ping") + ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket) + //We don't want to wait behind large messages being sent, the Write call + //will block until it it able to send the packet. + ping.Write(c.conn) + + rt.Reset(c.options.PingTimeout) +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/router.go b/vendor/github.com/eclipse/paho.mqtt.golang/router.go new file mode 100644 index 000000000..9e44b0f7f --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/router.go @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "container/list" + "strings" + "sync" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +// route is a type which associates MQTT Topic strings with a +// callback to be executed upon the arrival of a message associated +// with a subscription to that topic. +type route struct { + topic string + callback MessageHandler +} + +// match takes a slice of strings which represent the route being tested having been split on '/' +// separators, and a slice of strings representing the topic string in the published message, similarly +// split. +// The function determines if the topic string matches the route according to the MQTT topic rules +// and returns a boolean of the outcome +func match(route []string, topic []string) bool { + if len(route) == 0 { + if len(topic) == 0 { + return true + } + return false + } + + if len(topic) == 0 { + if route[0] == "#" { + return true + } + return false + } + + if route[0] == "#" { + return true + } + + if (route[0] == "+") || (route[0] == topic[0]) { + return match(route[1:], topic[1:]) + } + + return false +} + +func routeIncludesTopic(route, topic string) bool { + return match(strings.Split(route, "/"), strings.Split(topic, "/")) +} + +// match takes the topic string of the published message and does a basic compare to the +// string of the current Route, if they match it returns true +func (r *route) match(topic string) bool { + return r.topic == topic || routeIncludesTopic(r.topic, topic) +} + +type router struct { + sync.RWMutex + routes *list.List + defaultHandler MessageHandler + messages chan *packets.PublishPacket + stop chan bool +} + +// newRouter returns a new instance of a Router and channel which can be used to tell the Router +// to stop +func newRouter() (*router, chan bool) { + router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket), stop: make(chan bool)} + stop := router.stop + return router, stop +} + +// addRoute takes a topic string and MessageHandler callback. It looks in the current list of +// routes to see if there is already a matching Route. If there is it replaces the current +// callback with the new one. If not it add a new entry to the list of Routes. +func (r *router) addRoute(topic string, callback MessageHandler) { + r.Lock() + defer r.Unlock() + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(topic) { + r := e.Value.(*route) + r.callback = callback + return + } + } + r.routes.PushBack(&route{topic: topic, callback: callback}) +} + +// deleteRoute takes a route string, looks for a matching Route in the list of Routes. If +// found it removes the Route from the list. +func (r *router) deleteRoute(topic string) { + r.Lock() + defer r.Unlock() + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(topic) { + r.routes.Remove(e) + return + } + } +} + +// setDefaultHandler assigns a default callback that will be called if no matching Route +// is found for an incoming Publish. 
+func (r *router) setDefaultHandler(handler MessageHandler) { + r.Lock() + defer r.Unlock() + r.defaultHandler = handler +} + +// matchAndDispatch takes a channel of Message pointers as input and starts a go routine that +// takes messages off the channel, matches them against the internal route list and calls the +// associated callback (or the defaultHandler, if one exists and no other route matched). If +// anything is sent down the stop channel the function will end. +func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *client) { + go func() { + for { + select { + case message := <-messages: + sent := false + r.RLock() + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(message.TopicName) { + if order { + e.Value.(*route).callback(client, messageFromPublish(message)) + } else { + go e.Value.(*route).callback(client, messageFromPublish(message)) + } + sent = true + } + } + if !sent && r.defaultHandler != nil { + if order { + r.defaultHandler(client, messageFromPublish(message)) + } else { + go r.defaultHandler(client, messageFromPublish(message)) + } + } + r.RUnlock() + case <-r.stop: + return + } + } + }() +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/store.go b/vendor/github.com/eclipse/paho.mqtt.golang/store.go new file mode 100644 index 000000000..1cc9e1d36 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/store.go @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "fmt" + "strconv" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +const ( + inboundPrefix = "i." + outboundPrefix = "o." +) + +// Store is an interface which can be used to provide implementations +// for message persistence. +// Because we may have to store distinct messages with the same +// message ID, we need a unique key for each message. This is +// possible by prepending "i." or "o." to each message id +type Store interface { + Open() + Put(key string, message packets.ControlPacket) + Get(key string) packets.ControlPacket + All() []string + Del(key string) + Close() + Reset() +} + +// A key MUST have the form "X.[messageid]" +// where X is 'i' or 'o' +func mIDFromKey(key string) uint16 { + s := key[2:] + i, err := strconv.Atoi(s) + chkerr(err) + return uint16(i) +} + +// Return a string of the form "i.[id]" +func inboundKeyFromMID(id uint16) string { + return fmt.Sprintf("%s%d", inboundPrefix, id) +} + +// Return a string of the form "o.[id]" +func outboundKeyFromMID(id uint16) string { + return fmt.Sprintf("%s%d", outboundPrefix, id) +} + +// govern which outgoing messages are persisted +func persistOutbound(s Store, m packets.ControlPacket) { + switch m.Details().Qos { + case 0: + switch m.(type) { + case *packets.PubackPacket, *packets.PubcompPacket: + // Sending puback. delete matching publish + // from ibound + s.Del(inboundKeyFromMID(m.Details().MessageID)) + } + case 1: + switch m.(type) { + case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket: + // Sending publish. 
store in obound + // until puback received + s.Put(outboundKeyFromMID(m.Details().MessageID), m) + default: + ERROR.Println(STR, "Asked to persist an invalid message type") + } + case 2: + switch m.(type) { + case *packets.PublishPacket: + // Sending publish. store in obound + // until pubrel received + s.Put(outboundKeyFromMID(m.Details().MessageID), m) + default: + ERROR.Println(STR, "Asked to persist an invalid message type") + } + } +} + +// govern which incoming messages are persisted +func persistInbound(s Store, m packets.ControlPacket) { + switch m.Details().Qos { + case 0: + switch m.(type) { + case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket: + // Received a puback. delete matching publish + // from obound + s.Del(outboundKeyFromMID(m.Details().MessageID)) + case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket: + default: + ERROR.Println(STR, "Asked to persist an invalid messages type") + } + case 1: + switch m.(type) { + case *packets.PublishPacket, *packets.PubrelPacket: + // Received a publish. store it in ibound + // until puback sent + s.Put(inboundKeyFromMID(m.Details().MessageID), m) + default: + ERROR.Println(STR, "Asked to persist an invalid messages type") + } + case 2: + switch m.(type) { + case *packets.PublishPacket: + // Received a publish. store it in ibound + // until pubrel received + s.Put(inboundKeyFromMID(m.Details().MessageID), m) + default: + ERROR.Println(STR, "Asked to persist an invalid messages type") + } + } +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/token.go b/vendor/github.com/eclipse/paho.mqtt.golang/token.go new file mode 100644 index 000000000..dc54d6d6c --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/token.go @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2014 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Allan Stockdill-Mander + */ + +package mqtt + +import ( + "sync" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +//PacketAndToken is a struct that contains both a ControlPacket and a +//Token. This struct is passed via channels between the client interface +//code and the underlying code responsible for sending and receiving +//MQTT messages. +type PacketAndToken struct { + p packets.ControlPacket + t Token +} + +//Token defines the interface for the tokens used to indicate when +//actions have completed. +type Token interface { + Wait() bool + WaitTimeout(time.Duration) bool + flowComplete() + Error() error +} + +type baseToken struct { + m sync.RWMutex + complete chan struct{} + ready bool + err error +} + +// Wait will wait indefinitely for the Token to complete, ie the Publish +// to be sent and confirmed receipt from the broker +func (b *baseToken) Wait() bool { + b.m.Lock() + defer b.m.Unlock() + if !b.ready { + <-b.complete + b.ready = true + } + return b.ready +} + +// WaitTimeout takes a time in ms to wait for the flow associated with the +// Token to complete, returns true if it returned before the timeout or +// returns false if the timeout occurred. 
In the case of a timeout the Token +// does not have an error set in case the caller wishes to wait again +func (b *baseToken) WaitTimeout(d time.Duration) bool { + b.m.Lock() + defer b.m.Unlock() + if !b.ready { + select { + case <-b.complete: + b.ready = true + case <-time.After(d): + } + } + return b.ready +} + +func (b *baseToken) flowComplete() { + close(b.complete) +} + +func (b *baseToken) Error() error { + b.m.RLock() + defer b.m.RUnlock() + return b.err +} + +func newToken(tType byte) Token { + switch tType { + case packets.Connect: + return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Subscribe: + return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)} + case packets.Publish: + return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Unsubscribe: + return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Disconnect: + return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}} + } + return nil +} + +//ConnectToken is an extension of Token containing the extra fields +//required to provide information about calls to Connect() +type ConnectToken struct { + baseToken + returnCode byte +} + +//ReturnCode returns the acknowlegement code in the connack sent +//in response to a Connect() +func (c *ConnectToken) ReturnCode() byte { + c.m.RLock() + defer c.m.RUnlock() + return c.returnCode +} + +//PublishToken is an extension of Token containing the extra fields +//required to provide information about calls to Publish() +type PublishToken struct { + baseToken + messageID uint16 +} + +//MessageID returns the MQTT message ID that was assigned to the +//Publish packet when it was sent to the broker +func (p *PublishToken) MessageID() uint16 { + return p.messageID +} + +//SubscribeToken is an extension of Token containing the extra fields +//required to provide information about calls to Subscribe() +type SubscribeToken struct { + baseToken + subs []string + subResult map[string]byte +} + +//Result returns a map of topics that were subscribed to along with +//the matching return code from the broker. This is either the Qos +//value of the subscription or an error code. +func (s *SubscribeToken) Result() map[string]byte { + s.m.RLock() + defer s.m.RUnlock() + return s.subResult +} + +//UnsubscribeToken is an extension of Token containing the extra fields +//required to provide information about calls to Unsubscribe() +type UnsubscribeToken struct { + baseToken +} + +//DisconnectToken is an extension of Token containing the extra fields +//required to provide information about calls to Disconnect() +type DisconnectToken struct { + baseToken +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/topic.go b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go new file mode 100644 index 000000000..6fa3ad2ac --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2014 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "errors" + "strings" +) + +//ErrInvalidQos is the error returned when an packet is to be sent +//with an invalid Qos value +var ErrInvalidQos = errors.New("Invalid QoS") + +//ErrInvalidTopicEmptyString is the error returned when a topic string +//is passed in that is 0 length +var ErrInvalidTopicEmptyString = errors.New("Invalid Topic; empty string") + +//ErrInvalidTopicMultilevel is the error returned when a topic string +//is passed in that has the multi level wildcard in any position but +//the last +var ErrInvalidTopicMultilevel = errors.New("Invalid Topic; multi-level wildcard must be last level") + +// Topic Names and Topic Filters +// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard +// to the validity of Topic strings. +// - A Topic must be between 1 and 65535 bytes. +// - A Topic is case sensitive. +// - A Topic may contain whitespace. +// - A Topic containing a leading forward slash is different than a Topic without. +// - A Topic may be "/" (two levels, both empty string). +// - A Topic must be UTF-8 encoded. +// - A Topic may contain any number of levels. +// - A Topic may contain an empty level (two forward slashes in a row). +// - A TopicName may not contain a wildcard. +// - A TopicFilter may only have a # (multi-level) wildcard as the last level. +// - A TopicFilter may contain any number of + (single-level) wildcards. +// - A TopicFilter with a # will match the absense of a level +// Example: a subscription to "foo/#" will match messages published to "foo". + +func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) { + var topics []string + var qoss []byte + for topic, qos := range subs { + if err := validateTopicAndQos(topic, qos); err != nil { + return nil, nil, err + } + topics = append(topics, topic) + qoss = append(qoss, qos) + } + + return topics, qoss, nil +} + +func validateTopicAndQos(topic string, qos byte) error { + if len(topic) == 0 { + return ErrInvalidTopicEmptyString + } + + levels := strings.Split(topic, "/") + for i, level := range levels { + if level == "#" && i != len(levels)-1 { + return ErrInvalidTopicMultilevel + } + } + + if qos < 0 || qos > 2 { + return ErrInvalidQos + } + return nil +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/trace.go b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go new file mode 100644 index 000000000..2f5a01466 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "io/ioutil" + "log" +) + +// Internal levels of library output that are initialised to not print +// anything but can be overridden by programmer +var ( + ERROR *log.Logger + CRITICAL *log.Logger + WARN *log.Logger + DEBUG *log.Logger +) + +func init() { + ERROR = log.New(ioutil.Discard, "", 0) + CRITICAL = log.New(ioutil.Discard, "", 0) + WARN = log.New(ioutil.Discard, "", 0) + DEBUG = log.New(ioutil.Discard, "", 0) +} diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 000000000..ac034e525 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,12 @@ +.PHONY: build test bench vet + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. -test.benchmem + +vet: + go vet diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 000000000..85947422d --- /dev/null +++ b/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,740 @@ +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +To use a tagged revision: + + go get gopkg.in/ini.v1 + +To use with latest changes: + + go get github.com/go-ini/ini + +Please add `-u` flag to update in the future. + +### Testing + +If you want to test on your machine, please apply `-t` flag: + + go get -t gopkg.in/ini.v1 + +Please add `-u` flag to update in the future. + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data in type `[]byte`, a file name with type `string` or `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error. 
+ +```go +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +``` + +Or start with an empty object: + +```go +cfg := ini.Empty() +``` + +When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later. + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +If you have a list of files with possibilities that some of them may not available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning error. + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual. + +#### Ignore cases of key name + +When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing. + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 and sec2 are the exactly same section object +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 and key2 are the exactly same key object +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +#### MySQL-like boolean key + +MySQL's configuration allows a key without value as follows: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +By default, this is considered as missing value. But if you know you're going to deal with those cases, you can assign advanced load options: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +The value of those keys are always `true`, and when you save to a file, it will keep in the same foramt as you read. + +To generate such keys in your program, you could use `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### Comment + +Take care that following format will be treated as comment: + +1. Line begins with `#` or `;` +2. Words after `#` or `;` +3. Words after section name (i.e words after `[some section name]`) + +If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```. + +### Working with sections + +To get a section, you would need to: + +```go +section, err := cfg.GetSection("section name") +``` + +For a shortcut for default section, just give an empty string as name: + +```go +section, err := cfg.GetSection("") +``` + +When you're pretty sure the section exists, following code could make your life easier: + +```go +section := cfg.Section("section name") +``` + +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. 
+ +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To check if a key exists: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.Section("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +To check if raw value exists: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Well, I hate continuation lines, how do I disable that? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +Holy crap! + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. + +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. + +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### Auto-split values into a slice + +To use zero value of type for invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +To exclude invalid values out of result slice: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +Or to return nothing but error when have invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### Save your configuration + +Finally, it's time to save 
your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +By default, spaces are used to align "=" sign between key and values, to disable that: + +```go +ini.PrettyFormat = false +``` + +## Advanced Usage + +### Recursive Values + +For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### Retrieve parent keys available to a child section + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### Unparseable Sections + +Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... 
+p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. + +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Value Mapper + +To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? 
+ +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. + +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 000000000..163432db9 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,727 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + +使用一个特定版本: + + go get gopkg.in/ini.v1 + +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +#### 忽略键名的大小写 + +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 和 sec2 指向同一个分区对象 +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 和 key2 指向同一个键对象 +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +#### 类似 MySQL 配置中的布尔值键 + +MySQL 的配置文件中会出现没有具体值的布尔类型的键: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 + +如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### 关于注释 + +下述几种情况的内容将被视为注释: + +1. 所有以 `#` 或 `;` 开头的行 +2. 所有在 `#` 或 `;` 之后的内容 +3. 
分区标签后的文字 (即 `[分区名]` 之后的内容) + +如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("section name") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +判断某个键是否存在: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.Section("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判断某个原值是否存在: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! 
+ +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇靠给力啊! + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... 
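+// For illustration only: "writer" stands for any io.Writer value,
+// e.g. a *bytes.Buffer or os.Stdout (assumed to be in scope in this sketch).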
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: + +```go +ini.PrettyFormat = false +``` + +## 高级用法 + +### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 获取上级父分区下的所有键名 + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### 无法解析的分区 + +如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... 
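+	// A minimal sketch of inspecting the result, assuming "os" is imported:
+	// _, err = cfg.WriteTo(os.Stdout)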
+} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 值映射器(Value Mapper) + +值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? + +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 000000000..80afe7431 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,32 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "fmt" +) + +type ErrDelimiterNotFound struct { + Line string +} + +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 000000000..5211d5abc --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,556 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. + DEFAULT_SECTION = "DEFAULT" + + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + _VERSION = "1.27.0" +) + +// Version returns current package version literal. +func Version() string { + return _VERSION +} + +var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. 
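+// Data sources supplied as io.ReadCloser are wrapped in this type by parseDataSource.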
+type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + options LoadOptions + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + options: opts, + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. + IgnoreInlineComment bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string +} + +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. 
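+// For example, cfg.Section("SECTION") and cfg.Section("section") then return the same object.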
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it allows have shadow keys. +func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) 
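+			// Also drop the section object from the lookup map so it can no longer be resolved by name.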
+ delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 || DefaultHeader { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + if sec.isRawSection { + if _, err = buf.WriteString(sec.rawBody); err != nil { + return 0, err + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. 
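+		// (A key that gets wrapped in backticks grows by 2 characters; one wrapped in `"""` grows by 6.)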
+ alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KEY_LIST: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + for _, val := range key.ValueWithShadows() { + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { + return 0, err + } + } + } + + // Put a line between sections + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 000000000..838356af0 --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,699 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + name string + value string + isAutoIncrement bool + isBooleanType bool + + isShadow bool + shadows []*Key + + Comment string +} + +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. +func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + return []string{k.value} + } + vals := make([]string, len(k.shadows)+1) + vals[0] = k.value + for i := range k.shadows { + vals[i+1] = k.shadows[i].value + } + return vals +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. 
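+// For example, parseBool("on") yields (true, nil) while parseBool("2") yields an error.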
+func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. 
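+// If a default value is supplied, it is returned on failure and also written back as the key's value.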
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. 
Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. 
+func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseUints transforms strings to uints. +func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseTimesFormat transforms strings to times in given format. 
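+// Depending on the flags, invalid entries are kept as zero values (addInvalid),
+// reported as an error (returnOnInvalid), or silently skipped.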
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 000000000..6c0b10745 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,361 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
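+	// Quoting with ` or """ allows a key name to contain "=" or ":" without being split there.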
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + + // Check continuation lines when desired + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !ignoreInlineComment { + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. 
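+// It is called by Reload, and therefore by Load and Append, once per data source.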
+func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + var inUnparseableSection bool + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 + closeIdx := bytes.LastIndex(line, []byte("]")) + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + // Treat as boolean key when desired, and whole line is key name. + if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { + kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + if err != nil { + return err + } + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 000000000..94f7375ed --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,248 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. 
+ key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + "." + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]) + } + } + return children +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 000000000..031c78b8e --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,450 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
+ TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, _ = key.parseInts(strs, true, false) + case reflect.Int64: + vals, _ = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, _ = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, _ = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, _ = key.parseFloat64s(strs, true, false) + case reflectTime: + vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. 
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil || intVal == 0 { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. 
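Editorial aside (not part of the vendored package or of this patch): a minimal sketch of how the `MapTo` methods above are commonly used, assuming a hypothetical `app.ini` with a `[server]` section and a caller-defined struct. `ini.Load` and `Section` are defined elsewhere in this package; the `ini` and `delim` struct tags are the ones read by `mapTo` and `setSliceWithProperType` above.

```
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

// ServerConfig is a hypothetical target struct; fields map via `ini` tags.
type ServerConfig struct {
	Addr    string   `ini:"addr"`
	Debug   bool     `ini:"debug"`
	Origins []string `ini:"origins" delim:","` // slice fields are split on the delim tag
}

func main() {
	cfg, err := ini.Load("app.ini")
	if err != nil {
		log.Fatal(err)
	}
	var sc ServerConfig
	// Map the [server] section onto the struct using the tags above.
	if err := cfg.Section("server").MapTo(&sc); err != nil {
		log.Fatal(err)
	}
	fmt.Println(sc.Addr, sc.Debug, sc.Origins)
}
```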
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. +func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + sliceOf := field.Type().Elem().Kind() + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflectTime: + return v.Interface().(time.Time).IsZero() + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + opts := strings.SplitN(tag, ",", 2) + if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { + continue + } + + fieldName := s.parseFieldName(tpField.Name, opts[0]) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as secion. + key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects secion from given struct. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 000000000..1b1b1921e --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile new file mode 100644 index 000000000..e2e0651a9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. 
proto3_proto/proto3.proto + make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 000000000..e392575b3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,229 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
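Editorial aside (not part of the vendored file or of this patch): a short sketch of `Clone` and `Merge` as documented above, assuming a hypothetical protoc-generated message type `pb.Person` with optional `Name` and `Email` string fields; `proto.String` is the pointer helper from this package.

```
import (
	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/personpb" // hypothetical generated package
)

func cloneAndMerge() {
	src := &pb.Person{Name: proto.String("alice")}

	// Clone returns a deep copy; mutating dst does not affect src.
	dst := proto.Clone(src).(*pb.Person)

	// Merge copies set fields from the second message into dst and
	// appends any repeated fields.
	proto.Merge(dst, &pb.Person{Email: proto.String("alice@example.com")})
}
```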
+func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 000000000..aa207298f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,970 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. 
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
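Editorial aside (not part of the patch): the package-level `DecodeVarint` above can be exercised directly on raw bytes; 300 is the classic two-byte varint `0xAC 0x02`.

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2

	// A truncated varint (continuation bit still set, nothing following)
	// reports zero bytes consumed.
	y, m := proto.DecodeVarint([]byte{0xAC})
	fmt.Println(y, m) // 0 0
}
```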
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
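Editorial aside (not part of the patch): the difference between `Unmarshal` and `UnmarshalMerge` documented above, assuming a hypothetical generated type `pb.Person` and two wire-format byte slices `first` and `second` obtained elsewhere.

```
var p pb.Person // hypothetical protoc-generated message

// Unmarshal resets p before decoding, discarding anything already set.
if err := proto.Unmarshal(first, &p); err != nil {
	log.Fatal(err)
}

// UnmarshalMerge keeps what is already set in p and merges the new data in,
// appending to repeated fields instead of replacing them.
if err := proto.UnmarshalMerge(second, &p); err != nil {
	log.Fatal(err)
}
```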
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. 
+const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. 
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. 
+ tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 000000000..2b30f8462 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,1362 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. 
+// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. 
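Editorial aside (not part of the patch): a round trip through the `Buffer` primitives defined above. Encoding appends to the Buffer's internal byte slice, and decoding then reads from the front of that same Buffer.

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	buf := proto.NewBuffer(nil)
	_ = buf.EncodeVarint(300)          // appends 0xAC 0x02
	_ = buf.EncodeStringBytes("minio") // appends a length-prefixed string

	x, _ := buf.DecodeVarint()      // 300
	s, _ := buf.DecodeStringBytes() // "minio"
	fmt.Println(x, s)
}
```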
+type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) + return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. 
+func (o *Buffer) enc_map(p *Properties, base structPointer) error { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { + return err + } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { + return err + } + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := structPointer_ExtMap(base, p.field) + return extensionsMapSize(*v) +} + +func size_exts(p *Properties, base structPointer) int { + v := structPointer_Extensions(base, p.field) + return extensionsSize(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? + + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
+ + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). 
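+// For example (illustrative numbers only): a 300-byte body needs a 2-byte
+// varint length (0xAC 0x02), so after encoding, the body is moved two bytes
+// left and the length is written into the reserved space in front of it.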
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 000000000..2ed1cf596 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
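+// An illustrative use of Equal (editor's sketch; pb.Test is the generated type
+// from the package example in lib.go's documentation, assumed importable as pb):
+//
+//	a := &pb.Test{Label: proto.String("x"), Reps: []int64{1, 2}}
+//	b := &pb.Test{Label: proto.String("x"), Reps: []int64{1, 2}}
+//	proto.Equal(a, b) // true: same type, corresponding fields equal
+//	b.Reps = nil
+//	proto.Equal(a, b) // false: repeated fields differ in length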
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 000000000..eaad21831 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,587 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, bool) { + if ep, ok := p.(extendableProto); ok { + return ep, ok + } + if ep, ok := p.(extendableProtoV1); ok { + return extensionAdapter{ep}, ok + } + return nil, false +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. 
+ p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() +var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + epb, ok := extendable(base) + if !ok { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. 
+type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensions(e *XXX_InternalExtensions) error { + m, mu := e.extensionsRead() + if m == nil { + return nil // fast path + } + mu.Lock() + defer mu.Unlock() + return encodeExtensionsMap(m) +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensionsMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func extensionsSize(e *XXX_InternalExtensions) (n int) { + m, mu := e.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + defer mu.Unlock() + return extensionsMapSize(m) +} + +func extensionsMapSize(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + epb, ok := extendable(pb) + if !ok { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok = extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, ok := extendable(pb) + if !ok { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. 
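+// A minimal usage sketch (editor's example; pb.MyMessage and pb.E_Greeting are
+// an assumed extendable message and a registered *string extension descriptor,
+// not defined in this package):
+//
+//	m := &pb.MyMessage{}
+//	if err := proto.SetExtension(m, pb.E_Greeting, proto.String("hello")); err != nil {
+//		// handle the error
+//	}
+//	v, err := proto.GetExtension(m, pb.E_Greeting)
+//	if err == nil {
+//		greeting := *v.(*string) // "hello"
+//		_ = greeting
+//	}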
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. 
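+// Batch retrieval sketch (pb.E_Greeting as above; pb.E_Other is a second
+// assumed descriptor):
+//
+//	vals, err := proto.GetExtensions(m, []*proto.ExtensionDesc{pb.E_Greeting, pb.E_Other})
+//	// vals[i] is nil where the extension is absent and declares no default.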
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 000000000..ac4ddbc07 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,898 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. 
Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string 
`protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
+func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
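The Buffer above mainly exists to amortize allocations across repeated encodes; the global Marshal and Unmarshal are enough for most callers. A rough sketch of reusing one, where msgs and send are placeholders and Buffer.Marshal is defined elsewhere in this package:

	var buf proto.Buffer // the zero value is ready to use
	for _, m := range msgs {
		buf.Reset()
		// Buffer.Marshal appends m's wire encoding to the internal buffer.
		if err := buf.Marshal(m); err != nil {
			log.Fatal(err)
		}
		send(buf.Bytes())
	}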
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
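As a rough illustration of SetDefaults above, reusing the hypothetical pb "./example.pb" package from the package comment, where Test declares optional int32 type = 2 [default=77]:

	t := &pb.Test{Label: proto.String("x")} // Type deliberately left unset (nil)
	proto.SetDefaults(t)
	fmt.Println(*t.Type) // 77: the unset scalar now points at its declared default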
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 000000000..fd982decd --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,311 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. 
+ */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. 
+ ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. 
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..fb512e2e1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. 
+// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. 
+func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. +type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..6b5567d47 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. 
+func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. 
+type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 000000000..ec2289c00 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,872 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. 
+type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 
reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. 
+func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. 
+ p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
+var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 000000000..965876bf0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,854 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
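For orientation, here is a minimal, hedged sketch of how the text-format API added in this vendored file (MarshalTextString, CompactTextString, UnmarshalText) is typically used. It is illustrative only and not part of the vendored source or of this diff; the Example message type and its field values are hypothetical, hand-written stand-ins for what protoc-gen-go would normally generate.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is a hypothetical stand-in for a protoc-generated message.
// The protobuf struct tags are what the text marshaler/parser rely on.
type Example struct {
	Name *string `protobuf:"bytes,1,opt,name=name"`
	Id   *int32  `protobuf:"varint,2,opt,name=id"`
}

// These three methods satisfy the proto.Message interface.
func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	name := "minio"
	id := int32(9000)
	in := &Example{Name: &name, Id: &id}

	// Render the message in the protobuf text format, e.g. `name: "minio"` / `id: 9000`.
	text := proto.MarshalTextString(in)
	fmt.Print(text)

	// Parse the text form back into a fresh message.
	out := new(Example)
	if err := proto.UnmarshalText(text, out); err != nil {
		fmt.Println("unmarshal failed:", err)
	}
}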
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. 
because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, ok := extendable(pv.Interface()); ok { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. 
+func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 000000000..61f83c1e1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,895 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + 
switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
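// Note (illustrative, not present in the vendored file): examples of tokens
// accepted by readAny, with assumed field names:
//
//    enabled: true    enabled: t    enabled: 1    // booleans
//    ids: [1, 2, 3]                               // list notation for a repeated field
//    data: "abc"                                  // []byte requires a quoted string
//    ratio: 0.5f                                  // trailing 'f' is dropped below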
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile new file mode 100644 index 000000000..4942418e3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile @@ -0,0 +1,39 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/descriptor.proto +regenerate: + echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION + protoc --go_out=. -I$(HOME)/src/protobuf/src $(HOME)/src/protobuf/src/google/protobuf/descriptor.proto && \ + sed 's,^package google_protobuf,package descriptor,' google/protobuf/descriptor.pb.go > \ + $(GOPATH)/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go && \ + rm -f google/protobuf/descriptor.pb.go diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 000000000..a1d8a764f --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2065 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/descriptor.proto +// DO NOT EDIT! + +/* +Package descriptor is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. 
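// Note (illustrative): TYPE_SINT32/TYPE_SINT64 use ZigZag encoding, which
// maps signed values to small unsigned varints (0 -> 0, -1 -> 1, 1 -> 2,
// -2 -> 3, ...), so small negative numbers stay compact on the wire.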
+ FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func 
(FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{3, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} } + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
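// Illustrative note (not part of the generated file): such a set can be
// produced with, e.g., "protoc --include_imports --descriptor_set_out=set.pb your.proto"
// (file names are placeholders) and read back by passing the bytes and a
// *FileDescriptorSet to proto.Unmarshal.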
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. 
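// For example, a field declared as "optional string foo_bar = 1;" gets the
// JSON name "fooBar" unless a json_name option overrides it.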
+ JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. +type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. 
+type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
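// For example, ".google.protobuf.Empty": the leading '.' marks the name as
// fully qualified, exactly as for FieldDescriptorProto.type_name.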
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. 
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
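// Illustrative note: a custom option such as "option (my.file_opt) = 7;"
// ("my.file_opt" is an assumed extension name) is kept here until the
// extension can be resolved when the descriptors are built.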
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +var extRange_FileOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) 
GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +var extRange_MessageOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). By default these types are + // represented as JavaScript strings. This avoids loss of precision that can + // happen when a large value is converted to a floating point JavaScript + // numbers. Specifying JS_NUMBER for the jstype causes the generated + // JavaScript code to use the JavaScript "number" type instead of strings. + // This option is an enum to permit additional types to be added, + // e.g. goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. 
It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
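// Illustrative note: the ctype/packed/jstype/lazy settings above appear in
// .proto sources as bracketed field options, e.g. (with assumed field names)
//
//    repeated fixed64 ids = 3 [packed = true, jstype = JS_STRING];
//    optional BigMessage blob = 4 [lazy = true];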
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +var extRange_FieldOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +var extRange_EnumOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +var extRange_MethodOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +const Default_MethodOptions_Deprecated bool = false + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{17, 0} +} + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. 
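[Editor's aside, not part of the vendored file] The NamePart convention documented just above (segments such as {"foo", false}, {"bar.baz", true}, {"qux", false} spelling "foo.(bar.baz).qux") can be made concrete with a short sketch. The helper name optionName is hypothetical; only the generated GetNamePart/GetIsExtension accessors and the standard strings package are assumed.

    import "strings"

    // optionName rebuilds the dotted option name from its NamePart segments,
    // wrapping extension segments in parentheses as described above.
    func optionName(parts []*UninterpretedOption_NamePart) string {
        segs := make([]string, 0, len(parts))
        for _, p := range parts {
            if p.GetIsExtension() {
                segs = append(segs, "("+p.GetNamePart()+")")
            } else {
                segs = append(segs, p.GetNamePart())
            }
        }
        return strings.Join(segs, ".") // e.g. "foo.(bar.baz).qux"
    }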
+type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. 
For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
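[Editor's aside, not part of the vendored file] A hedged usage sketch of the path/span/comment fields described above: it walks the locations of a parsed descriptor and prints any leading comments. It assumes fd is a *FileDescriptorProto (declared earlier in this generated file) whose SourceCodeInfo was populated by the compiler, and that fmt is imported.

    // Walk a parsed file's locations and print any leading comments.
    // Path identifies the element; Span holds zero-based line/column numbers.
    for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
        if c := loc.GetLeadingComments(); c != "" {
            fmt.Printf("path=%v span=%v comment=%q\n", loc.GetPath(), loc.GetSpan(), c)
        }
    }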
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} } + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
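[Editor's aside, not part of the vendored file] The begin/end convention documented above in one line; a is a hypothetical *GeneratedCodeInfo_Annotation and src the generated source as a string.

    segment := src[a.GetBegin():a.GetEnd()] // end is exclusive, so len(segment) == end-begin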
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{19, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", 
FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2295 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0xcf, 0xf2, 0x9f, 0xc8, 0x47, 0x8a, 0x1a, 0x8d, 0x14, 0x67, 0xad, 0xfc, 0xb1, 0xcc, 0xd8, + 0xb1, 0x6c, 0xb7, 0x74, 0x20, 0xff, 0x89, 0xa3, 0x14, 0x29, 0x28, 0x71, 0xad, 0xd0, 0x90, 0x44, + 0x76, 0x29, 0xb5, 0x4e, 0x2e, 0x8b, 0xd1, 0xee, 0x90, 0x5a, 0x7b, 0x39, 0xbb, 0xdd, 0x5d, 0xda, + 0x56, 0x4e, 0x06, 0x7a, 0xea, 0xa5, 0xe7, 0xa2, 0x2d, 0x7a, 0xc8, 0x25, 0x40, 0x3f, 0x40, 0x0f, + 0xfd, 0x0a, 0x05, 0x0a, 0xf4, 0x2b, 0x14, 0x05, 0xda, 0x6f, 0xd0, 0x6b, 0x31, 0x33, 0xbb, 0xcb, + 0x5d, 0xfe, 0x89, 0xd5, 0x00, 0x49, 0x7a, 0x12, 0xe7, 0xf7, 0x7e, 0xef, 0xcd, 0x9b, 0x37, 0x6f, + 0xde, 0xbc, 0x1d, 0xc1, 0xe6, 0xd0, 0x75, 0x87, 0x0e, 0xbd, 0xe3, 0xf9, 0x6e, 0xe8, 0x9e, 0x8e, + 0x07, 0x77, 0x2c, 0x1a, 0x98, 0xbe, 0xed, 0x85, 0xae, 0xdf, 0x14, 0x18, 0x5e, 0x91, 0x8c, 0x66, + 0xcc, 0x68, 0x1c, 0xc2, 0xea, 0x23, 0xdb, 0xa1, 0xed, 0x84, 0xd8, 0xa7, 0x21, 0x7e, 0x08, 0x85, + 0x81, 0xed, 0x50, 0x55, 0xd9, 0xcc, 0x6f, 0x55, 0xb7, 0xaf, 0x35, 0xa7, 0x94, 0x9a, 0x59, 0x8d, + 0x1e, 0x87, 0x75, 0xa1, 0xd1, 0xf8, 0x67, 0x01, 0xd6, 0xe6, 0x48, 0x31, 0x86, 0x02, 0x23, 0x23, + 0x6e, 0x51, 0xd9, 0xaa, 0xe8, 0xe2, 0x37, 0x56, 0x61, 0xc9, 0x23, 0xe6, 0x33, 0x32, 0xa4, 0x6a, + 0x4e, 0xc0, 0xf1, 0x10, 0xbf, 0x07, 0x60, 0x51, 0x8f, 0x32, 0x8b, 0x32, 0xf3, 0x5c, 0xcd, 0x6f, + 0xe6, 0xb7, 0x2a, 0x7a, 0x0a, 0xc1, 0xb7, 0x61, 0xd5, 0x1b, 0x9f, 0x3a, 0xb6, 0x69, 0xa4, 0x68, + 0xb0, 0x99, 0xdf, 0x2a, 0xea, 0x48, 0x0a, 0xda, 0x13, 0xf2, 0x0d, 0x58, 0x79, 0x41, 0xc9, 0xb3, + 0x34, 0xb5, 0x2a, 0xa8, 0x75, 0x0e, 0xa7, 0x88, 0x7b, 0x50, 0x1b, 0xd1, 0x20, 0x20, 0x43, 0x6a, + 0x84, 0xe7, 0x1e, 0x55, 0x0b, 0x62, 0xf5, 0x9b, 0x33, 0xab, 0x9f, 0x5e, 0x79, 0x35, 0xd2, 0x3a, + 0x3e, 0xf7, 0x28, 0x6e, 0x41, 0x85, 0xb2, 0xf1, 0x48, 0x5a, 0x28, 0x2e, 0x88, 0x9f, 0xc6, 0xc6, + 0xa3, 0x69, 0x2b, 0x65, 0xae, 0x16, 0x99, 0x58, 0x0a, 0xa8, 0xff, 0xdc, 0x36, 0xa9, 0x5a, 0x12, + 0x06, 0x6e, 0xcc, 0x18, 0xe8, 0x4b, 0xf9, 0xb4, 0x8d, 0x58, 0x0f, 0xef, 0x41, 0x85, 0xbe, 0x0c, + 0x29, 0x0b, 0x6c, 0x97, 0xa9, 0x4b, 0xc2, 0xc8, 0xf5, 0x39, 0xbb, 0x48, 0x1d, 0x6b, 0xda, 0xc4, + 0x44, 0x0f, 0x3f, 0x80, 0x25, 0xd7, 0x0b, 0x6d, 0x97, 0x05, 0x6a, 0x79, 0x53, 0xd9, 0xaa, 0x6e, + 0xbf, 0x33, 0x37, 0x11, 0xba, 0x92, 0xa3, 0xc7, 0x64, 0xdc, 0x01, 0x14, 0xb8, 0x63, 0xdf, 0xa4, + 0x86, 0xe9, 0x5a, 0xd4, 0xb0, 0xd9, 0xc0, 0x55, 0x2b, 0xc2, 0xc0, 0x95, 0xd9, 0x85, 0x08, 0xe2, + 0x9e, 0x6b, 0xd1, 0x0e, 0x1b, 0xb8, 0x7a, 0x3d, 0xc8, 0x8c, 0xf1, 0x25, 0x28, 0x05, 0xe7, 0x2c, + 0x24, 0x2f, 0xd5, 0x9a, 0xc8, 0x90, 0x68, 0xd4, 0xf8, 0x4f, 0x11, 0x56, 0x2e, 0x92, 0x62, 0x9f, + 0x40, 0x71, 0xc0, 0x57, 0xa9, 0xe6, 0xfe, 0x97, 0x18, 0x48, 0x9d, 0x6c, 0x10, 0x4b, 0xdf, 0x32, + 0x88, 0x2d, 0xa8, 0x32, 0x1a, 0x84, 0xd4, 0x92, 0x19, 0x91, 0xbf, 0x60, 0x4e, 0x81, 0x54, 0x9a, + 0x4d, 0xa9, 0xc2, 0xb7, 0x4a, 0xa9, 0x27, 0xb0, 0x92, 0xb8, 0x64, 0xf8, 0x84, 0x0d, 0xe3, 0xdc, + 0xbc, 0xf3, 0x3a, 0x4f, 0x9a, 0x5a, 0xac, 0xa7, 0x73, 0x35, 0xbd, 0x4e, 0x33, 0x63, 0xdc, 0x06, + 0x70, 0x19, 0x75, 0x07, 0x86, 0x45, 0x4d, 0x47, 
0x2d, 0x2f, 0x88, 0x52, 0x97, 0x53, 0x66, 0xa2, + 0xe4, 0x4a, 0xd4, 0x74, 0xf0, 0xc7, 0x93, 0x54, 0x5b, 0x5a, 0x90, 0x29, 0x87, 0xf2, 0x90, 0xcd, + 0x64, 0xdb, 0x09, 0xd4, 0x7d, 0xca, 0xf3, 0x9e, 0x5a, 0xd1, 0xca, 0x2a, 0xc2, 0x89, 0xe6, 0x6b, + 0x57, 0xa6, 0x47, 0x6a, 0x72, 0x61, 0xcb, 0x7e, 0x7a, 0x88, 0xdf, 0x87, 0x04, 0x30, 0x44, 0x5a, + 0x81, 0xa8, 0x42, 0xb5, 0x18, 0x3c, 0x22, 0x23, 0xba, 0xf1, 0x10, 0xea, 0xd9, 0xf0, 0xe0, 0x75, + 0x28, 0x06, 0x21, 0xf1, 0x43, 0x91, 0x85, 0x45, 0x5d, 0x0e, 0x30, 0x82, 0x3c, 0x65, 0x96, 0xa8, + 0x72, 0x45, 0x9d, 0xff, 0xdc, 0xf8, 0x08, 0x96, 0x33, 0xd3, 0x5f, 0x54, 0xb1, 0xf1, 0xdb, 0x12, + 0xac, 0xcf, 0xcb, 0xb9, 0xb9, 0xe9, 0x7f, 0x09, 0x4a, 0x6c, 0x3c, 0x3a, 0xa5, 0xbe, 0x9a, 0x17, + 0x16, 0xa2, 0x11, 0x6e, 0x41, 0xd1, 0x21, 0xa7, 0xd4, 0x51, 0x0b, 0x9b, 0xca, 0x56, 0x7d, 0xfb, + 0xf6, 0x85, 0xb2, 0xba, 0x79, 0xc0, 0x55, 0x74, 0xa9, 0x89, 0x3f, 0x85, 0x42, 0x54, 0xe2, 0xb8, + 0x85, 0x5b, 0x17, 0xb3, 0xc0, 0x73, 0x51, 0x17, 0x7a, 0xf8, 0x6d, 0xa8, 0xf0, 0xbf, 0x32, 0xb6, + 0x25, 0xe1, 0x73, 0x99, 0x03, 0x3c, 0xae, 0x78, 0x03, 0xca, 0x22, 0xcd, 0x2c, 0x1a, 0x5f, 0x0d, + 0xc9, 0x98, 0x6f, 0x8c, 0x45, 0x07, 0x64, 0xec, 0x84, 0xc6, 0x73, 0xe2, 0x8c, 0xa9, 0x48, 0x98, + 0x8a, 0x5e, 0x8b, 0xc0, 0x9f, 0x73, 0x0c, 0x5f, 0x81, 0xaa, 0xcc, 0x4a, 0x9b, 0x59, 0xf4, 0xa5, + 0xa8, 0x3e, 0x45, 0x5d, 0x26, 0x6a, 0x87, 0x23, 0x7c, 0xfa, 0xa7, 0x81, 0xcb, 0xe2, 0xad, 0x15, + 0x53, 0x70, 0x40, 0x4c, 0xff, 0xd1, 0x74, 0xe1, 0x7b, 0x77, 0xfe, 0xf2, 0xa6, 0x73, 0xb1, 0xf1, + 0xe7, 0x1c, 0x14, 0xc4, 0x79, 0x5b, 0x81, 0xea, 0xf1, 0xe7, 0x3d, 0xcd, 0x68, 0x77, 0x4f, 0x76, + 0x0f, 0x34, 0xa4, 0xe0, 0x3a, 0x80, 0x00, 0x1e, 0x1d, 0x74, 0x5b, 0xc7, 0x28, 0x97, 0x8c, 0x3b, + 0x47, 0xc7, 0x0f, 0xee, 0xa1, 0x7c, 0xa2, 0x70, 0x22, 0x81, 0x42, 0x9a, 0x70, 0x77, 0x1b, 0x15, + 0x31, 0x82, 0x9a, 0x34, 0xd0, 0x79, 0xa2, 0xb5, 0x1f, 0xdc, 0x43, 0xa5, 0x2c, 0x72, 0x77, 0x1b, + 0x2d, 0xe1, 0x65, 0xa8, 0x08, 0x64, 0xb7, 0xdb, 0x3d, 0x40, 0xe5, 0xc4, 0x66, 0xff, 0x58, 0xef, + 0x1c, 0xed, 0xa3, 0x4a, 0x62, 0x73, 0x5f, 0xef, 0x9e, 0xf4, 0x10, 0x24, 0x16, 0x0e, 0xb5, 0x7e, + 0xbf, 0xb5, 0xaf, 0xa1, 0x6a, 0xc2, 0xd8, 0xfd, 0xfc, 0x58, 0xeb, 0xa3, 0x5a, 0xc6, 0xad, 0xbb, + 0xdb, 0x68, 0x39, 0x99, 0x42, 0x3b, 0x3a, 0x39, 0x44, 0x75, 0xbc, 0x0a, 0xcb, 0x72, 0x8a, 0xd8, + 0x89, 0x95, 0x29, 0xe8, 0xc1, 0x3d, 0x84, 0x26, 0x8e, 0x48, 0x2b, 0xab, 0x19, 0xe0, 0xc1, 0x3d, + 0x84, 0x1b, 0x7b, 0x50, 0x14, 0xd9, 0x85, 0x31, 0xd4, 0x0f, 0x5a, 0xbb, 0xda, 0x81, 0xd1, 0xed, + 0x1d, 0x77, 0xba, 0x47, 0xad, 0x03, 0xa4, 0x4c, 0x30, 0x5d, 0xfb, 0xd9, 0x49, 0x47, 0xd7, 0xda, + 0x28, 0x97, 0xc6, 0x7a, 0x5a, 0xeb, 0x58, 0x6b, 0xa3, 0x7c, 0xc3, 0x84, 0xf5, 0x79, 0x75, 0x66, + 0xee, 0xc9, 0x48, 0x6d, 0x71, 0x6e, 0xc1, 0x16, 0x0b, 0x5b, 0x33, 0x5b, 0xfc, 0x95, 0x02, 0x6b, + 0x73, 0x6a, 0xed, 0xdc, 0x49, 0x7e, 0x0a, 0x45, 0x99, 0xa2, 0xf2, 0xf6, 0xb9, 0x39, 0xb7, 0x68, + 0x8b, 0x84, 0x9d, 0xb9, 0x81, 0x84, 0x5e, 0xfa, 0x06, 0xce, 0x2f, 0xb8, 0x81, 0xb9, 0x89, 0x19, + 0x27, 0x7f, 0xa5, 0x80, 0xba, 0xc8, 0xf6, 0x6b, 0x0a, 0x45, 0x2e, 0x53, 0x28, 0x3e, 0x99, 0x76, + 0xe0, 0xea, 0xe2, 0x35, 0xcc, 0x78, 0xf1, 0xb5, 0x02, 0x97, 0xe6, 0x37, 0x2a, 0x73, 0x7d, 0xf8, + 0x14, 0x4a, 0x23, 0x1a, 0x9e, 0xb9, 0xf1, 0x65, 0xfd, 0xc1, 0x9c, 0x2b, 0x80, 0x8b, 0xa7, 0x63, + 0x15, 0x69, 0xa5, 0xef, 0x90, 0xfc, 0xa2, 0x6e, 0x43, 0x7a, 0x33, 0xe3, 0xe9, 0xaf, 0x73, 0xf0, + 0xe6, 0x5c, 0xe3, 0x73, 0x1d, 0x7d, 0x17, 0xc0, 0x66, 0xde, 0x38, 0x94, 0x17, 0xb2, 0xac, 0x4f, + 0x15, 0x81, 0x88, 0xb3, 0xcf, 0x6b, 0xcf, 0x38, 0x4c, 0xe4, 0x79, 0x21, 
0x07, 0x09, 0x09, 0xc2, + 0xc3, 0x89, 0xa3, 0x05, 0xe1, 0xe8, 0x7b, 0x0b, 0x56, 0x3a, 0x73, 0xd7, 0x7d, 0x08, 0xc8, 0x74, + 0x6c, 0xca, 0x42, 0x23, 0x08, 0x7d, 0x4a, 0x46, 0x36, 0x1b, 0x8a, 0x02, 0x5c, 0xde, 0x29, 0x0e, + 0x88, 0x13, 0x50, 0x7d, 0x45, 0x8a, 0xfb, 0xb1, 0x94, 0x6b, 0x88, 0x5b, 0xc6, 0x4f, 0x69, 0x94, + 0x32, 0x1a, 0x52, 0x9c, 0x68, 0x34, 0x7e, 0xb3, 0x04, 0xd5, 0x54, 0x5b, 0x87, 0xaf, 0x42, 0xed, + 0x29, 0x79, 0x4e, 0x8c, 0xb8, 0x55, 0x97, 0x91, 0xa8, 0x72, 0xac, 0x17, 0xb5, 0xeb, 0x1f, 0xc2, + 0xba, 0xa0, 0xb8, 0xe3, 0x90, 0xfa, 0x86, 0xe9, 0x90, 0x20, 0x10, 0x41, 0x2b, 0x0b, 0x2a, 0xe6, + 0xb2, 0x2e, 0x17, 0xed, 0xc5, 0x12, 0x7c, 0x1f, 0xd6, 0x84, 0xc6, 0x68, 0xec, 0x84, 0xb6, 0xe7, + 0x50, 0x83, 0x7f, 0x3c, 0x04, 0xa2, 0x10, 0x27, 0x9e, 0xad, 0x72, 0xc6, 0x61, 0x44, 0xe0, 0x1e, + 0x05, 0xb8, 0x0d, 0xef, 0x0a, 0xb5, 0x21, 0x65, 0xd4, 0x27, 0x21, 0x35, 0xe8, 0x2f, 0xc7, 0xc4, + 0x09, 0x0c, 0xc2, 0x2c, 0xe3, 0x8c, 0x04, 0x67, 0xea, 0x3a, 0x37, 0xb0, 0x9b, 0x53, 0x15, 0xfd, + 0x32, 0x27, 0xee, 0x47, 0x3c, 0x4d, 0xd0, 0x5a, 0xcc, 0xfa, 0x8c, 0x04, 0x67, 0x78, 0x07, 0x2e, + 0x09, 0x2b, 0x41, 0xe8, 0xdb, 0x6c, 0x68, 0x98, 0x67, 0xd4, 0x7c, 0x66, 0x8c, 0xc3, 0xc1, 0x43, + 0xf5, 0xed, 0xf4, 0xfc, 0xc2, 0xc3, 0xbe, 0xe0, 0xec, 0x71, 0xca, 0x49, 0x38, 0x78, 0x88, 0xfb, + 0x50, 0xe3, 0x9b, 0x31, 0xb2, 0xbf, 0xa4, 0xc6, 0xc0, 0xf5, 0xc5, 0xcd, 0x52, 0x9f, 0x73, 0xb2, + 0x53, 0x11, 0x6c, 0x76, 0x23, 0x85, 0x43, 0xd7, 0xa2, 0x3b, 0xc5, 0x7e, 0x4f, 0xd3, 0xda, 0x7a, + 0x35, 0xb6, 0xf2, 0xc8, 0xf5, 0x79, 0x42, 0x0d, 0xdd, 0x24, 0xc0, 0x55, 0x99, 0x50, 0x43, 0x37, + 0x0e, 0xef, 0x7d, 0x58, 0x33, 0x4d, 0xb9, 0x66, 0xdb, 0x34, 0xa2, 0x16, 0x3f, 0x50, 0x51, 0x26, + 0x58, 0xa6, 0xb9, 0x2f, 0x09, 0x51, 0x8e, 0x07, 0xf8, 0x63, 0x78, 0x73, 0x12, 0xac, 0xb4, 0xe2, + 0xea, 0xcc, 0x2a, 0xa7, 0x55, 0xef, 0xc3, 0x9a, 0x77, 0x3e, 0xab, 0x88, 0x33, 0x33, 0x7a, 0xe7, + 0xd3, 0x6a, 0xd7, 0xc5, 0x67, 0x9b, 0x4f, 0x4d, 0x12, 0x52, 0x4b, 0x7d, 0x2b, 0xcd, 0x4e, 0x09, + 0xf0, 0x1d, 0x40, 0xa6, 0x69, 0x50, 0x46, 0x4e, 0x1d, 0x6a, 0x10, 0x9f, 0x32, 0x12, 0xa8, 0x57, + 0xd2, 0xe4, 0xba, 0x69, 0x6a, 0x42, 0xda, 0x12, 0x42, 0x7c, 0x0b, 0x56, 0xdd, 0xd3, 0xa7, 0xa6, + 0xcc, 0x2c, 0xc3, 0xf3, 0xe9, 0xc0, 0x7e, 0xa9, 0x5e, 0x13, 0x61, 0x5a, 0xe1, 0x02, 0x91, 0x57, + 0x3d, 0x01, 0xe3, 0x9b, 0x80, 0xcc, 0xe0, 0x8c, 0xf8, 0x9e, 0xb8, 0xda, 0x03, 0x8f, 0x98, 0x54, + 0xbd, 0x2e, 0xa9, 0x12, 0x3f, 0x8a, 0x61, 0xfc, 0x04, 0xd6, 0xc7, 0xcc, 0x66, 0x21, 0xf5, 0x3d, + 0x9f, 0xf2, 0x0e, 0x5d, 0x1e, 0x33, 0xf5, 0x5f, 0x4b, 0x0b, 0x7a, 0xec, 0x93, 0x34, 0x5b, 0xee, + 0xae, 0xbe, 0x36, 0x9e, 0x05, 0x1b, 0x3b, 0x50, 0x4b, 0x6f, 0x3a, 0xae, 0x80, 0xdc, 0x76, 0xa4, + 0xf0, 0x0b, 0x74, 0xaf, 0xdb, 0xe6, 0x57, 0xdf, 0x17, 0x1a, 0xca, 0xf1, 0x2b, 0xf8, 0xa0, 0x73, + 0xac, 0x19, 0xfa, 0xc9, 0xd1, 0x71, 0xe7, 0x50, 0x43, 0xf9, 0x5b, 0x95, 0xf2, 0xbf, 0x97, 0xd0, + 0xab, 0x57, 0xaf, 0x5e, 0xe5, 0x1e, 0x17, 0xca, 0x1f, 0xa0, 0x1b, 0x8d, 0xbf, 0xe6, 0xa0, 0x9e, + 0x6d, 0x7e, 0xf1, 0x4f, 0xe0, 0xad, 0xf8, 0x4b, 0x35, 0xa0, 0xa1, 0xf1, 0xc2, 0xf6, 0x45, 0x36, + 0x8e, 0x88, 0x6c, 0x1f, 0x93, 0x40, 0xae, 0x47, 0xac, 0x3e, 0x0d, 0x7f, 0x61, 0xfb, 0x3c, 0xd7, + 0x46, 0x24, 0xc4, 0x07, 0x70, 0x85, 0xb9, 0x46, 0x10, 0x12, 0x66, 0x11, 0xdf, 0x32, 0x26, 0x6f, + 0x04, 0x06, 0x31, 0x4d, 0x1a, 0x04, 0xae, 0xbc, 0x05, 0x12, 0x2b, 0xef, 0x30, 0xb7, 0x1f, 0x91, + 0x27, 0xe5, 0xb1, 0x15, 0x51, 0xa7, 0x36, 0x3d, 0xbf, 0x68, 0xd3, 0xdf, 0x86, 0xca, 0x88, 0x78, + 0x06, 0x65, 0xa1, 0x7f, 0x2e, 0x5a, 0xb6, 0xb2, 0x5e, 0x1e, 0x11, 0x4f, 0xe3, 0xe3, 0xef, 0x6e, + 
0x27, 0xb2, 0xd1, 0x2c, 0xa3, 0x4a, 0xe3, 0x1f, 0x79, 0xa8, 0xa5, 0x9b, 0x37, 0xde, 0x0b, 0x9b, + 0xa2, 0x50, 0x2b, 0xe2, 0x28, 0xbf, 0xff, 0x8d, 0xad, 0x5e, 0x73, 0x8f, 0x57, 0xf0, 0x9d, 0x92, + 0x6c, 0xa9, 0x74, 0xa9, 0xc9, 0x6f, 0x4f, 0x7e, 0x78, 0xa9, 0x6c, 0xd4, 0xcb, 0x7a, 0x34, 0xc2, + 0xfb, 0x50, 0x7a, 0x1a, 0x08, 0xdb, 0x25, 0x61, 0xfb, 0xda, 0x37, 0xdb, 0x7e, 0xdc, 0x17, 0xc6, + 0x2b, 0x8f, 0xfb, 0xc6, 0x51, 0x57, 0x3f, 0x6c, 0x1d, 0xe8, 0x91, 0x3a, 0xbe, 0x0c, 0x05, 0x87, + 0x7c, 0x79, 0x9e, 0xad, 0xf5, 0x02, 0xba, 0x68, 0xf8, 0x2f, 0x43, 0xe1, 0x05, 0x25, 0xcf, 0xb2, + 0x15, 0x56, 0x40, 0xdf, 0xe1, 0x31, 0xb8, 0x03, 0x45, 0x11, 0x2f, 0x0c, 0x10, 0x45, 0x0c, 0xbd, + 0x81, 0xcb, 0x50, 0xd8, 0xeb, 0xea, 0xfc, 0x28, 0x20, 0xa8, 0x49, 0xd4, 0xe8, 0x75, 0xb4, 0x3d, + 0x0d, 0xe5, 0x1a, 0xf7, 0xa1, 0x24, 0x83, 0xc0, 0x8f, 0x49, 0x12, 0x06, 0xf4, 0x46, 0x34, 0x8c, + 0x6c, 0x28, 0xb1, 0xf4, 0xe4, 0x70, 0x57, 0xd3, 0x51, 0x2e, 0xbb, 0xc9, 0x05, 0x54, 0x6c, 0x04, + 0x50, 0x4b, 0x77, 0x6f, 0xdf, 0x4b, 0x7e, 0x35, 0xfe, 0xa2, 0x40, 0x35, 0xd5, 0x8d, 0xf1, 0x3e, + 0x80, 0x38, 0x8e, 0xfb, 0xc2, 0x20, 0x8e, 0x4d, 0x82, 0x28, 0x35, 0x40, 0x40, 0x2d, 0x8e, 0x5c, + 0x74, 0xeb, 0xbe, 0x17, 0xe7, 0xff, 0xa8, 0x00, 0x9a, 0xee, 0xe4, 0xa6, 0x1c, 0x54, 0x7e, 0x50, + 0x07, 0xff, 0xa0, 0x40, 0x3d, 0xdb, 0xbe, 0x4d, 0xb9, 0x77, 0xf5, 0x07, 0x75, 0xef, 0xf7, 0x0a, + 0x2c, 0x67, 0x9a, 0xb6, 0xff, 0x2b, 0xef, 0x7e, 0x97, 0x87, 0xb5, 0x39, 0x7a, 0xb8, 0x15, 0x75, + 0xb7, 0xb2, 0xe1, 0xfe, 0xf1, 0x45, 0xe6, 0x6a, 0xf2, 0xfb, 0xb3, 0x47, 0xfc, 0x30, 0x6a, 0x86, + 0x6f, 0x02, 0xb2, 0x2d, 0xca, 0x42, 0x7b, 0x60, 0x53, 0x3f, 0xfa, 0x22, 0x97, 0x2d, 0xef, 0xca, + 0x04, 0x97, 0x1f, 0xe5, 0x3f, 0x02, 0xec, 0xb9, 0x81, 0x1d, 0xda, 0xcf, 0xa9, 0x61, 0xb3, 0xf8, + 0xf3, 0x9d, 0xb7, 0xc0, 0x05, 0x1d, 0xc5, 0x92, 0x0e, 0x0b, 0x13, 0x36, 0xa3, 0x43, 0x32, 0xc5, + 0xe6, 0x15, 0x30, 0xaf, 0xa3, 0x58, 0x92, 0xb0, 0xaf, 0x42, 0xcd, 0x72, 0xc7, 0xbc, 0xa1, 0x90, + 0x3c, 0x5e, 0x70, 0x15, 0xbd, 0x2a, 0xb1, 0x84, 0x12, 0x75, 0x7c, 0x93, 0x77, 0x83, 0x9a, 0x5e, + 0x95, 0x98, 0xa4, 0xdc, 0x80, 0x15, 0x32, 0x1c, 0xfa, 0xdc, 0x78, 0x6c, 0x48, 0xf6, 0xb0, 0xf5, + 0x04, 0x16, 0xc4, 0x8d, 0xc7, 0x50, 0x8e, 0xe3, 0xc0, 0x6f, 0x36, 0x1e, 0x09, 0xc3, 0x93, 0xaf, + 0x37, 0xb9, 0xad, 0x8a, 0x5e, 0x66, 0xb1, 0xf0, 0x2a, 0xd4, 0xec, 0xc0, 0x98, 0x3c, 0x23, 0xe6, + 0x36, 0x73, 0x5b, 0x65, 0xbd, 0x6a, 0x07, 0xc9, 0xbb, 0x51, 0xe3, 0xeb, 0x1c, 0xd4, 0xb3, 0xcf, + 0xa0, 0xb8, 0x0d, 0x65, 0xc7, 0x35, 0x89, 0x48, 0x04, 0xf9, 0x06, 0xbf, 0xf5, 0x9a, 0x97, 0xd3, + 0xe6, 0x41, 0xc4, 0xd7, 0x13, 0xcd, 0x8d, 0xbf, 0x29, 0x50, 0x8e, 0x61, 0x7c, 0x09, 0x0a, 0x1e, + 0x09, 0xcf, 0x84, 0xb9, 0xe2, 0x6e, 0x0e, 0x29, 0xba, 0x18, 0x73, 0x3c, 0xf0, 0x08, 0x13, 0x29, + 0x10, 0xe1, 0x7c, 0xcc, 0xf7, 0xd5, 0xa1, 0xc4, 0x12, 0x0d, 0xb2, 0x3b, 0x1a, 0x51, 0x16, 0x06, + 0xf1, 0xbe, 0x46, 0xf8, 0x5e, 0x04, 0xe3, 0xdb, 0xb0, 0x1a, 0xfa, 0xc4, 0x76, 0x32, 0xdc, 0x82, + 0xe0, 0xa2, 0x58, 0x90, 0x90, 0x77, 0xe0, 0x72, 0x6c, 0xd7, 0xa2, 0x21, 0x31, 0xcf, 0xa8, 0x35, + 0x51, 0x2a, 0x89, 0x37, 0xb6, 0xb7, 0x22, 0x42, 0x3b, 0x92, 0xc7, 0xba, 0x8d, 0xbf, 0x2b, 0xb0, + 0x1a, 0xb7, 0xf4, 0x56, 0x12, 0xac, 0x43, 0x00, 0xc2, 0x98, 0x1b, 0xa6, 0xc3, 0x35, 0x9b, 0xca, + 0x33, 0x7a, 0xcd, 0x56, 0xa2, 0xa4, 0xa7, 0x0c, 0x6c, 0x8c, 0x00, 0x26, 0x92, 0x85, 0x61, 0xbb, + 0x02, 0xd5, 0xe8, 0x8d, 0x5b, 0xfc, 0xa3, 0x44, 0x7e, 0x04, 0x82, 0x84, 0x78, 0xef, 0x8f, 0xd7, + 0xa1, 0x78, 0x4a, 0x87, 0x36, 0x8b, 0x5e, 0xde, 0xe4, 0x20, 0x7e, 0xcf, 0x2b, 0x24, 0xef, 0x79, + 0xbb, 0x4f, 0x60, 0xcd, 
0x74, 0x47, 0xd3, 0xee, 0xee, 0xa2, 0xa9, 0x0f, 0xd1, 0xe0, 0x33, 0xe5, + 0x0b, 0x98, 0x74, 0x6a, 0x5f, 0xe5, 0xf2, 0xfb, 0xbd, 0xdd, 0x3f, 0xe5, 0x36, 0xf6, 0xa5, 0x5e, + 0x2f, 0x5e, 0xa6, 0x4e, 0x07, 0x0e, 0x35, 0xb9, 0xeb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x5f, + 0x1c, 0x48, 0x4f, 0x0d, 0x1a, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 000000000..f2c6906b9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,155 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/any/any.proto +// DO NOT EDIT! + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/any/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. 
+ // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, + 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, + 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, + 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, + 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, + 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, + 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, + 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, + 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, + 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 000000000..81dcf46cc --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,140 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md new file mode 100644 index 000000000..2827b7d3f --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md @@ -0,0 +1,27 @@ +Want to contribute? Great! First, read this page (including the small print at the end). + +### Before you contribute +Before we can use your code, you must sign the +[Google Individual Contributor License Agreement] +(https://cla.developers.google.com/about/google-individual) +(CLA), which you can do online. The CLA is necessary mainly because you own the +copyright to your changes, even after your contribution becomes part of our +codebase, so we need your permission to use and distribute your code. We also +need to be sure of various other things—for instance that you'll tell us if you +know that your code infringes on other people's patents. You don't have to sign +the CLA until after you've submitted your code for review and a member has +approved it, but you must do it before we can put your code into our codebase. +Before you start working on a larger contribution, you should get in touch with +us first through the issue tracker with your idea so that we can help out and +possibly guide you. Coordinating up front makes it much easier to avoid +frustration later on. + +### Code reviews +All submissions, including submissions by project members, require review. We +use Github pull requests for this purpose. + +### The small print +Contributions made by corporations are covered by a different agreement than +the one above, the +[Software Grant and Corporate Contributor License Agreement] +(https://cla.developers.google.com/about/google-corporate). 
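[Editor's aside, not part of the vendored code] Stepping back to the Any type added above (the generated Go struct and its any.proto definition), a minimal pack/unpack round trip in Go could look like the sketch below. Inner and "mypkg.Inner" are hypothetical stand-ins for a concrete generated message; only the proto package already imported by any.pb.go, the Any struct itself, and the standard strings package are assumed. The companion golang/protobuf ptypes package, if it is vendored as well, provides MarshalAny/UnmarshalAny helpers that do the same thing.

    // Pack: serialize the concrete message and record its type URL.
    payload, err := proto.Marshal(inner) // inner is a hypothetical generated message
    if err != nil {
        return err
    }
    packed := &any.Any{
        TypeUrl: "type.googleapis.com/mypkg.Inner", // type name follows the last '/'
        Value:   payload,
    }

    // Unpack: check the type name suffix, then unmarshal Value into that type.
    if strings.HasSuffix(packed.TypeUrl, "/mypkg.Inner") {
        var out Inner
        if err := proto.Unmarshal(packed.Value, &out); err != nil {
            return err
        }
        _ = out
    }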
diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE new file mode 100644 index 000000000..6d16b6578 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/LICENSE @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/README.md b/vendor/github.com/googleapis/gax-go/README.md new file mode 100644 index 000000000..3cedd5be9 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/README.md @@ -0,0 +1,24 @@ +Google API Extensions for Go +============================ + +[![Build Status](https://travis-ci.org/googleapis/gax-go.svg?branch=master)](https://travis-ci.org/googleapis/gax-go) +[![Code Coverage](https://img.shields.io/codecov/c/github/googleapis/gax-go.svg)](https://codecov.io/github/googleapis/gax-go) + +Google API Extensions for Go (gax-go) is a set of modules which aids the +development of APIs for clients and servers based on `gRPC` and Google API +conventions. + +Application code will rarely need to use this library directly, +but the code generated automatically from API definition files can use it +to simplify code generation and to provide more convenient and idiomatic API surface. + +**This project is currently experimental and not supported.** + +Go Versions +=========== +This library requires Go 1.6 or above. + +License +======= +BSD - please see [LICENSE](https://github.com/googleapis/gax-go/blob/master/LICENSE) +for more information. diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go new file mode 100644 index 000000000..536cb8ce3 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/call_option.go @@ -0,0 +1,149 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "math/rand" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// CallOption is an option used by Invoke to control behaviors of RPC calls. +// CallOption works by modifying relevant fields of CallSettings. +type CallOption interface { + // Resolve applies the option by modifying cs. + Resolve(cs *CallSettings) +} + +// Retryer is used by Invoke to determine retry behavior. +type Retryer interface { + // Retry reports whether a request should be retriedand how long to pause before retrying + // if the previous attempt returned with err. Invoke never calls Retry with nil error. + Retry(err error) (pause time.Duration, shouldRetry bool) +} + +type retryerOption func() Retryer + +func (o retryerOption) Resolve(s *CallSettings) { + s.Retry = o +} + +// WithRetry sets CallSettings.Retry to fn. +func WithRetry(fn func() Retryer) CallOption { + return retryerOption(fn) +} + +// OnCodes returns a Retryer that retries if and only if +// the previous attempt returns a GRPC error whose error code is stored in cc. +// Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. +func OnCodes(cc []codes.Code, bo Backoff) Retryer { + return &boRetryer{ + backoff: bo, + codes: append([]codes.Code(nil), cc...), + } +} + +type boRetryer struct { + backoff Backoff + codes []codes.Code +} + +func (r *boRetryer) Retry(err error) (time.Duration, bool) { + c := grpc.Code(err) + for _, rc := range r.codes { + if c == rc { + return r.backoff.Pause(), true + } + } + return 0, false +} + +// Backoff implements exponential backoff. +// The wait time between retries is a random value between 0 and the "retry envelope". +// The envelope starts at Initial and increases by the factor of Multiplier every retry, +// but is capped at Max. +type Backoff struct { + // Initial is the initial value of the retry envelope, defaults to 1 second. + Initial time.Duration + + // Max is the maximum value of the retry envelope, defaults to 30 seconds. + Max time.Duration + + // Multiplier is the factor by which the retry envelope increases. + // It should be greater than 1 and defaults to 2. 
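[Editor's aside, not part of the vendored file] The Retryer, OnCodes and Backoff pieces above combine into a single CallOption. A hedged sketch of how a caller might configure them is shown here; it assumes the gax, codes and time packages are imported under their usual names, and the specific status codes and durations are arbitrary examples.

    // Retry transient gRPC failures with exponential backoff capped at 5s.
    retry := gax.WithRetry(func() gax.Retryer {
        return gax.OnCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}, gax.Backoff{
            Initial:    100 * time.Millisecond,
            Max:        5 * time.Second,
            Multiplier: 2,
        })
    })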
+ Multiplier float64 + + // cur is the current retry envelope + cur time.Duration +} + +func (bo *Backoff) Pause() time.Duration { + if bo.Initial == 0 { + bo.Initial = time.Second + } + if bo.cur == 0 { + bo.cur = bo.Initial + } + if bo.Max == 0 { + bo.Max = 30 * time.Second + } + if bo.Multiplier < 1 { + bo.Multiplier = 2 + } + d := time.Duration(rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption +} diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go new file mode 100644 index 000000000..5ebedff0d --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/gax.go @@ -0,0 +1,40 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +// +// This project is currently experimental and not supported. 
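As a quick orientation for the vendored call_option.go above, generated clients usually combine these pieces as below; a minimal sketch, not part of the patch, with made-up status codes and durations:

```
package main

import (
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go"
	"google.golang.org/grpc/codes"
)

func main() {
	// Retry on UNAVAILABLE with exponential backoff between 100ms and 30s.
	// WithRetry takes a factory so every invocation gets a fresh Retryer
	// carrying its own copy of the Backoff state.
	opt := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        30 * time.Second,
			Multiplier: 1.3,
		})
	})

	var settings gax.CallSettings
	opt.Resolve(&settings)
	fmt.Println("retry configured:", settings.Retry != nil)
}
```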
+package gax + +const Version = "0.1.0" diff --git a/vendor/github.com/googleapis/gax-go/header.go b/vendor/github.com/googleapis/gax-go/header.go new file mode 100644 index 000000000..d81455ecc --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/header.go @@ -0,0 +1,24 @@ +package gax + +import "bytes" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. +func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go new file mode 100644 index 000000000..86049d826 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/invoke.go @@ -0,0 +1,90 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "time" + + "golang.org/x/net/context" +) + +// A user defined call stub. +type APICall func(context.Context, CallSettings) error + +// Invoke calls the given APICall, +// performing retries as specified by opts, if any. +func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { + var settings CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + return invoke(ctx, call, settings, Sleep) +} + +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). 
+func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +type sleeper func(ctx context.Context, d time.Duration) error + +// invoke implements Invoke, taking an additional sleeper argument for testing. +func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { + var retryer Retryer + for { + err := call(ctx, settings) + if err == nil { + return nil + } + if settings.Retry == nil { + return err + } + if retryer == nil { + if r := settings.Retry(); r != nil { + retryer = r + } else { + return err + } + } + if d, ok := retryer.Retry(err); !ok { + return err + } else if err = sp(ctx, d); err != nil { + return err + } + } +} diff --git a/vendor/github.com/googleapis/gax-go/path_template.go b/vendor/github.com/googleapis/gax-go/path_template.go new file mode 100644 index 000000000..41bda94cb --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/path_template.go @@ -0,0 +1,176 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
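Continuing the sketch, invoke.go above is the entry point that ties the retry pieces together; placeholderRPC is a hypothetical stand-in for a generated gRPC stub call, and the timeout is arbitrary:

```
package main

import (
	"log"
	"time"

	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
)

// placeholderRPC stands in for a real gRPC call; illustration only.
func placeholderRPC(ctx context.Context) error { return nil }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{})
	})

	// Invoke runs the closure, consults the Retryer after each failure and
	// pauses via gax.Sleep, so a cancelled context cuts retries short with
	// ctx.Err() rather than sleeping out the full backoff.
	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		return placeholderRPC(ctx)
	}, retry)
	if err != nil {
		log.Fatal(err)
	}
}
```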
+ +package gax + +import ( + "errors" + "fmt" + "strings" +) + +type matcher interface { + match([]string) (int, error) + String() string +} + +type segment struct { + matcher + name string +} + +type labelMatcher string + +func (ls labelMatcher) match(segments []string) (int, error) { + if len(segments) == 0 { + return 0, fmt.Errorf("expected %s but no more segments found", ls) + } + if segments[0] != string(ls) { + return 0, fmt.Errorf("expected %s but got %s", ls, segments[0]) + } + return 1, nil +} + +func (ls labelMatcher) String() string { + return string(ls) +} + +type wildcardMatcher int + +func (wm wildcardMatcher) match(segments []string) (int, error) { + if len(segments) == 0 { + return 0, errors.New("no more segments found") + } + return 1, nil +} + +func (wm wildcardMatcher) String() string { + return "*" +} + +type pathWildcardMatcher int + +func (pwm pathWildcardMatcher) match(segments []string) (int, error) { + length := len(segments) - int(pwm) + if length <= 0 { + return 0, errors.New("not sufficient segments are supplied for path wildcard") + } + return length, nil +} + +func (pwm pathWildcardMatcher) String() string { + return "**" +} + +type ParseError struct { + Pos int + Template string + Message string +} + +func (pe ParseError) Error() string { + return fmt.Sprintf("at %d of template '%s', %s", pe.Pos, pe.Template, pe.Message) +} + +// PathTemplate manages the template to build and match with paths used +// by API services. It holds a template and variable names in it, and +// it can extract matched patterns from a path string or build a path +// string from a binding. +// +// See http.proto in github.com/googleapis/googleapis/ for the details of +// the template syntax. +type PathTemplate struct { + segments []segment +} + +// NewPathTemplate parses a path template, and returns a PathTemplate +// instance if successful. +func NewPathTemplate(template string) (*PathTemplate, error) { + return parsePathTemplate(template) +} + +// MustCompilePathTemplate is like NewPathTemplate but panics if the +// expression cannot be parsed. It simplifies safe initialization of +// global variables holding compiled regular expressions. +func MustCompilePathTemplate(template string) *PathTemplate { + pt, err := NewPathTemplate(template) + if err != nil { + panic(err) + } + return pt +} + +// Match attempts to match the given path with the template, and returns +// the mapping of the variable name to the matched pattern string. +func (pt *PathTemplate) Match(path string) (map[string]string, error) { + paths := strings.Split(path, "/") + values := map[string]string{} + for _, segment := range pt.segments { + length, err := segment.match(paths) + if err != nil { + return nil, err + } + if segment.name != "" { + value := strings.Join(paths[:length], "/") + if oldValue, ok := values[segment.name]; ok { + values[segment.name] = oldValue + "/" + value + } else { + values[segment.name] = value + } + } + paths = paths[length:] + } + if len(paths) != 0 { + return nil, fmt.Errorf("Trailing path %s remains after the matching", strings.Join(paths, "/")) + } + return values, nil +} + +// Render creates a path string from its template and the binding from +// the variable name to the value. 
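To make the PathTemplate API above concrete, a small usage sketch; the shelves/books template and values are invented for illustration:

```
package main

import (
	"fmt"

	gax "github.com/googleapis/gax-go"
)

func main() {
	tmpl := gax.MustCompilePathTemplate("shelves/{shelf}/books/{book}")

	// Extract variables from a concrete path.
	vars, err := tmpl.Match("shelves/s1/books/war-and-peace")
	if err != nil {
		panic(err)
	}
	fmt.Println(vars["shelf"], vars["book"]) // s1 war-and-peace

	// Build a path back from a binding.
	path, err := tmpl.Render(map[string]string{"shelf": "s1", "book": "b2"})
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // shelves/s1/books/b2
}
```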
+func (pt *PathTemplate) Render(binding map[string]string) (string, error) { + result := make([]string, 0, len(pt.segments)) + var lastVariableName string + for _, segment := range pt.segments { + name := segment.name + if lastVariableName != "" && name == lastVariableName { + continue + } + lastVariableName = name + if name == "" { + result = append(result, segment.String()) + } else if value, ok := binding[name]; ok { + result = append(result, value) + } else { + return "", fmt.Errorf("%s is not found", name) + } + } + built := strings.Join(result, "/") + return built, nil +} diff --git a/vendor/github.com/googleapis/gax-go/path_template_parser.go b/vendor/github.com/googleapis/gax-go/path_template_parser.go new file mode 100644 index 000000000..79c8e759c --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/path_template_parser.go @@ -0,0 +1,227 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "fmt" + "io" + "strings" +) + +// This parser follows the syntax of path templates, from +// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto. +// The differences are that there is no custom verb, we allow the initial slash +// to be absent, and that we are not strict as +// https://tools.ietf.org/html/rfc6570 about the characters in identifiers and +// literals. + +type pathTemplateParser struct { + r *strings.Reader + runeCount int // the number of the current rune in the original string + nextVar int // the number to use for the next unnamed variable + seenName map[string]bool // names we've seen already + seenPathWildcard bool // have we seen "**" already? +} + +func parsePathTemplate(template string) (pt *PathTemplate, err error) { + p := &pathTemplateParser{ + r: strings.NewReader(template), + seenName: map[string]bool{}, + } + + // Handle panics with strings like errors. + // See pathTemplateParser.error, below. 
+ defer func() { + if x := recover(); x != nil { + errmsg, ok := x.(errString) + if !ok { + panic(x) + } + pt = nil + err = ParseError{p.runeCount, template, string(errmsg)} + } + }() + + segs := p.template() + // If there is a path wildcard, set its length. We can't do this + // until we know how many segments we've got all together. + for i, seg := range segs { + if _, ok := seg.matcher.(pathWildcardMatcher); ok { + segs[i].matcher = pathWildcardMatcher(len(segs) - i - 1) + break + } + } + return &PathTemplate{segments: segs}, nil + +} + +// Used to indicate errors "thrown" by this parser. We don't use string because +// many parts of the standard library panic with strings. +type errString string + +// Terminates parsing immediately with an error. +func (p *pathTemplateParser) error(msg string) { + panic(errString(msg)) +} + +// Template = [ "/" ] Segments +func (p *pathTemplateParser) template() []segment { + var segs []segment + if p.consume('/') { + // Initial '/' needs an initial empty matcher. + segs = append(segs, segment{matcher: labelMatcher("")}) + } + return append(segs, p.segments("")...) +} + +// Segments = Segment { "/" Segment } +func (p *pathTemplateParser) segments(name string) []segment { + var segs []segment + for { + subsegs := p.segment(name) + segs = append(segs, subsegs...) + if !p.consume('/') { + break + } + } + return segs +} + +// Segment = "*" | "**" | LITERAL | Variable +func (p *pathTemplateParser) segment(name string) []segment { + if p.consume('*') { + if name == "" { + name = fmt.Sprintf("$%d", p.nextVar) + p.nextVar++ + } + if p.consume('*') { + if p.seenPathWildcard { + p.error("multiple '**' disallowed") + } + p.seenPathWildcard = true + // We'll change 0 to the right number at the end. + return []segment{{name: name, matcher: pathWildcardMatcher(0)}} + } + return []segment{{name: name, matcher: wildcardMatcher(0)}} + } + if p.consume('{') { + if name != "" { + p.error("recursive named bindings are not allowed") + } + return p.variable() + } + return []segment{{name: name, matcher: labelMatcher(p.literal())}} +} + +// Variable = "{" FieldPath [ "=" Segments ] "}" +// "{" is already consumed. +func (p *pathTemplateParser) variable() []segment { + // Simplification: treat FieldPath as LITERAL, instead of IDENT { '.' IDENT } + name := p.literal() + if p.seenName[name] { + p.error(name + " appears multiple times") + } + p.seenName[name] = true + var segs []segment + if p.consume('=') { + segs = p.segments(name) + } else { + // "{var}" is equivalent to "{var=*}" + segs = []segment{{name: name, matcher: wildcardMatcher(0)}} + } + if !p.consume('}') { + p.error("expected '}'") + } + return segs +} + +// A literal is any sequence of characters other than a few special ones. +// The list of stop characters is not quite the same as in the template RFC. +func (p *pathTemplateParser) literal() string { + lit := p.consumeUntil("/*}{=") + if lit == "" { + p.error("empty literal") + } + return lit +} + +// Read runes until EOF or one of the runes in stopRunes is encountered. +// If the latter, unread the stop rune. Return the accumulated runes as a string. +func (p *pathTemplateParser) consumeUntil(stopRunes string) string { + var runes []rune + for { + r, ok := p.readRune() + if !ok { + break + } + if strings.IndexRune(stopRunes, r) >= 0 { + p.unreadRune() + break + } + runes = append(runes, r) + } + return string(runes) +} + +// If the next rune is r, consume it and return true. +// Otherwise, leave the input unchanged and return false. 
+func (p *pathTemplateParser) consume(r rune) bool { + rr, ok := p.readRune() + if !ok { + return false + } + if r == rr { + return true + } + p.unreadRune() + return false +} + +// Read the next rune from the input. Return it. +// The second return value is false at EOF. +func (p *pathTemplateParser) readRune() (rune, bool) { + r, _, err := p.r.ReadRune() + if err == io.EOF { + return r, false + } + if err != nil { + p.error(err.Error()) + } + p.runeCount++ + return r, true +} + +// Put the last rune that was read back on the input. +func (p *pathTemplateParser) unreadRune() { + if err := p.r.UnreadRune(); err != nil { + p.error(err.Error()) + } + p.runeCount-- +} diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go index ff8b8b109..e0019a334 100644 --- a/vendor/github.com/minio/minio-go/api-error-response.go +++ b/vendor/github.com/minio/minio-go/api-error-response.go @@ -161,6 +161,9 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) if errResp.Region == "" { errResp.Region = resp.Header.Get("x-amz-bucket-region") } + if errResp.Code == "InvalidRegion" && errResp.Region != "" { + errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region) + } // Save headers returned in the API XML error errResp.Headers = resp.Header diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go index 477a0969f..c4193e934 100644 --- a/vendor/github.com/minio/minio-go/api-get-object-file.go +++ b/vendor/github.com/minio/minio-go/api-get-object-file.go @@ -20,15 +20,17 @@ import ( "io" "os" "path/filepath" + + "github.com/minio/minio-go/pkg/s3utils" ) // FGetObject - download contents of an object to a local file. func (c Client) FGetObject(bucketName, objectName, filePath string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go index 2abd4608e..1078d2f98 100644 --- a/vendor/github.com/minio/minio-go/api-get-object.go +++ b/vendor/github.com/minio/minio-go/api-get-object.go @@ -26,11 +26,12 @@ import ( "time" "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/pkg/s3utils" ) -// GetEncryptedObject deciphers and streams data stored in the server after applying a specifed encryption materiels -func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.Reader, error) { - +// GetEncryptedObject deciphers and streams data stored in the server after applying a specified encryption materials, +// returned stream should be closed by the caller. +func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) { if encryptMaterials == nil { return nil, ErrInvalidArgument("Unable to recognize empty encryption properties") } @@ -57,10 +58,10 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria // GetObject - returns an seekable, readable object. func (c Client) GetObject(bucketName, objectName string) (*Object, error) { // Input validation. 
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } @@ -328,14 +329,14 @@ func (o *Object) setOffset(bytesRead int64) error { // Update the currentOffset. o.currOffset += bytesRead - if o.currOffset >= o.objectInfo.Size { + if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { return io.EOF } return nil } // Read reads up to len(b) bytes into b. It returns the number of -// bytes read (0 <= n <= len(p)) and any error encountered. Returns +// bytes read (0 <= n <= len(b)) and any error encountered. Returns // io.EOF upon end of file. func (o *Object) Read(b []byte) (n int, err error) { if o == nil { @@ -442,7 +443,7 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { if o.objectInfoSet { // If offset is negative than we return io.EOF. // If offset is greater than or equal to object size we return io.EOF. - if offset >= o.objectInfo.Size || offset < 0 { + if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 { return 0, io.EOF } } @@ -542,16 +543,20 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) { default: return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) case 0: - if offset > o.objectInfo.Size { + if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { return 0, io.EOF } o.currOffset = offset case 1: - if o.currOffset+offset > o.objectInfo.Size { + if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { return 0, io.EOF } o.currOffset += offset case 2: + // If we don't know the object size return an error for io.SeekEnd + if o.objectInfo.Size < 0 { + return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown") + } // Seeking to positive offset is valid for whence '2', but // since we are backing a Reader we have reached 'EOF' if // offset is positive. @@ -623,10 +628,10 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { // Validate input arguments. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, ObjectInfo{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return nil, ObjectInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go index 7491df330..10ccdc66b 100644 --- a/vendor/github.com/minio/minio-go/api-get-policy.go +++ b/vendor/github.com/minio/minio-go/api-get-policy.go @@ -23,15 +23,16 @@ import ( "net/url" "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio-go/pkg/s3utils" ) // GetBucketPolicy - get bucket policy at a given path. func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) { // Input validation. 
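Stepping back from the hunks above: GetObject now tolerates objects whose size is unknown (reported as -1); such objects read fine sequentially, while seeking relative to the end fails explicitly instead of returning a bogus offset. A minimal caller-side sketch with placeholder endpoint, credentials and names (minio.New is the library's existing constructor, not part of this diff):

```
package main

import (
	"io"
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// Endpoint and credentials are placeholders.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	obj, err := client.GetObject("mybucket", "myobject")
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()

	if _, err := io.Copy(os.Stdout, obj); err != nil {
		log.Fatal(err)
	}
}
```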
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return policy.BucketPolicyNone, err } - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { return policy.BucketPolicyNone, err } policyInfo, err := c.getBucketPolicy(bucketName) @@ -48,10 +49,10 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p // ListBucketPolicies - list all policies for a given prefix and all its children. func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return map[string]policy.BucketPolicy{}, err } - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { return map[string]policy.BucketPolicy{}, err } policyInfo, err := c.getBucketPolicy(bucketName) diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go index 6a228179e..6de1fe9b3 100644 --- a/vendor/github.com/minio/minio-go/api-list.go +++ b/vendor/github.com/minio/minio-go/api-list.go @@ -17,10 +17,13 @@ package minio import ( + "errors" "fmt" "net/http" "net/url" "strings" + + "github.com/minio/minio-go/pkg/s3utils" ) // ListBuckets list all buckets owned by this authenticated user. @@ -69,7 +72,7 @@ func (c Client) ListBuckets() ([]BucketInfo, error) { // // Create a done channel. // doneCh := make(chan struct{}) // defer close(doneCh) -// // Recurively list all objects in 'mytestbucket' +// // Recursively list all objects in 'mytestbucket' // recursive := true // for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) { // fmt.Println(message) @@ -84,18 +87,21 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d // If recursive we do not delimit. delimiter = "" } + // Return object owner information by default fetchOwner := true + // Validate bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, } return objectStatCh } + // Validate incoming object prefix. - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -120,7 +126,6 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d // If contents are available loop through and send over channel. for _, object := range result.Contents { - // Save the marker. select { // Send object content. case objectStatCh <- object: @@ -133,12 +138,12 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d // Send all common prefixes if any. // NOTE: prefixes are only present if the request is delimited. for _, obj := range result.CommonPrefixes { - object := ObjectInfo{} - object.Key = obj.Prefix - object.Size = 0 select { // Send object prefixes. - case objectStatCh <- object: + case objectStatCh <- ObjectInfo{ + Key: obj.Prefix, + Size: 0, + }: // If receives done from the caller, return here. 
case <-doneCh: return @@ -170,11 +175,11 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d // ?max-keys - Sets the maximum number of keys returned in the response body. func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { // Validate bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketV2Result{}, err } // Validate object prefix. - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { return ListBucketV2Result{}, err } // Get resources properly escaped and lined up before @@ -227,10 +232,17 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s // Decode listBuckets XML. listBucketResult := ListBucketV2Result{} - err = xmlDecoder(resp.Body, &listBucketResult) - if err != nil { + if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { return listBucketResult, err } + + // This is an additional verification check to make + // sure proper responses are received. + if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { + return listBucketResult, errors.New("Truncated response should have continuation token set") + } + + // Success. return listBucketResult, nil } @@ -266,7 +278,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don delimiter = "" } // Validate bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -274,7 +286,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don return objectStatCh } // Validate incoming object prefix. - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { defer close(objectStatCh) objectStatCh <- ObjectInfo{ Err: err, @@ -350,11 +362,11 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // ?max-keys - Sets the maximum number of keys returned in the response body. func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) { // Validate bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketResult{}, err } // Validate object prefix. - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { return ListBucketResult{}, err } // Get resources properly escaped and lined up before @@ -442,7 +454,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive delimiter = "" } // Validate bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectMultipartStatCh) objectMultipartStatCh <- ObjectMultipartInfo{ Err: err, @@ -450,7 +462,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive return objectMultipartStatCh } // Validate incoming object prefix. 
- if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { defer close(objectMultipartStatCh) objectMultipartStatCh <- ObjectMultipartInfo{ Err: err, diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go index cbea1c6da..25a283af5 100644 --- a/vendor/github.com/minio/minio-go/api-notification.go +++ b/vendor/github.com/minio/minio-go/api-notification.go @@ -30,7 +30,7 @@ import ( // GetBucketNotification - get bucket notification at a given path. func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return BucketNotification{}, err } notification, err := c.getBucketNotification(bucketName) @@ -140,7 +140,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even defer close(notificationInfoCh) // Validate the bucket name. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { notificationInfoCh <- NotificationInfo{ Err: err, } @@ -155,7 +155,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even return } - // Continously run and listen on bucket notification. + // Continuously run and listen on bucket notification. // Create a done channel to control 'ListObjects' go routine. retryDoneCh := make(chan struct{}, 1) diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go index f9d05ab9b..8cfcb55fb 100644 --- a/vendor/github.com/minio/minio-go/api-presigned.go +++ b/vendor/github.com/minio/minio-go/api-presigned.go @@ -42,10 +42,10 @@ func (c Client) presignURL(method string, bucketName string, objectName string, if method == "" { return nil, ErrInvalidArgument("method cannot be empty.") } - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return nil, err } if err := isValidExpiry(expires); err != nil { @@ -122,21 +122,38 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str return nil, nil, err } + // Get credentials from the configured credentials provider. + credValues, err := c.credsProvider.Get() + if err != nil { + return nil, nil, err + } + + var ( + signerType = credValues.SignerType + sessionToken = credValues.SessionToken + accessKeyID = credValues.AccessKeyID + secretAccessKey = credValues.SecretAccessKey + ) + + if signerType.IsAnonymous() { + return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials") + } + // Keep time. t := time.Now().UTC() // For signature version '2' handle here. - if c.signature.isV2() { + if signerType.IsV2() { policyBase64 := p.base64() p.formData["policy"] = policyBase64 // For Google endpoint set this value to be 'GoogleAccessId'. if s3utils.IsGoogleEndpoint(c.endpointURL) { - p.formData["GoogleAccessId"] = c.accessKeyID + p.formData["GoogleAccessId"] = accessKeyID } else { // For all other endpoints set this value to be 'AWSAccessKeyId'. - p.formData["AWSAccessKeyId"] = c.accessKeyID + p.formData["AWSAccessKeyId"] = accessKeyID } // Sign the policy. 
- p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey) + p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey) return u, p.formData, nil } @@ -159,7 +176,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str } // Add a credential policy. - credential := s3signer.GetCredential(c.accessKeyID, location, t) + credential := s3signer.GetCredential(accessKeyID, location, t) if err = p.addNewPolicy(policyCondition{ matchType: "eq", condition: "$x-amz-credential", @@ -168,13 +185,27 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str return nil, nil, err } + if sessionToken != "" { + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-security-token", + value: sessionToken, + }); err != nil { + return nil, nil, err + } + } + // Get base64 encoded policy. policyBase64 := p.base64() + // Fill in the form data. p.formData["policy"] = policyBase64 p.formData["x-amz-algorithm"] = signV4Algorithm p.formData["x-amz-credential"] = credential p.formData["x-amz-date"] = t.Format(iso8601DateFormat) - p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location) + if sessionToken != "" { + p.formData["x-amz-security-token"] = sessionToken + } + p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) return u, p.formData, nil } diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go index 001da6de3..fd37dc192 100644 --- a/vendor/github.com/minio/minio-go/api-put-bucket.go +++ b/vendor/github.com/minio/minio-go/api-put-bucket.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,18 +19,14 @@ package minio import ( "bytes" - "encoding/base64" - "encoding/hex" "encoding/json" "encoding/xml" "fmt" - "io/ioutil" "net/http" "net/url" - "path" "github.com/minio/minio-go/pkg/policy" - "github.com/minio/minio-go/pkg/s3signer" + "github.com/minio/minio-go/pkg/s3utils" ) /// Bucket operations @@ -50,95 +47,23 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) { }() // Validate the input arguments. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { return err } // If location is empty, treat is a default region 'us-east-1'. if location == "" { location = "us-east-1" - } - - // Try creating bucket with the provided region, in case of - // invalid region error let's guess the appropriate region - // from S3 API headers - - // Create a done channel to control 'newRetryTimer' go routine. - doneCh := make(chan struct{}, 1) - - // Indicate to our routine to exit cleanly upon return. - defer close(doneCh) - - // Blank indentifier is kept here on purpose since 'range' without - // blank identifiers is only supported since go1.4 - // https://golang.org/doc/go1.4#forrange. - for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { - // Initialize the makeBucket request. 
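For the presigned POST changes above (credentials provider, optional x-amz-security-token condition), the calling pattern looks roughly like this; the PostPolicy setters come from the library's existing API rather than this hunk, and the bucket, key and expiry are illustrative:

```
package example

import (
	"net/url"
	"time"

	minio "github.com/minio/minio-go"
)

// presignUpload builds a browser-upload form for bucket/key, valid for
// ten minutes. With temporary credentials the returned form data now
// also carries x-amz-security-token.
func presignUpload(client *minio.Client, bucket, key string) (*url.URL, map[string]string, error) {
	p := minio.NewPostPolicy()
	p.SetBucket(bucket)
	p.SetKey(key)
	p.SetExpires(time.Now().UTC().Add(10 * time.Minute))
	return client.PresignedPostPolicy(p)
}
```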
- req, err := c.makeBucketRequest(bucketName, location) - if err != nil { - return err + // For custom region clients, default + // to custom region instead not 'us-east-1'. + if c.region != "" { + location = c.region } - - // Execute make bucket request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - err := httpRespToErrorResponse(resp, bucketName, "") - errResp := ToErrorResponse(err) - if errResp.Code == "InvalidRegion" && errResp.Region != "" { - // Fetch bucket region found in headers - // of S3 error response, attempt bucket - // create again. - location = errResp.Region - continue - } - // Nothing to retry, fail. - return err - } - - // Control reaches here when bucket create was successful, - // break out. - break } - - // Success. - return nil -} - -// Low level wrapper API For makeBucketRequest. -func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) { - // Validate input arguments. - if err := isValidBucketName(bucketName); err != nil { - return nil, err - } - - // In case of Amazon S3. The make bucket issued on - // already existing bucket would fail with - // 'AuthorizationMalformed' error if virtual style is - // used. So we default to 'path style' as that is the - // preferred method here. The final location of the - // 'bucket' is provided through XML LocationConstraint - // data with the request. - targetURL := c.endpointURL - targetURL.Path = path.Join(bucketName, "") + "/" - - // get a new HTTP request for the method. - req, err := http.NewRequest("PUT", targetURL.String(), nil) - if err != nil { - return nil, err - } - - // set UserAgent for the request. - c.setUserAgent(req) - - // set sha256 sum for signature calculation only with - // signature version '4'. - if c.signature.isV4() { - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) + // PUT bucket request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + bucketLocation: location, } // If location is not 'us-east-1' create bucket location config. @@ -148,30 +73,29 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req var createBucketConfigBytes []byte createBucketConfigBytes, err = xml.Marshal(createBucketConfig) if err != nil { - return nil, err + return err } - createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes) - req.Body = ioutil.NopCloser(createBucketConfigBuffer) - req.ContentLength = int64(len(createBucketConfigBytes)) - // Set content-md5. - req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes))) - if c.signature.isV4() { - // Set sha256. - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes))) + reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes) + reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes) + reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) + reqMetadata.contentLength = int64(len(createBucketConfigBytes)) + } + + // Execute PUT to create a new bucket. + resp, err := c.executeMethod("PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") } } - // Sign the request. - if c.signature.isV4() { - // Signature calculated for MakeBucket request should be for 'us-east-1', - // regardless of the bucket's location constraint. 
- req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") - } else if c.signature.isV2() { - req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) - } - - // Return signed request. - return req, nil + // Success. + return nil } // SetBucketPolicy set the access permissions on an existing bucket. @@ -184,10 +108,10 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req // writeonly - anonymous put/delete access to a given object prefix. func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectPrefix(objectPrefix); err != nil { + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { return err } @@ -216,7 +140,7 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo // Saves a new bucket policy. func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } @@ -262,7 +186,7 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces // Removes all policies on a bucket. func (c Client) removeBucketPolicy(bucketName string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } // Get resources properly escaped and lined up before @@ -286,7 +210,7 @@ func (c Client) removeBucketPolicy(bucketName string) error { // SetBucketNotification saves a new bucket notification. func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go index 68a459f4a..213fc21f4 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/api-put-object-common.go @@ -23,6 +23,8 @@ import ( "io/ioutil" "math" "os" + + "github.com/minio/minio-go/pkg/s3utils" ) // Verify if reader is *os.File @@ -43,23 +45,6 @@ func isReadAt(reader io.Reader) (ok bool) { return } -// shouldUploadPart - verify if part should be uploaded. -func shouldUploadPart(objPart ObjectPart, uploadReq uploadPartReq) bool { - // If part not found should upload the part. - if uploadReq.Part == nil { - return true - } - // if size mismatches should upload the part. - if objPart.Size != uploadReq.Part.Size { - return true - } - // if md5sum mismatches should upload the part. - if objPart.ETag != uploadReq.Part.ETag { - return true - } - return false -} - // optimalPartInfo - calculate the optimal part info for a given // object size. // @@ -168,10 +153,10 @@ func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, // or initiate a new request to fetch a new upload id. func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) { // Input validation. 
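Caller-facing behavior of the rewritten MakeBucket above is unchanged, except that an empty location now resolves to the client's configured region before falling back to us-east-1. A short sketch reusing a client built as in the earlier example; the bucket name and prefix are placeholders, and policy.BucketPolicyReadOnly is an existing constant of the vendored policy package:

```
package example

import (
	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/policy"
)

// setupBucket creates a bucket and opens anonymous reads under "public/".
func setupBucket(client *minio.Client, bucket string) error {
	// Empty location: use the client's region if set, else us-east-1.
	if err := client.MakeBucket(bucket, ""); err != nil {
		return err
	}
	return client.SetBucketPolicy(bucket, "public/", policy.BucketPolicyReadOnly)
}
```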
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return "", err } @@ -183,49 +168,6 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][ return initMultipartUploadResult.UploadID, nil } -// getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session -// or initiate a new multipart session if no current one found -func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]ObjectPart, error) { - // A map of all uploaded parts. - var partsInfo map[int]ObjectPart - var err error - - uploadID, err := c.findUploadID(bucketName, objectName) - if err != nil { - return "", nil, err - } - - if uploadID == "" { - // Initiates a new multipart request - uploadID, err = c.newUploadID(bucketName, objectName, metaData) - if err != nil { - return "", nil, err - } - } else { - // Fetch previously upload parts and maximum part size. - partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID) - if err != nil { - // When the server returns NoSuchUpload even if its previouls acknowleged the existance of the upload id, - // initiate a new multipart upload - if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" { - uploadID, err = c.newUploadID(bucketName, objectName, metaData) - if err != nil { - return "", nil, err - } - } else { - return "", nil, err - } - } - } - - // Allocate partsInfo if not done yet - if partsInfo == nil { - partsInfo = make(map[int]ObjectPart) - } - - return uploadID, partsInfo, nil -} - // computeHash - Calculates hashes for an input read Seeker. func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) { hashWriter := ioutil.Discard diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go index 56978d427..d9e2f1b57 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-copy.go +++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go @@ -25,10 +25,10 @@ import ( // CopyObject - copy a source object into a new object with the provided name in the provided bucket func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } if objectSource == "" { diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go index 09fec769d..0dc355ecf 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-file.go +++ b/vendor/github.com/minio/minio-go/api-put-object-file.go @@ -17,11 +17,7 @@ package minio import ( - "crypto/md5" - "crypto/sha256" - "encoding/hex" "fmt" - "hash" "io" "io/ioutil" "mime" @@ -35,10 +31,10 @@ import ( // FPutObject - Create an object in a bucket, with contents from file at filePath. func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) { // Input validation. 
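FPutObject above is the public entry into the multipart path that the rest of this file reworks (resumable uploads are gone; every call starts a fresh upload ID). A thin sketch with placeholder names and a generic content type:

```
package example

import (
	"os"

	minio "github.com/minio/minio-go"
)

// uploadFile pushes a local file into the bucket; files above the
// single-PUT limit go through the multipart code below.
func uploadFile(client *minio.Client, bucket, object, path string) (int64, error) {
	// Fail early with a clear error if the file is missing.
	if _, err := os.Stat(path); err != nil {
		return 0, err
	}
	return client.FPutObject(bucket, object, path, "application/octet-stream")
}
```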
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } @@ -77,17 +73,8 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) objMetadata["Content-Type"] = []string{contentType} // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs. - // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers. if s3utils.IsGoogleEndpoint(c.endpointURL) { - if fileSize > int64(maxSinglePutObjectSize) { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize), - Key: objectName, - BucketName: bucketName, - } - } - // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size. + // Do not compute MD5 for Google Cloud Storage. return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil) } @@ -118,22 +105,20 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) // putObjectMultipartFromFile - Creates object from contents of *os.File // // NOTE: This function is meant to be used for readers with local -// file as in *os.File. This function resumes by skipping all the -// necessary parts which were already uploaded by verifying them -// against MD5SUM of each individual parts. This function also -// effectively utilizes file system capabilities of reading from -// specific sections and not having to create temporary files. +// file as in *os.File. This function effectively utilizes file +// system capabilities of reading from specific sections and not +// having to create temporary files. func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } - // Get the upload id of a previously partially uploaded object or initiate a new multipart upload - uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData) + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(bucketName, objectName, metaData) if err != nil { return 0, err } @@ -161,6 +146,9 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe // Just for readability. lastPartNumber := totalPartsCount + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + // Send each part through the partUploadCh to be uploaded. for p := 1; p <= totalPartsCount; p++ { part, ok := partsInfo[p] @@ -179,12 +167,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe for uploadReq := range uploadPartsCh { // Add hash algorithms that need to be calculated by computeHash() // In case of a non-v4 signature or https connection, sha256 is not needed. 
- hashAlgos := make(map[string]hash.Hash) - hashSums := make(map[string][]byte) - hashAlgos["md5"] = md5.New() - if c.signature.isV4() && !c.secure { - hashAlgos["sha256"] = sha256.New() - } + hashAlgos, hashSums := c.hashMaterials() // If partNumber was not uploaded we calculate the missing // part offset and size. For all other part numbers we @@ -213,36 +196,24 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe return } - // Create the part to be uploaded. - verifyObjPart := ObjectPart{ - ETag: hex.EncodeToString(hashSums["md5"]), - PartNumber: uploadReq.PartNum, - Size: partSize, - } - - // If this is the last part do not give it the full part size. - if uploadReq.PartNum == lastPartNumber { - verifyObjPart.Size = lastPartSize - } - - // Verify if part should be uploaded. - if shouldUploadPart(verifyObjPart, uploadReq) { - // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize) - if err != nil { - uploadedPartsCh <- uploadedPartRes{ - Error: err, - } - // Exit the goroutine. - return + // Proceed to upload the part. + var objPart ObjectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, + hashSums["md5"], hashSums["sha256"], prtSize) + if err != nil { + uploadedPartsCh <- uploadedPartRes{ + Error: err, } - // Save successfully uploaded part metadata. - uploadReq.Part = &objPart + // Exit the goroutine. + return } + + // Save successfully uploaded part metadata. + uploadReq.Part = &objPart + // Return through the channel the part size. uploadedPartsCh <- uploadedPartRes{ - Size: verifyObjPart.Size, + Size: missingPartSize, PartNum: uploadReq.PartNum, Part: uploadReq.Part, Error: nil, diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go index 3a299f65b..507fd65d6 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -18,12 +18,8 @@ package minio import ( "bytes" - "crypto/md5" - "crypto/sha256" - "encoding/hex" "encoding/xml" "fmt" - "hash" "io" "io/ioutil" "net/http" @@ -32,9 +28,11 @@ import ( "sort" "strconv" "strings" + + "github.com/minio/minio-go/pkg/s3utils" ) -// Comprehensive put object operation involving multipart resumable uploads. +// Comprehensive put object operation involving multipart uploads. // // Following code handles these types of readers. // @@ -42,9 +40,6 @@ import ( // - *minio.Object // - Any reader which has a method 'ReadAt()' // -// If we exhaust all the known types, code proceeds to use stream as -// is where each part is re-downloaded, checksummed and verified -// before upload. func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { if size > 0 && size > minPartSize { // Verify if reader is *os.File, then use file system functionalities. @@ -68,31 +63,22 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read // putObjectMultipartStreamNoChecksum - upload a large object using // multipart upload and streaming signature for signing payload. -// N B We don't resume an incomplete multipart upload, we overwrite -// existing parts of an incomplete upload. 
func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } - // Get the upload id of a previously partially uploaded object or initiate a new multipart upload - uploadID, err := c.findUploadID(bucketName, objectName) + // Initiates a new multipart request + uploadID, err := c.newUploadID(bucketName, objectName, metadata) if err != nil { return 0, err } - if uploadID == "" { - // Initiates a new multipart request - uploadID, err = c.newUploadID(bucketName, objectName, metadata) - if err != nil { - return 0, err - } - } // Calculate the optimal parts info for a given size. totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) @@ -176,10 +162,10 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string // special case where size is unknown i.e '-1'. func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } @@ -189,8 +175,8 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i // Complete multipart upload. var complMultipartUpload completeMultipartUpload - // Get the upload id of a previously partially uploaded object or initiate a new multipart upload - uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData) + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(bucketName, objectName, metaData) if err != nil { return 0, err } @@ -207,15 +193,13 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i // Initialize a temporary buffer. tmpBuffer := new(bytes.Buffer) + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + for partNumber <= totalPartsCount { // Choose hash algorithms to be calculated by hashCopyN, avoid sha256 // with non-v4 signature request or HTTPS connection - hashSums := make(map[string][]byte) - hashAlgos := make(map[string]hash.Hash) - hashAlgos["md5"] = md5.New() - if c.signature.isV4() && !c.secure { - hashAlgos["sha256"] = sha256.New() - } + hashAlgos, hashSums := c.hashMaterials() // Calculates hash sums while copying partSize bytes into tmpBuffer. prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize) @@ -228,31 +212,23 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i // as we read from the source. reader = newHook(tmpBuffer, progress) - part, ok := partsInfo[partNumber] + // Proceed to upload the part. + var objPart ObjectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize) + if err != nil { + // Reset the temporary buffer upon any error. + tmpBuffer.Reset() + return totalUploadedSize, err + } - // Verify if part should be uploaded. 
- if !ok || shouldUploadPart(ObjectPart{ - ETag: hex.EncodeToString(hashSums["md5"]), - PartNumber: partNumber, - Size: prtSize, - }, uploadPartReq{PartNum: partNumber, Part: &part}) { - // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize) - if err != nil { - // Reset the temporary buffer upon any error. - tmpBuffer.Reset() + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Update the progress reader for the skipped part. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil { return totalUploadedSize, err } - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - } else { - // Update the progress reader for the skipped part. - if progress != nil { - if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil { - return totalUploadedSize, err - } - } } // Reset the temporary buffer. @@ -305,10 +281,10 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return initiateMultipartUploadResult{}, err } @@ -359,10 +335,10 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData // uploadPart - Uploads a part in a multipart upload. func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectPart{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectPart{}, err } if size > maxPartSize { @@ -419,10 +395,10 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) { // Input validation. 
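Both rewritten multipart paths ask `optimalPartInfo(size)` for a `(totalPartsCount, partSize, lastPartSize)` triple and then upload every part unconditionally, with only the final part allowed to be short. The arithmetic looks roughly like this, assuming a fixed part size (the vendored helper also scales the part size so an object fits within S3's 10,000-part limit):

```go
package main

import "fmt"

// splitParts shows the shape of the totalPartsCount / lastPartSize values
// the multipart paths work with, for an assumed fixed partSize.
func splitParts(size, partSize int64) (totalParts int, lastPartSize int64) {
	totalParts = int((size + partSize - 1) / partSize) // ceil(size / partSize)
	lastPartSize = size - int64(totalParts-1)*partSize
	return totalParts, lastPartSize
}

func main() {
	const mib = 1 << 20
	total, last := splitParts(200*mib+5, 64*mib)
	fmt.Printf("parts=%d lastPartSize=%d\n", total, last) // parts=4, last part is the remainder
}
```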
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return completeMultipartUploadResult{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return completeMultipartUploadResult{}, err } diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go index f3844127e..fc4c40ad4 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-progress.go +++ b/vendor/github.com/minio/minio-go/api-put-object-progress.go @@ -20,6 +20,7 @@ import ( "io" "strings" + "github.com/minio/minio-go/pkg/credentials" "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) @@ -57,10 +58,10 @@ func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Read // PutObjectWithMetadata - with metadata. func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } if reader == nil { @@ -82,20 +83,8 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R } // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. - // So we fall back to single PUT operation with the maximum limit of 5GiB. if s3utils.IsGoogleEndpoint(c.endpointURL) { - if size <= -1 { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.", - Key: objectName, - BucketName: bucketName, - } - } - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size. + // Do not compute MD5 for Google Cloud Storage. return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress) } @@ -103,6 +92,7 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R if size < minPartSize && size >= 0 { return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress) } + // For all sizes greater than 5MiB do multipart. n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress) if err != nil { @@ -143,8 +133,8 @@ func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, re BucketName: bucketName, } } - // This method should return error with signature v2 minioClient. - if c.signature.isV2() { + + if c.overrideSignerType.IsV2() { return 0, ErrorResponse{ Code: "NotImplemented", Message: "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2", @@ -173,8 +163,8 @@ func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, re return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress) } - // Set signature type to streaming signature v4. - c.signature = SignatureV4Streaming + // Set streaming signature. 
+ c.overrideSignerType = credentials.SignatureV4Streaming if size < minPartSize && size >= 0 { return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go index ebf422638..1c20f1818 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-readat.go +++ b/vendor/github.com/minio/minio-go/api-put-object-readat.go @@ -18,13 +18,12 @@ package minio import ( "bytes" - "crypto/md5" - "crypto/sha256" "fmt" - "hash" "io" "io/ioutil" "sort" + + "github.com/minio/minio-go/pkg/s3utils" ) // uploadedPartRes - the response received from a part upload. @@ -40,19 +39,6 @@ type uploadPartReq struct { Part *ObjectPart // Size of the part uploaded. } -// shouldUploadPartReadAt - verify if part should be uploaded. -func shouldUploadPartReadAt(objPart ObjectPart, uploadReq uploadPartReq) bool { - // If part not found part should be uploaded. - if uploadReq.Part == nil { - return true - } - // if size mismatches part should be uploaded. - if uploadReq.Part.Size != objPart.Size { - return true - } - return false -} - // putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports reader // of type which implements io.ReaderAt interface (ReadAt method). // @@ -65,15 +51,15 @@ func shouldUploadPartReadAt(objPart ObjectPart, uploadReq uploadPartReq) bool { // stream after uploading all the contents successfully. func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } - // Get the upload id of a previously partially uploaded object or initiate a new multipart upload - uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData) + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(bucketName, objectName, metaData) if err != nil { return 0, err } @@ -90,9 +76,6 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read return 0, err } - // Used for readability, lastPartNumber is always totalPartsCount. - lastPartNumber := totalPartsCount - // Declare a channel that sends the next part number to be uploaded. // Buffered to 10000 because thats the maximum number of parts allowed // by S3. @@ -103,6 +86,12 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read // by S3. uploadedPartsCh := make(chan uploadedPartRes, 10000) + // Used for readability, lastPartNumber is always totalPartsCount. + lastPartNumber := totalPartsCount + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + // Send each part number to the channel to be processed. for p := 1; p <= totalPartsCount; p++ { part, ok := partsInfo[p] @@ -143,12 +132,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read // Choose the needed hash algorithms to be calculated by hashCopyBuffer. 
// Sha256 is avoided in non-v4 signature requests or HTTPS connections - hashSums := make(map[string][]byte) - hashAlgos := make(map[string]hash.Hash) - hashAlgos["md5"] = md5.New() - if c.signature.isV4() && !c.secure { - hashAlgos["sha256"] = sha256.New() - } + hashAlgos, hashSums := c.hashMaterials() var prtSize int64 var err error @@ -163,37 +147,25 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read return } - // Verify object if its uploaded. - verifyObjPart := ObjectPart{ - PartNumber: uploadReq.PartNum, - Size: partSize, - } - // Special case if we see a last part number, save last part - // size as the proper part size. - if uploadReq.PartNum == lastPartNumber { - verifyObjPart.Size = lastPartSize + // Proceed to upload the part. + var objPart ObjectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, + uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize) + if err != nil { + uploadedPartsCh <- uploadedPartRes{ + Size: 0, + Error: err, + } + // Exit the goroutine. + return } - // Only upload the necessary parts. Otherwise return size through channel - // to update any progress bar. - if shouldUploadPartReadAt(verifyObjPart, uploadReq) { - // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize) - if err != nil { - uploadedPartsCh <- uploadedPartRes{ - Size: 0, - Error: err, - } - // Exit the goroutine. - return - } - // Save successfully uploaded part metadata. - uploadReq.Part = &objPart - } + // Save successfully uploaded part metadata. + uploadReq.Part = &objPart + // Send successful part info through the channel. uploadedPartsCh <- uploadedPartRes{ - Size: verifyObjPart.Size, + Size: missingPartSize, PartNum: uploadReq.PartNum, Part: uploadReq.Part, Error: nil, diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go index e218075df..fbcfb171e 100644 --- a/vendor/github.com/minio/minio-go/api-put-object.go +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -17,10 +17,6 @@ package minio import ( - "bytes" - "crypto/md5" - "crypto/sha256" - "hash" "io" "io/ioutil" "net/http" @@ -28,6 +24,8 @@ import ( "reflect" "runtime" "strings" + + "github.com/minio/minio-go/pkg/s3utils" ) // toInt - converts go value to its integer representation based @@ -109,14 +107,24 @@ func getReaderSize(reader io.Reader) (size int64, err error) { case "|0", "|1": return } - size = st.Size() + var pos int64 + pos, err = v.Seek(0, 1) // SeekCurrent. + if err != nil { + return -1, err + } + size = st.Size() - pos case *Object: var st ObjectInfo st, err = v.Stat() if err != nil { return } - size = st.Size + var pos int64 + pos, err = v.Seek(0, 1) // SeekCurrent. + if err != nil { + return -1, err + } + size = st.Size - pos } } // Returns the size here. @@ -135,14 +143,13 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // // You must have WRITE permissions on a bucket to create an object. // -// - For size smaller than 5MiB PutObject automatically does a single atomic Put operation. -// - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation. +// - For size smaller than 64MiB PutObject automatically does a single atomic Put operation. +// - For size larger than 64MiB PutObject automatically does a multipart Put operation. 
// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF. // Maximum object size that can be uploaded through this operation will be 5TiB. // // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. // So we fall back to single PUT operation with the maximum limit of 5GiB. -// func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) { return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil) } @@ -151,14 +158,17 @@ func (c Client) PutObject(bucketName, objectName string, reader io.Reader, conte // is used for Google Cloud Storage since Google's multipart API is not S3 compatible. func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + if size > 0 { + readerAt, ok := reader.(io.ReaderAt) + if ok { + reader = io.NewSectionReader(readerAt, 0, size) + } } // Update progress reader appropriately to the latest offset as we @@ -181,10 +191,10 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea // This special function is used as a fallback when multipart upload fails. func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return 0, err } if size > maxSinglePutObjectSize { @@ -197,41 +207,27 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, // Add the appropriate hash algorithms that need to be calculated by hashCopyN // In case of non-v4 signature request or HTTPS connection, sha256 is not needed. - hashAlgos := make(map[string]hash.Hash) - hashSums := make(map[string][]byte) - hashAlgos["md5"] = md5.New() - if c.signature.isV4() && !c.secure { - hashAlgos["sha256"] = sha256.New() - } + hashAlgos, hashSums := c.hashMaterials() - if size <= minPartSize { - // Initialize a new temporary buffer. - tmpBuffer := new(bytes.Buffer) - size, err = hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, size) - reader = bytes.NewReader(tmpBuffer.Bytes()) - tmpBuffer.Reset() - } else { - // Initialize a new temporary file. - var tmpFile *tempFile - tmpFile, err = newTempFile("single$-putobject-single") - if err != nil { - return 0, err - } - defer tmpFile.Close() - size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size) - if err != nil { - return 0, err - } - // Seek back to beginning of the temporary file. - if _, err = tmpFile.Seek(0, 0); err != nil { - return 0, err - } - reader = tmpFile + // Initialize a new temporary file. 
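Worth noting in `putObjectNoChecksum` above: the explicit `maxSinglePutObjectSize` rejection is dropped, and when the reader supports `io.ReaderAt` it is now wrapped in an `io.SectionReader` bounded to the declared size. A small stdlib illustration of what that bounding guarantees:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	// A SectionReader exposes exactly `declaredSize` bytes starting at offset 0,
	// so the request body can never read past the length declared for the
	// upload, even if the underlying reader holds more data.
	src := strings.NewReader("0123456789ABCDEF") // implements io.ReaderAt
	const declaredSize = 10

	bounded := io.NewSectionReader(src, 0, declaredSize)
	data, err := ioutil.ReadAll(bounded)
	fmt.Printf("%q %v\n", data, err) // "0123456789" <nil>
}
```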
+ tmpFile, err := newTempFile("single$-putobject-single") + if err != nil { + return 0, err } + defer tmpFile.Close() + + size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size) // Return error if its not io.EOF. if err != nil && err != io.EOF { return 0, err } + + // Seek back to beginning of the temporary file. + if _, err = tmpFile.Seek(0, 0); err != nil { + return 0, err + } + reader = tmpFile + // Execute put object. st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData) if err != nil { @@ -253,21 +249,13 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, // NOTE: You must have WRITE permissions on a bucket to add an object to it. func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, err } - if size <= -1 { - return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName) - } - - if size > maxSinglePutObjectSize { - return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Set headers. customHeader := make(http.Header) diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go index 73790f002..3574cbc1a 100644 --- a/vendor/github.com/minio/minio-go/api-remove.go +++ b/vendor/github.com/minio/minio-go/api-remove.go @@ -22,6 +22,8 @@ import ( "io" "net/http" "net/url" + + "github.com/minio/minio-go/pkg/s3utils" ) // RemoveBucket deletes the bucket name. @@ -30,7 +32,7 @@ import ( // in the bucket must be deleted before successfully attempting this request. func (c Client) RemoveBucket(bucketName string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } // Execute DELETE on bucket. @@ -57,10 +59,10 @@ func (c Client) RemoveBucket(bucketName string) error { // RemoveObject remove an object from a bucket. func (c Client) RemoveObject(bucketName, objectName string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } // Execute DELETE on objectName. @@ -132,7 +134,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan errorCh := make(chan RemoveObjectError, 1) // Validate if bucket name is valid. 
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(errorCh) errorCh <- RemoveObjectError{ Err: err, @@ -174,7 +176,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan } } if count == 0 { - // Multi Objects Delete API doesn't accept empty object list, quit immediatly + // Multi Objects Delete API doesn't accept empty object list, quit immediately break } if count < maxEntries { @@ -212,10 +214,10 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan // RemoveIncompleteUpload aborts an partially uploaded object. func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } // Find multipart upload id of the object to be aborted. @@ -237,10 +239,10 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { // uploadID, all previously uploaded parts are deleted. func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go index ec63d6b94..4b297407b 100644 --- a/vendor/github.com/minio/minio-go/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go @@ -36,8 +36,8 @@ type owner struct { ID string } -// commonPrefix container for prefix response. -type commonPrefix struct { +// CommonPrefix container for prefix response. +type CommonPrefix struct { Prefix string } @@ -45,7 +45,7 @@ type commonPrefix struct { type ListBucketV2Result struct { // A response can contain CommonPrefixes only if you have // specified a delimiter. - CommonPrefixes []commonPrefix + CommonPrefixes []CommonPrefix // Metadata about each object returned. Contents []ObjectInfo Delimiter string @@ -74,7 +74,7 @@ type ListBucketV2Result struct { type ListBucketResult struct { // A response can contain CommonPrefixes only if you have // specified a delimiter. - CommonPrefixes []commonPrefix + CommonPrefixes []CommonPrefix // Metadata about each object returned. Contents []ObjectInfo Delimiter string @@ -116,7 +116,7 @@ type ListMultipartUploadsResult struct { Prefix string Delimiter string // A response can contain CommonPrefixes only if you specify a delimiter. - CommonPrefixes []commonPrefix + CommonPrefixes []CommonPrefix } // initiator container for who initiated multipart upload. diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go index 5b3dfe1b4..d4fb9c65c 100644 --- a/vendor/github.com/minio/minio-go/api-stat.go +++ b/vendor/github.com/minio/minio-go/api-stat.go @@ -28,7 +28,7 @@ import ( // BucketExists verify if bucket exists and you have permission to access it. func (c Client) BucketExists(bucketName string) (bool, error) { // Input validation. 
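`RemoveObjects`, touched above, keeps its streaming shape: callers push keys into a channel, the client batches them into Multi-Object Delete requests (up to 1,000 keys per request, the S3 limit), and failures come back on the returned error channel. A typical call site, with endpoint, credentials, bucket and object names as placeholders:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "YOUR-ACCESSKEY", "YOUR-SECRETKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Feed object names into the channel and close it when done; the client
	// drains it and issues batched delete calls in the background.
	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, name := range []string{"logs/2017-01.gz", "logs/2017-02.gz"} {
			objectsCh <- name
		}
	}()

	// Any per-object failure is reported on the returned channel.
	for rErr := range client.RemoveObjects("my-bucket", objectsCh) {
		log.Println("failed to remove an object:", rErr.Err)
	}
}
```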
- if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return false, err } @@ -80,10 +80,10 @@ func extractObjMetadata(header http.Header) http.Header { // StatObject verifies if object exists and you have permission to access. func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, err } reqHeaders := NewHeadReqHeaders() @@ -93,10 +93,10 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) { // Lower level API for statObject supporting pre-conditions and range headers. func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) { // Input validation. - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err } - if err := isValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, err } @@ -126,12 +126,13 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") md5sum = strings.TrimSuffix(md5sum, "\"") - // Content-Length is not valid for Google Cloud Storage, do not verify. + // Parse content length is exists var size int64 = -1 - if !s3utils.IsGoogleEndpoint(c.endpointURL) { - // Parse content length. - size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + contentLengthStr := resp.Header.Get("Content-Length") + if contentLengthStr != "" { + size, err = strconv.ParseInt(contentLengthStr, 10, 64) if err != nil { + // Content-Length is not valid return ObjectInfo{}, ErrorResponse{ Code: "InternalError", Message: "Content-Length is invalid. " + reportIssue, diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index 46d69214a..39a364303 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,23 +19,27 @@ package minio import ( "bytes" + "crypto/md5" + "crypto/sha256" "encoding/base64" "encoding/hex" "errors" "fmt" + "hash" "io" "io/ioutil" "math/rand" + "net" "net/http" "net/http/httputil" "net/url" "os" - "regexp" "runtime" "strings" "sync" "time" + "github.com/minio/minio-go/pkg/credentials" "github.com/minio/minio-go/pkg/s3signer" "github.com/minio/minio-go/pkg/s3utils" ) @@ -46,14 +51,11 @@ type Client struct { // Parsed endpoint url provided by the user. endpointURL url.URL - // AccessKeyID required for authorized requests. - accessKeyID string - // SecretAccessKey required for authorized requests. - secretAccessKey string - // Choose a signature type if necessary. - signature SignatureType - // Set to 'true' if Client has no access and secret keys. - anonymous bool + // Holds various credential providers. 
+ credsProvider *credentials.Credentials + + // Custom signerType value overrides all credentials. + overrideSignerType credentials.SignatureType // User supplied. appInfo struct { @@ -85,7 +87,7 @@ type Client struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "2.0.4" + libraryVersion = "2.1.0" ) // User Agent should always following the below style. @@ -100,58 +102,58 @@ const ( // NewV2 - instantiate minio client with Amazon S3 signature version // '2' compatibility. func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) + creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "") if err != nil { return nil, err } - - // Set to use signature version '2'. - clnt.signature = SignatureV2 + clnt.overrideSignerType = credentials.SignatureV2 return clnt, nil } // NewV4 - instantiate minio client with Amazon S3 signature version // '4' compatibility. func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) + creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "") if err != nil { return nil, err } - - // Set to use signature version '4'. - clnt.signature = SignatureV4 + clnt.overrideSignerType = credentials.SignatureV4 return clnt, nil } -// New - instantiate minio client Client, adds automatic verification of signature. +// New - instantiate minio client, adds automatic verification of signature. func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - return NewWithRegion(endpoint, accessKeyID, secretAccessKey, secure, "") + creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "") + if err != nil { + return nil, err + } + // Google cloud storage should be set to signature V2, force it if not. + if s3utils.IsGoogleEndpoint(clnt.endpointURL) { + clnt.overrideSignerType = credentials.SignatureV2 + } + // If Amazon S3 set to signature v4. + if s3utils.IsAmazonEndpoint(clnt.endpointURL) { + clnt.overrideSignerType = credentials.SignatureV4 + } + return clnt, nil +} + +// NewWithCredentials - instantiate minio client with credentials provider +// for retrieving credentials from various credentials provider such as +// IAM, File, Env etc. +func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { + return privateNew(endpoint, creds, secure, region) } // NewWithRegion - instantiate minio client, with region configured. Unlike New(), // NewWithRegion avoids bucket-location lookup operations and it is slightly faster. // Use this function when if your application deals with single region. func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) { - clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) - if err != nil { - return nil, err - } - - // Google cloud storage should be set to signature V2, force it if not. 
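`New()` now derives the signature version from the endpoint (V2 for Google Cloud Storage, V4 for Amazon S3), and the new `NewWithCredentials` constructor accepts any credentials provider plus an optional fixed region. A usage sketch with placeholder keys and the play endpoint from the sample config:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Static V4 credentials, the same kind New()/NewV4() build internally.
	creds := credentials.NewStaticV4("YOUR-ACCESSKEY", "YOUR-SECRETKEY", "")

	// NewWithCredentials accepts any provider (static, env, IAM, chain, ...)
	// and an optional region that skips bucket-location lookups.
	client, err := minio.NewWithCredentials("play.minio.io:9000", creds, true, "us-east-1")
	if err != nil {
		log.Fatalln(err)
	}

	// client is now ready for bucket/object calls, e.g. client.BucketExists("my-bucket").
	_ = client
}
```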
- if s3utils.IsGoogleEndpoint(clnt.endpointURL) { - clnt.signature = SignatureV2 - } - - // If Amazon S3 set to signature v2.n - if s3utils.IsAmazonEndpoint(clnt.endpointURL) { - clnt.signature = SignatureV4 - } - - // Sets custom region, if region is empty bucket location cache is used automatically. - clnt.region = region - - // Success.. - return clnt, nil + creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") + return privateNew(endpoint, creds, secure, region) } // lockedRandSource provides protected rand source, implements rand.Source interface. @@ -188,7 +190,7 @@ func redirectHeaders(req *http.Request, via []*http.Request) error { return nil } -func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { +func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { // construct endpoint. endpointURL, err := getEndpointURL(endpoint, secure) if err != nil { @@ -197,8 +199,9 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl // instantiate new Client. clnt := new(Client) - clnt.accessKeyID = accessKeyID - clnt.secretAccessKey = secretAccessKey + + // Save the credentials. + clnt.credsProvider = creds // Remember whether we are using https or not clnt.secure = secure @@ -212,7 +215,10 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl CheckRedirect: redirectHeaders, } - // Instantiae bucket location cache. + // Sets custom region, if region is empty bucket location cache is used automatically. + clnt.region = region + + // Instantiate bucket location cache. clnt.bucketLocCache = newBucketLocationCache() // Introduce a new locked random seed. @@ -286,6 +292,29 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { } } +// Hash materials provides relevant initialized hash algo writers +// based on the expected signature type. +// +// - For signature v4 request if the connection is insecure compute only sha256. +// - For signature v4 request if the connection is secure compute only md5. +// - For anonymous request compute md5. +func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) { + hashSums = make(map[string][]byte) + hashAlgos = make(map[string]hash.Hash) + if c.overrideSignerType.IsV4() { + if c.secure { + hashAlgos["md5"] = md5.New() + } else { + hashAlgos["sha256"] = sha256.New() + } + } else { + if c.overrideSignerType.IsAnonymous() { + hashAlgos["md5"] = md5.New() + } + } + return hashAlgos, hashSums +} + // requestMetadata - is container for all the values to make a request. type requestMetadata struct { // If set newRequest presigns the URL. @@ -306,40 +335,6 @@ type requestMetadata struct { contentMD5Bytes []byte } -// regCred matches credential string in HTTP header -var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/") - -// regCred matches signature string in HTTP header -var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)") - -// Filter out signature value from Authorization header. -func (c Client) filterSignature(req *http.Request) { - if _, ok := req.Header["Authorization"]; !ok { - return - } - // Handle if Signature V2. - if c.signature.isV2() { - // Set a temporary redacted auth - req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**") - return - } - - /// Signature V4 authorization header. - - // Save the original auth. 
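The regex-based `filterSignature` method being deleted here (the removal continues below) is superseded by a `redactSignature` helper called from `dumpHTTP`. A standalone sketch of equivalent redaction, reusing the same two patterns (with the stray bracket in the old signature regex dropped); the prefix check stands in for the old `c.signature.isV2()` test and is an assumption of this sketch:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	// Patterns matching the ones the removed filterSignature used.
	regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
	regSign = regexp.MustCompile("Signature=([0-9a-f]+)")
)

// redactAuth hides key material before an Authorization header is logged.
func redactAuth(origAuth string) string {
	if strings.HasPrefix(origAuth, "AWS ") { // signature V2 style header (assumed heuristic)
		return "AWS **REDACTED**:**REDACTED**"
	}
	// Signature V4 style header: strip access key and signature.
	newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
	return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
}

func main() {
	fmt.Println(redactAuth("AWS4-HMAC-SHA256 Credential=AKIAEXAMPLE/20170101/us-east-1/s3/aws4_request, SignedHeaders=host, Signature=abcdef0123"))
}
```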
- origAuth := req.Header.Get("Authorization") - // Strip out accessKeyID from: - // Credential=////aws4_request - newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") - - // Strip out 256-bit signature from: Signature=<256-bit signature> - newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") - - // Set a temporary redacted auth - req.Header.Set("Authorization", newAuth) - return -} - // dumpHTTP - dump HTTP request and response. func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { // Starts http dump. @@ -349,7 +344,10 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { } // Filter out Signature field from Authorization header. - c.filterSignature(req) + origAuth := req.Header.Get("Authorization") + if origAuth != "" { + req.Header.Set("Authorization", redactSignature(origAuth)) + } // Only display request header. reqTrace, err := httputil.DumpRequestOut(req, false) @@ -478,6 +476,13 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt case os.Stdin, os.Stdout, os.Stderr: isRetryable = false } + // Figure out if the body can be closed - if yes + // we will definitely close it upon the function + // return. + bodyCloser, ok := metadata.contentBody.(io.Closer) + if ok { + defer bodyCloser.Close() + } } // Create a done channel to control 'newRetryTimer' go routine. @@ -553,9 +558,14 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt // Bucket region if set in error response and the error // code dictates invalid region, we can retry the request // with the new region. - if errResponse.Code == "InvalidRegion" && errResponse.Region != "" { - c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) - continue // Retry. + // + // Additionally we should only retry if bucketLocation and custom + // region is empty. + if metadata.bucketLocation == "" && c.region == "" { + if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" { + c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) + continue // Retry. + } } // Verify if error response code is retryable. @@ -581,53 +591,81 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R method = "POST" } - // Default all requests to "us-east-1" or "cn-north-1" (china region) - location := "us-east-1" - if s3utils.IsAmazonChinaEndpoint(c.endpointURL) { - // For china specifically we need to set everything to - // cn-north-1 for now, there is no easier way until AWS S3 - // provides a cleaner compatible API across "us-east-1" and - // China region. - location = "cn-north-1" - } - - // Gather location only if bucketName is present. - if metadata.bucketName != "" { - location, err = c.getBucketLocation(metadata.bucketName) - if err != nil { - return nil, err + location := metadata.bucketLocation + if location == "" { + if metadata.bucketName != "" { + // Gather location only if bucketName is present. + location, err = c.getBucketLocation(metadata.bucketName) + if err != nil { + if ToErrorResponse(err).Code != "AccessDenied" { + return nil, err + } + } + // Upon AccessDenied error on fetching bucket location, default + // to possible locations based on endpoint URL. This can usually + // happen when GetBucketLocation() is disabled using IAM policies. + } + if location == "" { + location = getDefaultLocation(c.endpointURL, c.region) } } - // Save location. - metadata.bucketLocation = location - // Construct a new target URL. 
- targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues) + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues) if err != nil { return nil, err } + // Go net/http notoriously closes the request body. + // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. + // This can cause underlying *os.File seekers to fail, avoid that + // by making sure to wrap the closer as a nop. + var body io.ReadCloser + if metadata.contentBody != nil { + body = ioutil.NopCloser(metadata.contentBody) + } + // Initialize a new HTTP request for the method. - req, err = http.NewRequest(method, targetURL.String(), metadata.contentBody) + req, err = http.NewRequest(method, targetURL.String(), body) if err != nil { return nil, err } - // Anonymous request. - anonymous := c.accessKeyID == "" || c.secretAccessKey == "" + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.Get() + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } // Generate presign url if needed, return right here. if metadata.expires != 0 && metadata.presignURL { - if anonymous { - return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.") + if signerType.IsAnonymous() { + return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") } - if c.signature.isV2() { + if signerType.IsV2() { // Presign URL with signature v2. - req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires) - } else if c.signature.isV4() { + req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires) + } else if signerType.IsV4() { // Presign URL with signature v4. - req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires) + req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) } return req, nil } @@ -640,9 +678,11 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R req.Header.Set(k, v[0]) } - // set incoming content-length. - if metadata.contentLength > 0 { - req.ContentLength = metadata.contentLength + // Set incoming content-length. + req.ContentLength = metadata.contentLength + if req.ContentLength <= -1 { + // For unknown content length, we upload using transfer-encoding: chunked. + req.TransferEncoding = []string{"chunked"} } // set md5Sum for content protection. @@ -650,17 +690,18 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) } - if anonymous { + // For anonymous requests just return. + if signerType.IsAnonymous() { return req, nil - } // Sign the request for all authenticated requests. + } switch { - case c.signature.isV2(): + case signerType.IsV2(): // Add signature version '2' authorization header. 
- req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) - case c.signature.isStreamingV4() && method == "PUT": - req = s3signer.StreamingSignV4(req, c.accessKeyID, - c.secretAccessKey, location, metadata.contentLength, time.Now().UTC()) + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) + case signerType.IsStreamingV4() && method == "PUT": + req = s3signer.StreamingSignV4(req, accessKeyID, + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) default: // Set sha256 sum for signature calculation only with signature version '4'. shaHeader := unsignedPayload @@ -670,7 +711,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R req.Header.Set("X-Amz-Content-Sha256", shaHeader) // Add signature version '4' authorization header. - req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location) + req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) } // Return request. @@ -701,14 +742,26 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html host = c.s3AccelerateEndpoint } else { - // Fetch new host based on the bucket location. - host = getS3Endpoint(bucketLocation) + // Do not change the host if the endpoint URL is a FIPS S3 endpoint. + if !s3utils.IsAmazonFIPSGovCloudEndpoint(c.endpointURL) { + // Fetch new host based on the bucket location. + host = getS3Endpoint(bucketLocation) + } } } // Save scheme. scheme := c.endpointURL.Scheme + // Strip port 80 and 443 so we won't send these ports in Host header. + // The reason is that browsers and curl automatically remove :80 and :443 + // with the generated presigned urls, then a signature mismatch error. + if h, p, err := net.SplitHostPort(host); err == nil { + if scheme == "http" && p == "80" || scheme == "https" && p == "443" { + host = h + } + } + urlStr := scheme + "://" + host + "/" // Make URL only if bucketName is available, otherwise use the // endpoint URL. @@ -732,13 +785,16 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que } } } + // If there are any query values, add them to the end. if len(queryValues) > 0 { urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) } + u, err := url.Parse(urlStr) if err != nil { return nil, err } + return u, nil } diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml index be746a7bf..4f5c1b390 100644 --- a/vendor/github.com/minio/minio-go/appveyor.yml +++ b/vendor/github.com/minio/minio-go/appveyor.yml @@ -17,6 +17,8 @@ install: - go version - go env - go get -u github.com/golang/lint/golint + - go get -u github.com/go-ini/ini + - go get -u github.com/minio/go-homedir - go get -u github.com/remyoudompheng/go-misc/deadcode - go get -u github.com/gordonklaus/ineffassign diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go index 28799c69d..6d2a40f78 100644 --- a/vendor/github.com/minio/minio-go/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/bucket-cache.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
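One behavioural detail from the `makeTargetURL` hunk above: default ports are stripped from the host so the signed Host header matches what browsers and curl send for presigned URLs. The stripping logic is essentially this stdlib-only check:

```go
package main

import (
	"fmt"
	"net"
)

// stripDefaultPort mirrors the new makeTargetURL behaviour: drop :80 for http
// and :443 for https, keeping presigned-URL signatures valid when clients
// normalize the URL.
func stripDefaultPort(scheme, host string) string {
	h, p, err := net.SplitHostPort(host)
	if err != nil {
		return host // no port present
	}
	if (scheme == "http" && p == "80") || (scheme == "https" && p == "443") {
		return h
	}
	return host
}

func main() {
	fmt.Println(stripDefaultPort("https", "s3.amazonaws.com:443")) // s3.amazonaws.com
	fmt.Println(stripDefaultPort("http", "localhost:9000"))        // localhost:9000
}
```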
@@ -23,6 +24,7 @@ import ( "path" "sync" + "github.com/minio/minio-go/pkg/credentials" "github.com/minio/minio-go/pkg/s3signer" "github.com/minio/minio-go/pkg/s3utils" ) @@ -71,7 +73,7 @@ func (r *bucketLocationCache) Delete(bucketName string) { // GetBucketLocation - get location for the bucket name from location cache, if not // fetch freshly by making a new request. func (c Client) GetBucketLocation(bucketName string) (string, error) { - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } return c.getBucketLocation(bucketName) @@ -80,21 +82,27 @@ func (c Client) GetBucketLocation(bucketName string) (string, error) { // getBucketLocation - Get location for the bucketName from location map cache, if not // fetch freshly by making a new request. func (c Client) getBucketLocation(bucketName string) (string, error) { - if err := isValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err } + // Region set then no need to fetch bucket location. + if c.region != "" { + return c.region, nil + } + if s3utils.IsAmazonChinaEndpoint(c.endpointURL) { // For china specifically we need to set everything to // cn-north-1 for now, there is no easier way until AWS S3 // provides a cleaner compatible API across "us-east-1" and // China region. return "cn-north-1", nil - } - - // Region set then no need to fetch bucket location. - if c.region != "" { - return c.region, nil + } else if s3utils.IsAmazonGovCloudEndpoint(c.endpointURL) { + // For us-gov specifically we need to set everything to + // us-gov-west-1 for now, there is no easier way until AWS S3 + // provides a cleaner compatible API across "us-east-1" and + // Gov cloud region. + return "us-gov-west-1", nil } if location, ok := c.bucketLocCache.Get(bucketName); ok { @@ -181,8 +189,33 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro // Set UserAgent for the request. c.setUserAgent(req) + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.Get() + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + // Set sha256 sum for signature calculation only with signature version '4'. - if c.signature.isV4() { + switch { + case signerType.IsV4(): var contentSha256 string if c.secure { contentSha256 = unsignedPayload @@ -190,13 +223,10 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro contentSha256 = hex.EncodeToString(sum256([]byte{})) } req.Header.Set("X-Amz-Content-Sha256", contentSha256) + req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") + case signerType.IsV2(): + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) } - // Sign the request. 
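`getBucketLocation` now short-circuits in a fixed order: an explicitly configured region wins, the Amazon China and GovCloud endpoints map to hard-coded regions, and only then is the location cache or the GetBucketLocation API consulted. A sketch of that decision order, with illustrative hostnames:

```go
package main

import "fmt"

// defaultRegion sketches the lookup order: configured region first, then
// well-known endpoints with fixed regions, otherwise fall through to the
// bucket-location cache / API (represented here by the "" result).
// The hostnames are illustrative, not an exhaustive list.
func defaultRegion(endpointHost, configuredRegion string) string {
	if configuredRegion != "" {
		return configuredRegion
	}
	switch endpointHost {
	case "s3.cn-north-1.amazonaws.com.cn":
		return "cn-north-1"
	case "s3-fips-us-gov-west-1.amazonaws.com":
		return "us-gov-west-1"
	}
	return "" // consult the bucket location cache / GetBucketLocation call
}

func main() {
	fmt.Println(defaultRegion("s3.amazonaws.com", "eu-west-1")) // eu-west-1
	fmt.Println(defaultRegion("s3.cn-north-1.amazonaws.com.cn", ""))
}
```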
- if c.signature.isV4() { - req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") - } else if c.signature.isV2() { - req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) - } return req, nil } diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go new file mode 100644 index 000000000..6b0e57440 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go @@ -0,0 +1,89 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "fmt" + +// A Chain will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The Chain provides a way of chaining multiple providers together +// which will pick the first available using priority order of the +// Providers in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error, collecting all errors from all providers. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvAWSS3{}, +// &credentials.EnvMinio{}, +// }) +// +// // Usage of ChainCredentials. +// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") +// if err != nil { +// log.Fatalln(err) +// } +// +type Chain struct { + Providers []Provider + curr Provider +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return New(&Chain{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *Chain) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err != nil { + errs = append(errs, err) + continue + } // Success. + c.curr = p + return creds, nil + } + c.curr = nil + return Value{}, fmt.Errorf("No valid providers found %v", errs) +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. 
+func (c *Chain) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample new file mode 100644 index 000000000..130746f4b --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample @@ -0,0 +1,17 @@ +{ + "version": "8", + "hosts": { + "play": { + "url": "https://play.minio.io:9000", + "accessKey": "Q3AM3UQ867SPQQA43P2F", + "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "api": "S3v2" + }, + "s3": { + "url": "https://s3.amazonaws.com", + "accessKey": "accessKey", + "secretKey": "secret", + "api": "S3v4" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go new file mode 100644 index 000000000..cc3000532 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go @@ -0,0 +1,175 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "sync" + "time" +) + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Signature Type. + SignerType SignatureType +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type IAMCredentialProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. 
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// Credentials - A container for synchronous safe retrieval of credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sync.Mutex + + creds Value + forceRefresh bool + provider Provider +} + +// New returns a pointer to a new Credentials with the provider set. +func New(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.Lock() + defer c.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.Lock() + defer c.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be refreshed. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.Lock() + defer c.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. 
+func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample new file mode 100644 index 000000000..7fc91d9d2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go new file mode 100644 index 000000000..fa1908aeb --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go @@ -0,0 +1,45 @@ +// Package credentials provides credential retrieval and management +// for S3 compatible object storage. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewFromEnv() +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewFromIAM("") +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go new file mode 100644 index 000000000..11934433c --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go @@ -0,0 +1,71 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package credentials + +import "os" + +// A EnvAWS retrieves credentials from the environment variables of the +// running process. EnvAWSironment credentials never expire. +// +// EnvAWSironment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY. +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY. +// * Secret Token: AWS_SESSION_TOKEN. +type EnvAWS struct { + retrieved bool +} + +// NewEnvAWS returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvAWS() *Credentials { + return New(&EnvAWS{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvAWS) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + signerType := SignatureV4 + if id == "" || secret == "" { + signerType = SignatureAnonymous + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + SignerType: signerType, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvAWS) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go new file mode 100644 index 000000000..791087ef5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go @@ -0,0 +1,62 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "os" + +// A EnvMinio retrieves credentials from the environment variables of the +// running process. EnvMinioironment credentials never expire. +// +// EnvMinioironment variables used: +// +// * Access Key ID: MINIO_ACCESS_KEY. +// * Secret Access Key: MINIO_SECRET_KEY. +type EnvMinio struct { + retrieved bool +} + +// NewEnvMinio returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvMinio() *Credentials { + return New(&EnvMinio{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvMinio) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("MINIO_ACCESS_KEY") + secret := os.Getenv("MINIO_SECRET_KEY") + + signerType := SignatureV4 + if id == "" || secret == "" { + signerType = SignatureAnonymous + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SignerType: signerType, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. 
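+//
+// Illustrative sketch of the provider in use (endpoint, TLS flag and region are
+// example values, not taken from this change):
+//
+//	creds := NewEnvMinio() // reads MINIO_ACCESS_KEY and MINIO_SECRET_KEY
+//	client, err := minio.NewWithCredentials("play.minio.io:9000", creds, true, "us-east-1")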
+func (e *EnvMinio) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go new file mode 100644 index 000000000..1be621385 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go @@ -0,0 +1,120 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "os" + "path/filepath" + + "github.com/go-ini/ini" + homedir "github.com/minio/go-homedir" +) + +// A FileAWSCredentials retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type FileAWSCredentials struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileAWSCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewFileAWSCredentials(filename string, profile string) *Credentials { + return New(&FileAWSCredentials{ + filename: filename, + profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *FileAWSCredentials) Retrieve() (Value, error) { + if p.filename == "" { + p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + if p.filename == "" { + homeDir, err := homedir.Dir() + if err != nil { + return Value{}, err + } + p.filename = filepath.Join(homeDir, ".aws", "credentials") + } + } + if p.profile == "" { + p.profile = os.Getenv("AWS_PROFILE") + if p.profile == "" { + p.profile = "default" + } + } + + p.retrieved = false + + iniProfile, err := loadProfile(p.filename, p.profile) + if err != nil { + return Value{}, err + } + + // Default to empty string if not found. + id := iniProfile.Key("aws_access_key_id") + // Default to empty string if not found. + secret := iniProfile.Key("aws_secret_access_key") + // Default to empty string if not found. + token := iniProfile.Key("aws_session_token") + + p.retrieved = true + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + SignerType: SignatureV4, + }, nil +} + +// IsExpired returns if the shared credentials have expired. 
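+//
+// Illustrative sketch (empty arguments fall back to the defaults documented on
+// the struct above):
+//
+//	creds := NewFileAWSCredentials("", "") // $HOME/.aws/credentials, profile "default"
+//	val, err := creds.Get()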
+func (p *FileAWSCredentials) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (*ini.Section, error) { + config, err := ini.Load(filename) + if err != nil { + return nil, err + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return nil, err + } + return iniProfile, nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go new file mode 100644 index 000000000..9e26dd302 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go @@ -0,0 +1,129 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + homedir "github.com/minio/go-homedir" +) + +// A FileMinioClient retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Configuration file example: $HOME/.mc/config.json +type FileMinioClient struct { + // Path to the shared credentials file. + // + // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.mc/config.json" + // Windows: "%USERALIAS%\mc\config.json" + filename string + + // Minio Alias to extract credentials from the shared credentials file. If empty + // will default to environment variable "MINIO_ALIAS" or "default" if + // environment variable is also not set. + alias string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileMinioClient returns a pointer to a new Credentials object +// wrapping the Alias file provider. +func NewFileMinioClient(filename string, alias string) *Credentials { + return New(&FileMinioClient{ + filename: filename, + alias: alias, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. 
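+//
+// Illustrative sketch (the "play" alias comes from config.json.sample shipped
+// with this package; an empty alias falls back to $MINIO_ALIAS, then "s3"):
+//
+//	creds := NewFileMinioClient("", "play") // $HOME/.mc/config.json
+//	val, err := creds.Get()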
+func (p *FileMinioClient) Retrieve() (Value, error) { + if p.filename == "" { + homeDir, err := homedir.Dir() + if err != nil { + return Value{}, err + } + p.filename = filepath.Join(homeDir, ".mc", "config.json") + if runtime.GOOS == "windows" { + p.filename = filepath.Join(homeDir, "mc", "config.json") + } + } + + if p.alias == "" { + p.alias = os.Getenv("MINIO_ALIAS") + if p.alias == "" { + p.alias = "s3" + } + } + + p.retrieved = false + + hostCfg, err := loadAlias(p.filename, p.alias) + if err != nil { + return Value{}, err + } + + p.retrieved = true + return Value{ + AccessKeyID: hostCfg.AccessKey, + SecretAccessKey: hostCfg.SecretKey, + SignerType: parseSignatureType(hostCfg.API), + }, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *FileMinioClient) IsExpired() bool { + return !p.retrieved +} + +// hostConfig configuration of a host. +type hostConfig struct { + URL string `json:"url"` + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + API string `json:"api"` +} + +// config config version. +type config struct { + Version string `json:"version"` + Hosts map[string]hostConfig `json:"hosts"` +} + +// loadAliass loads from the file pointed to by shared credentials filename for alias. +// The credentials retrieved from the alias will be returned or error. Error will be +// returned if it fails to read from the file. +func loadAlias(filename, alias string) (hostConfig, error) { + cfg := &config{} + configBytes, err := ioutil.ReadFile(filename) + if err != nil { + return hostConfig{}, err + } + if err = json.Unmarshal(configBytes, cfg); err != nil { + return hostConfig{}, err + } + return cfg.Hosts[alias], nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go new file mode 100644 index 000000000..ee24a213b --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go @@ -0,0 +1,196 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bufio" + "encoding/json" + "errors" + "net/http" + "net/url" + "path" + "time" +) + +// DefaultExpiryWindow - Default expiry window. +// ExpiryWindow will allow the credentials to trigger refreshing +// prior to the credentials actually expiring. This is beneficial +// so race conditions with expiring credentials do not cause +// request to fail unexpectedly due to ExpiredTokenException exceptions. +const DefaultExpiryWindow = time.Second * 10 // 10 secs + +// A IAM retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +type IAM struct { + Expiry + + // Required http Client to use when connecting to IAM metadata service. + Client *http.Client + + // Custom endpoint in place of + endpoint string +} + +// redirectHeaders copies all headers when following a redirect URL. 
+// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800) +func redirectHeaders(req *http.Request, via []*http.Request) error { + if len(via) == 0 { + return nil + } + for key, val := range via[0].Header { + req.Header[key] = val + } + return nil +} + +// NewIAM returns a pointer to a new Credentials object wrapping +// the IAM. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewIAM(endpoint string) *Credentials { + if endpoint == "" { + // IAM Roles for Amazon EC2 + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + endpoint = "http://169.254.169.254" + } + p := &IAM{ + Client: &http.Client{ + Transport: http.DefaultTransport, + CheckRedirect: redirectHeaders, + }, + endpoint: endpoint, + } + return New(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired +func (m *IAM) Retrieve() (Value, error) { + credsList, err := requestCredList(m.Client, m.endpoint) + if err != nil { + return Value{}, err + } + + if len(credsList) == 0 { + return Value{}, errors.New("empty EC2 Role list") + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, m.endpoint, credsName) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + SignerType: SignatureV4, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/latest/meta-data/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *http.Client, endpoint string) ([]string, error) { + u, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + u.Path = iamSecurityCredsPath + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errors.New(resp.Status) + } + + credsList := []string{} + s := bufio.NewScanner(resp.Body) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, err + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. 
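+//
+// Illustrative sketch of the metadata request this helper issues (the role name
+// is a placeholder; the host is the default endpoint set in NewIAM):
+//
+//	GET http://169.254.169.254/latest/meta-data/iam/security-credentials/<role-name>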
+func requestCred(client *http.Client, endpoint string, credsName string) (ec2RoleCredRespBody, error) { + u, err := url.Parse(endpoint) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + u.Path = path.Join(iamSecurityCredsPath, credsName) + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, errors.New(respCreds.Message) + } + + return respCreds, nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go new file mode 100644 index 000000000..c64ad6c23 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go @@ -0,0 +1,76 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "strings" + +// SignatureType is type of Authorization requested for a given HTTP request. +type SignatureType int + +// Different types of supported signatures - default is SignatureV4 or SignatureDefault. +const ( + // SignatureDefault is always set to v4. + SignatureDefault SignatureType = iota + SignatureV4 + SignatureV2 + SignatureV4Streaming + SignatureAnonymous // Anonymous signature signifies, no signature. +) + +// IsV2 - is signature SignatureV2? +func (s SignatureType) IsV2() bool { + return s == SignatureV2 +} + +// IsV4 - is signature SignatureV4? +func (s SignatureType) IsV4() bool { + return s == SignatureV4 || s == SignatureDefault +} + +// IsStreamingV4 - is signature SignatureV4Streaming? +func (s SignatureType) IsStreamingV4() bool { + return s == SignatureV4Streaming +} + +// IsAnonymous - is signature empty? +func (s SignatureType) IsAnonymous() bool { + return s == SignatureAnonymous +} + +// Stringer humanized version of signature type, +// strings returned here are case insensitive. 
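+//
+// Illustrative mapping, derived from the implementation below:
+//
+//	SignatureV2.String()          // "S3v2"
+//	SignatureV4.String()          // "S3v4"
+//	SignatureV4Streaming.String() // "S3v4Streaming"
+//	parseSignatureType("s3v2")    // SignatureV2 (matching is case insensitive)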
+func (s SignatureType) String() string { + if s.IsV2() { + return "S3v2" + } else if s.IsV4() { + return "S3v4" + } else if s.IsStreamingV4() { + return "S3v4Streaming" + } + return "Anonymous" +} + +func parseSignatureType(str string) SignatureType { + if strings.EqualFold(str, "S3v4") { + return SignatureV4 + } else if strings.EqualFold(str, "S3v2") { + return SignatureV2 + } else if strings.EqualFold(str, "S3v4Streaming") { + return SignatureV4Streaming + } + return SignatureAnonymous +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/pkg/credentials/static.go new file mode 100644 index 000000000..25aff5696 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/static.go @@ -0,0 +1,67 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +// A Static is a set of credentials which are set programmatically, +// and will never expire. +type Static struct { + Value +} + +// NewStaticV2 returns a pointer to a new Credentials object +// wrapping a static credentials value provider, signature is +// set to v2. If access and secret are not specified then +// regardless of signature type set it Value will return +// as anonymous. +func NewStaticV2(id, secret, token string) *Credentials { + return NewStatic(id, secret, token, SignatureV2) +} + +// NewStaticV4 is similar to NewStaticV2 with similar considerations. +func NewStaticV4(id, secret, token string) *Credentials { + return NewStatic(id, secret, token, SignatureV4) +} + +// NewStatic returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStatic(id, secret, token string, signerType SignatureType) *Credentials { + return New(&Static{ + Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + SignerType: signerType, + }, + }) +} + +// Retrieve returns the static credentials. +func (s *Static) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + // Anonymous is not an error + return Value{SignerType: SignatureAnonymous}, nil + } + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For Static, the credentials never expired. +func (s *Static) IsExpired() bool { + return false +} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go index 7670e68f4..be45e52f4 100644 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go @@ -89,6 +89,15 @@ func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) { } +// Close implements closes the internal stream. 
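+// It closes the wrapped stream when that stream implements io.Closer and is a
+// no-op otherwise (see the body below).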
+func (s *CBCSecureMaterials) Close() error { + closer, ok := s.stream.(io.Closer) + if ok { + return closer.Close() + } + return nil +} + // SetupEncryptMode - tells CBC that we are going to encrypt data func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error { // Set mode to encrypt diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go index 2fd75033f..8b8554336 100644 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go @@ -25,6 +25,9 @@ import "io" // Materials - provides generic interface to encrypt any stream of data. type Materials interface { + // Closes the wrapped stream properly, initiated by the caller. + Close() error + // Returns encrypted/decrypted data, io.Reader compatible. Read(b []byte) (int, error) diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go index cbb889d8d..b2d46e178 100644 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go @@ -583,7 +583,7 @@ func GetPolicies(statements []Statement, bucketName string) map[string]BucketPol r = r[:len(r)-1] asterisk = "*" } - objectPath := r[len(awsResourcePrefix+bucketName)+1 : len(r)] + objectPath := r[len(awsResourcePrefix+bucketName)+1:] p := GetPolicy(statements, bucketName, objectPath) policyRules[bucketName+"/"+objectPath+asterisk] = p } diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go index 755fd1ac5..c2f0baee6 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go @@ -92,9 +92,12 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [ // prepareStreamingRequest - prepares a request with appropriate // headers before computing the seed signature. -func prepareStreamingRequest(req *http.Request, dataLen int64, timestamp time.Time) { +func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { // Set x-amz-content-sha256 header. req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } req.Header.Set("Content-Encoding", streamingEncoding) req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) @@ -138,6 +141,7 @@ func (s *StreamingReader) setSeedSignature(req *http.Request) { type StreamingReader struct { accessKeyID string secretAccessKey string + sessionToken string region string prevSignature string seedSignature string @@ -195,16 +199,17 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { // StreamingSignV4 - provides chunked upload signatureV4 support by // implementing io.Reader. -func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, +func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, region string, dataLen int64, reqTime time.Time) *http.Request { // Set headers needed for streaming signature. 
- prepareStreamingRequest(req, dataLen, reqTime) + prepareStreamingRequest(req, sessionToken, dataLen, reqTime) stReader := &StreamingReader{ baseReadCloser: req.Body, accessKeyID: accessKeyID, secretAccessKey: secretAccessKey, + sessionToken: sessionToken, region: region, reqTime: reqTime, chunkBuf: make([]byte, payloadChunkSize), diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go index 245fb08c3..0d75dc162 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go @@ -206,7 +206,7 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string { // PreSignV4 presign the request, in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. -func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request { +func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request { // Presign is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -228,6 +228,10 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) query.Set("X-Amz-SignedHeaders", signedHeaders) query.Set("X-Amz-Credential", credential) + // Set session token if available. + if sessionToken != "" { + query.Set("X-Amz-Security-Token", sessionToken) + } req.URL.RawQuery = query.Encode() // Get canonical request. @@ -260,7 +264,7 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l // SignV4 sign the request before Do(), in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. -func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { +func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -272,6 +276,11 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht // Set x-amz-date. req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + // Set session token if available. + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + // Get canonical request. canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go index a3b6ed845..9d6ac4d81 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go +++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go @@ -19,6 +19,7 @@ package s3utils import ( "bytes" "encoding/hex" + "errors" "net" "net/url" "regexp" @@ -84,10 +85,29 @@ func IsAmazonEndpoint(endpointURL url.URL) bool { if IsAmazonChinaEndpoint(endpointURL) { return true } - + if IsAmazonGovCloudEndpoint(endpointURL) { + return true + } return endpointURL.Host == "s3.amazonaws.com" } +// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint. 
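+// Illustrative note: the hosts matched here are s3-us-gov-west-1.amazonaws.com
+// and, via IsAmazonFIPSGovCloudEndpoint, s3-fips-us-gov-west-1.amazonaws.com.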
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || + IsAmazonFIPSGovCloudEndpoint(endpointURL)) +} + +// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint. +func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" +} + // IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint. // Customers who wish to use the new Beijing Region are required // to sign up for a separate set of account credentials unique to @@ -181,3 +201,74 @@ func EncodePath(pathName string) string { } return encodedPathname } + +// We support '.' with bucket names but we fallback to using path +// style requests instead for such buckets. +var ( + validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-]{1,61}[A-Za-z0-9]$`) + validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) + ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +) + +// Common checker for both stricter and basic validation. +func checkBucketNameCommon(bucketName string, strict bool) (err error) { + if strings.TrimSpace(bucketName) == "" { + return errors.New("Bucket name cannot be empty") + } + if len(bucketName) < 3 { + return errors.New("Bucket name cannot be smaller than 3 characters") + } + if len(bucketName) > 63 { + return errors.New("Bucket name cannot be greater than 63 characters") + } + if ipAddress.MatchString(bucketName) { + return errors.New("Bucket name cannot be an ip address") + } + if strings.Contains(bucketName, "..") { + return errors.New("Bucket name contains invalid characters") + } + if strict { + if !validBucketNameStrict.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err + } + if !validBucketName.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err +} + +// CheckValidBucketName - checks if we have a valid input bucket name. +// This is a non stricter version. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func CheckValidBucketName(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, false) +} + +// CheckValidBucketNameStrict - checks if we have a valid input bucket name. +// This is a stricter version. +func CheckValidBucketNameStrict(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, true) +} + +// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectNamePrefix(objectName string) error { + if len(objectName) > 1024 { + return errors.New("Object name cannot be greater than 1024 characters") + } + if !utf8.ValidString(objectName) { + return errors.New("Object name with non UTF-8 strings are not supported") + } + return nil +} + +// CheckValidObjectName - checks if we have a valid input object name. 
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectName(objectName string) error { + if strings.TrimSpace(objectName) == "" { + return errors.New("Object name cannot be empty") + } + return CheckValidObjectNamePrefix(objectName) +} diff --git a/vendor/github.com/minio/minio-go/request-headers.go b/vendor/github.com/minio/minio-go/request-headers.go index fa23b2fe3..76c87202d 100644 --- a/vendor/github.com/minio/minio-go/request-headers.go +++ b/vendor/github.com/minio/minio-go/request-headers.go @@ -48,7 +48,7 @@ func (c RequestHeaders) SetMatchETag(etag string) error { if etag == "" { return ErrInvalidArgument("ETag cannot be empty.") } - c.Set("If-Match", etag) + c.Set("If-Match", "\""+etag+"\"") return nil } @@ -57,7 +57,7 @@ func (c RequestHeaders) SetMatchETagExcept(etag string) error { if etag == "" { return ErrInvalidArgument("ETag cannot be empty.") } - c.Set("If-None-Match", etag) + c.Set("If-None-Match", "\""+etag+"\"") return nil } diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go index d7fa5e038..c02f3f1fa 100644 --- a/vendor/github.com/minio/minio-go/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/s3-endpoints.go @@ -33,6 +33,7 @@ var awsS3EndpointMap = map[string]string{ "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com", "ap-northeast-2": "s3-ap-northeast-2.amazonaws.com", "sa-east-1": "s3-sa-east-1.amazonaws.com", + "us-gov-west-1": "s3-us-gov-west-1.amazonaws.com", "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", } diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go index 11b40a0f8..c5aff9bbc 100644 --- a/vendor/github.com/minio/minio-go/s3-error.go +++ b/vendor/github.com/minio/minio-go/s3-error.go @@ -25,7 +25,7 @@ var s3ErrorResponseMap = map[string]string{ "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", "InternalError": "We encountered an internal error, please try again.", - "InvalidAccessKeyID": "The access key ID you provided does not exist in our records.", + "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", "InvalidBucketName": "The specified bucket is not valid.", "InvalidDigest": "The Content-Md5 you specified is not valid.", "InvalidRange": "The requested range is not satisfiable", diff --git a/vendor/github.com/minio/minio-go/signature-type.go b/vendor/github.com/minio/minio-go/signature-type.go deleted file mode 100644 index f9a57c3f1..000000000 --- a/vendor/github.com/minio/minio-go/signature-type.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -// SignatureType is type of Authorization requested for a given HTTP request. 
-type SignatureType int - -// Different types of supported signatures - default is Latest i.e SignatureV4. -const ( - Latest SignatureType = iota - SignatureV4 - SignatureV2 - SignatureV4Streaming -) - -var emptySHA256 = sum256(nil) - -// isV2 - is signature SignatureV2? -func (s SignatureType) isV2() bool { - return s == SignatureV2 -} - -// isV4 - is signature SignatureV4? -func (s SignatureType) isV4() bool { - return s == SignatureV4 || s == Latest -} - -// isStreamingV4 - is signature SignatureV4Streaming? -func (s SignatureType) isStreamingV4() bool { - return s == SignatureV4Streaming -} diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go index 93cd1712f..d7f0181e8 100644 --- a/vendor/github.com/minio/minio-go/utils.go +++ b/vendor/github.com/minio/minio-go/utils.go @@ -28,7 +28,6 @@ import ( "regexp" "strings" "time" - "unicode/utf8" "github.com/minio/minio-go/pkg/s3utils" ) @@ -110,6 +109,8 @@ func closeResponse(resp *http.Response) { } } +var emptySHA256 = sum256(nil) + // Sentinel URL is the default url value which is invalid. var sentinelURL = url.URL{} @@ -146,63 +147,6 @@ func isValidExpiry(expires time.Duration) error { return nil } -// We support '.' with bucket names but we fallback to using path -// style requests instead for such buckets. -var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) - -// Invalid bucket name with double dot. -var invalidDotBucketName = regexp.MustCompile(`\.\.`) - -// isValidBucketName - verify bucket name in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html -func isValidBucketName(bucketName string) error { - if strings.TrimSpace(bucketName) == "" { - return ErrInvalidBucketName("Bucket name cannot be empty.") - } - if len(bucketName) < 3 { - return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.") - } - if len(bucketName) > 63 { - return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.") - } - if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' { - return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.") - } - if invalidDotBucketName.MatchString(bucketName) { - return ErrInvalidBucketName("Bucket name cannot have successive periods.") - } - if !validBucketName.MatchString(bucketName) { - return ErrInvalidBucketName("Bucket name contains invalid characters.") - } - return nil -} - -// isValidObjectName - verify object name in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func isValidObjectName(objectName string) error { - if strings.TrimSpace(objectName) == "" { - return ErrInvalidObjectName("Object name cannot be empty.") - } - if len(objectName) > 1024 { - return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.") - } - if !utf8.ValidString(objectName) { - return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.") - } - return nil -} - -// isValidObjectPrefix - verify if object prefix is valid. 
-func isValidObjectPrefix(objectPrefix string) error { - if len(objectPrefix) > 1024 { - return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.") - } - if !utf8.ValidString(objectPrefix) { - return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.") - } - return nil -} - // make a copy of http.Header func cloneHeader(h http.Header) http.Header { h2 := make(http.Header, len(h)) @@ -225,3 +169,46 @@ func filterHeader(header http.Header, filterKeys []string) (filteredHeader http. } return filteredHeader } + +// regCred matches credential string in HTTP header +var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/") + +// regCred matches signature string in HTTP header +var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)") + +// Redact out signature value from authorization string. +func redactSignature(origAuth string) string { + if !strings.HasPrefix(origAuth, signV4Algorithm) { + // Set a temporary redacted auth + return "AWS **REDACTED**:**REDACTED**" + } + + /// Signature V4 authorization header. + + // Strip out accessKeyID from: + // Credential=////aws4_request + newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") + + // Strip out 256-bit signature from: Signature=<256-bit signature> + return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") +} + +// Get default location returns the location based on the input +// URL `u`, if region override is provided then all location +// defaults to regionOverride. +// +// If no other cases match then the location is set to `us-east-1` +// as a last resort. +func getDefaultLocation(u url.URL, regionOverride string) (location string) { + if regionOverride != "" { + return regionOverride + } + if s3utils.IsAmazonChinaEndpoint(u) { + return "cn-north-1" + } + if s3utils.IsAmazonGovCloudEndpoint(u) { + return "us-gov-west-1" + } + // Default to location to 'us-east-1'. + return "us-east-1" +} diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile new file mode 100644 index 000000000..53fc52579 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Dockerfile @@ -0,0 +1,51 @@ +# +# This Dockerfile builds a recent curl with HTTP/2 client support, using +# a recent nghttp2 build. +# +# See the Makefile for how to tag it. If Docker and that image is found, the +# Go tests use this curl binary for integration tests. 
+# + +FROM ubuntu:trusty + +RUN apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y git-core build-essential wget + +RUN apt-get install -y --no-install-recommends \ + autotools-dev libtool pkg-config zlib1g-dev \ + libcunit1-dev libssl-dev libxml2-dev libevent-dev \ + automake autoconf + +# The list of packages nghttp2 recommends for h2load: +RUN apt-get install -y --no-install-recommends make binutils \ + autoconf automake autotools-dev \ + libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ + libev-dev libevent-dev libjansson-dev libjemalloc-dev \ + cython python3.4-dev python-setuptools + +# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: +ENV NGHTTP2_VER 895da9a +RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git + +WORKDIR /root/nghttp2 +RUN git reset --hard $NGHTTP2_VER +RUN autoreconf -i +RUN automake +RUN autoconf +RUN ./configure +RUN make +RUN make install + +WORKDIR /root +RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz +RUN tar -zxvf curl-7.45.0.tar.gz +WORKDIR /root/curl-7.45.0 +RUN ./configure --with-ssl --with-nghttp2=/usr/local +RUN make +RUN make install +RUN ldconfig + +CMD ["-h"] +ENTRYPOINT ["/usr/local/bin/curl"] + diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile new file mode 100644 index 000000000..55fd826f7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Makefile @@ -0,0 +1,3 @@ +curlimage: + docker build -t gohttp2/curl . + diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README new file mode 100644 index 000000000..360d5aa37 --- /dev/null +++ b/vendor/golang.org/x/net/http2/README @@ -0,0 +1,20 @@ +This is a work-in-progress HTTP/2 implementation for Go. + +It will eventually live in the Go standard library and won't require +any changes to your code to use. It will just be automatic. + +Status: + +* The server support is pretty good. A few things are missing + but are being worked on. +* The client work has just started but shares a lot of code + is coming along much quicker. + +Docs are at https://godoc.org/golang.org/x/net/http2 + +Demo test server at https://http2.golang.org/ + +Help & bug reports welcome! + +Contributing: https://golang.org/doc/contribute.html +Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 000000000..bdf5652b0 --- /dev/null +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -0,0 +1,256 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code's client connection pooling. + +package http2 + +import ( + "crypto/tls" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + GetClientConn(req *http.Request, addr string) (*ClientConn, error) + MarkDead(*ClientConn) +} + +// clientConnPoolIdleCloser is the interface implemented by ClientConnPool +// implementations which can close their idle connections. +type clientConnPoolIdleCloser interface { + ClientConnPool + closeIdleConnections() +} + +var ( + _ clientConnPoolIdleCloser = (*clientConnPool)(nil) + _ clientConnPoolIdleCloser = noDialClientConnPool{} +) + +// TODO: use singleflight for dialing and addConnCalls? 
+type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, dialOnMiss) +} + +const ( + dialOnMiss = true + noDialOnMiss = false +) + +func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + if isConnectionCloseRequest(req) && dialOnMiss { + // It gets its own connection. + const singleUse = true + cc, err := p.t.dialClientConn(addr, singleUse) + if err != nil { + return nil, err + } + return cc, nil + } + p.mu.Lock() + for _, cc := range p.conns[addr] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + call := p.getStartDialLocked(addr) + p.mu.Unlock() + <-call.done + return call.res, call.err +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + p *clientConnPool + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{})} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(addr) + return call +} + +// run in its own goroutine. +func (c *dialCall) dial(addr string) { + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(addr, singleUse) + close(c.done) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. 
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +func (p *clientConnPool) addConn(key string, cc *ClientConn) { + p.mu.Lock() + p.addConnLocked(key, cc) + p.mu.Unlock() +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) closeIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. + if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. +type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go new file mode 100644 index 000000000..4f720f530 --- /dev/null +++ b/vendor/golang.org/x/net/http2/configure_transport.go @@ -0,0 +1,80 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.6 + +package http2 + +import ( + "crypto/tls" + "fmt" + "net/http" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// convering panics into errors. +func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. +type noDialH2RoundTripper struct{ t *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.t.RoundTrip(req) + if err == ErrNoCachedConn { + return nil, http.ErrSkipAltProtocol + } + return res, err +} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 000000000..a3067f8de --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -0,0 +1,146 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary. 
The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) + +func getDataBufferChunk(size int64) []byte { + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } + } + return dataChunkPools[i].Get().([]byte) +} + +func putDataBufferChunk(p []byte) { + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } + } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) +} + +// dataBuffer is an io.ReadWriter backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. + if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). 
+ want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go new file mode 100644 index 000000000..20fd7626a --- /dev/null +++ b/vendor/golang.org/x/net/http2/errors.go @@ -0,0 +1,130 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" +) + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. +type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. +type StreamError struct { + StreamID uint32 + Code ErrCode + Cause error // optional additional detail +} + +func streamError(id uint32, code ErrCode) StreamError { + return StreamError{StreamID: id, Code: code} +} + +func (e StreamError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) + } + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// 6.9.1 The Flow Control Window +// "If a sender receives a WINDOW_UPDATE that causes a flow control +// window to exceed this maximum it MUST terminate either the stream +// or the connection, as appropriate. For streams, [...]; for the +// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." 
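// The chunk helpers above round a requested size up to the smallest size
// class and recycle chunks through per-class sync.Pools. The sketch below
// shows that selection on its own with three assumed classes; it is an
// illustration, not code from this package.
package main

import (
	"fmt"
	"sync"
)

var (
	sizeClasses = []int{1 << 10, 4 << 10, 16 << 10}
	pools       = [...]sync.Pool{
		{New: func() interface{} { return make([]byte, 1<<10) }},
		{New: func() interface{} { return make([]byte, 4<<10) }},
		{New: func() interface{} { return make([]byte, 16<<10) }},
	}
)

// getChunk returns a pooled chunk from the smallest class that fits size
// (or from the largest class if nothing fits).
func getChunk(size int64) []byte {
	i := 0
	for ; i < len(sizeClasses)-1; i++ {
		if size <= int64(sizeClasses[i]) {
			break
		}
	}
	return pools[i].Get().([]byte)
}

// putChunk returns a chunk to the pool whose class matches its length.
func putChunk(p []byte) {
	for i, n := range sizeClasses {
		if len(p) == n {
			pools[i].Put(p)
			return
		}
	}
}

func main() {
	c := getChunk(3000) // 3000 bytes rounds up to the 4 KiB class
	fmt.Println("got chunk of", len(c), "bytes")
	putChunk(c)
}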
+type goAwayFlowError struct{} + +func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connErrorReason wraps a ConnectionError with an informative error about why it occurs. + +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(ErrCodeProtocol). +type connError struct { + Code ErrCode + Reason string +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go new file mode 100644 index 000000000..957de2542 --- /dev/null +++ b/vendor/golang.org/x/net/http2/flow.go @@ -0,0 +1,50 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Flow control + +package http2 + +// flow is the flow control window's size. +type flow struct { + // n is the number of DATA bytes we're allowed to send. + // A flow is kept both on a conn and a per-stream. + n int32 + + // conn points to the shared connection-level flow that is + // shared by all streams on that conn. It is nil for the flow + // that's on the conn directly. + conn *flow +} + +func (f *flow) setConnFlow(cf *flow) { f.conn = cf } + +func (f *flow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *flow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. +func (f *flow) add(n int32) bool { + remain := (1<<31 - 1) - f.n + if n > remain { + return false + } + f.n += n + return true +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go new file mode 100644 index 000000000..3b1489072 --- /dev/null +++ b/vendor/golang.org/x/net/http2/frame.go @@ -0,0 +1,1579 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
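// flow.go above keeps one window per connection and one per stream, with each
// stream window pointing at the shared connection window: available() is the
// minimum of the two and take() charges both. A small standalone sketch of
// that accounting (window is an assumed name, not the package's type):
package main

import "fmt"

type window struct {
	n    int32
	conn *window // shared connection-level window; nil for the conn window itself
}

func (w *window) available() int32 {
	n := w.n
	if w.conn != nil && w.conn.n < n {
		n = w.conn.n
	}
	return n
}

func (w *window) take(n int32) {
	w.n -= n
	if w.conn != nil {
		w.conn.n -= n
	}
}

func main() {
	conn := &window{n: 100}          // connection-level window
	s1 := &window{n: 80, conn: conn} // stream 1
	s2 := &window{n: 80, conn: conn} // stream 2

	s1.take(70) // stream 1 sends 70 bytes
	fmt.Println(s1.available(), s2.available()) // 10 30: the shared window now limits stream 2
}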
+ +package http2 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// http://http2.github.io/http2-spec/#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. +const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). +type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. 
+// +// See http://http2.github.io/http2-spec/#FrameHeader +type FrameHeader struct { + valid bool // caller can access []byte fields in the Frame + + // Type is the 1 byte frame type. There are ten standard frame + // types, but extension frame types may be written by WriteRawFrame + // and will be returned by ReadFrame (as UnknownFrame). + Type FrameType + + // Flags are the 1 byte of 8 potential bit flags per frame. + // They are specific to the frame type. + Flags Flags + + // Length is the length of the frame, not including the 9 byte header. + // The maximum size is one byte less than 16MB (uint24), but only + // frames up to 16KB are allowed without peer agreement. + Length uint32 + + // StreamID is which stream this frame is for. Certain frames + // are not stream-specific, in which case this field is 0. + StreamID uint32 +} + +// Header returns h. It exists so FrameHeaders can be embedded in other +// specific frame types and implement the Frame interface. +func (h FrameHeader) Header() FrameHeader { return h } + +func (h FrameHeader) String() string { + var buf bytes.Buffer + buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { + buf.WriteString(h.Type.String()) + if h.Flags != 0 { + buf.WriteString(" flags=") + set := 0 + for i := uint8(0); i < 8; i++ { + if h.Flags&(1< 1 { + buf.WriteByte('|') + } + name := flagName[h.Type][Flags(1<>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) +} + +func (f *Framer) endWrite() error { + // Now that we know the final size, fill in the FrameHeader in + // the space previously reserved for it. Abuse append. + length := len(f.wbuf) - frameHeaderLen + if length >= (1 << 24) { + return ErrFrameTooLarge + } + _ = append(f.wbuf[:0], + byte(length>>16), + byte(length>>8), + byte(length)) + if f.logWrites { + f.logWrite() + } + + n, err := f.w.Write(f.wbuf) + if err == nil && n != len(f.wbuf) { + err = io.ErrShortWrite + } + return err +} + +func (f *Framer) logWrite() { + if f.debugFramer == nil { + f.debugFramerBuf = new(bytes.Buffer) + f.debugFramer = NewFramer(nil, f.debugFramerBuf) + f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + f.debugFramer.AllowIllegalReads = true + } + f.debugFramerBuf.Write(f.wbuf) + fr, err := f.debugFramer.ReadFrame() + if err != nil { + f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) + return + } + f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) +} + +func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } +func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } +func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } +func (f *Framer) writeUint32(v uint32) { + f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +const ( + minMaxFrameSize = 1 << 14 + maxFrameSize = 1<<24 - 1 +) + +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. 
+func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + +// NewFramer returns a Framer that writes frames to w and reads them from r. +func NewFramer(w io.Writer, r io.Reader) *Framer { + fr := &Framer{ + w: w, + r: r, + logReads: logFrameReads, + logWrites: logFrameWrites, + debugReadLoggerf: log.Printf, + debugWriteLoggerf: log.Printf, + } + fr.getReadBuf = func(size uint32) []byte { + if cap(fr.readBuf) >= int(size) { + return fr.readBuf[:size] + } + fr.readBuf = make([]byte, size) + return fr.readBuf + } + fr.SetMaxReadFrameSize(maxFrameSize) + return fr +} + +// SetMaxReadFrameSize sets the maximum size of a frame +// that will be read by a subsequent call to ReadFrame. +// It is the caller's responsibility to advertise this +// limit with a SETTINGS frame. +func (fr *Framer) SetMaxReadFrameSize(v uint32) { + if v > maxFrameSize { + v = maxFrameSize + } + fr.maxReadSize = v +} + +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + +// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer +// sends a frame that is larger than declared with SetMaxReadFrameSize. +var ErrFrameTooLarge = errors.New("http2: frame too large") + +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. +func terminalReadFrameError(err error) bool { + if _, ok := err.(StreamError); ok { + return false + } + return err != nil +} + +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. +func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil + if fr.lastFrame != nil { + fr.lastFrame.invalidate() + } + fh, err := readFrameHeader(fr.headerBuf[:], fr.r) + if err != nil { + return nil, err + } + if fh.Length > fr.maxReadSize { + return nil, ErrFrameTooLarge + } + payload := fr.getReadBuf(fh.Length) + if _, err := io.ReadFull(fr.r, payload); err != nil { + return nil, err + } + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) + if err != nil { + if ce, ok := err.(connError); ok { + return nil, fr.connError(ce.Code, ce.Reason) + } + return nil, err + } + if err := fr.checkFrameOrder(f); err != nil { + return nil, err + } + if fr.logReads { + fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) + } + return f, nil +} + +// connError returns ConnectionError(code) but first +// stashes away a public reason to the caller can optionally relay it +// to the peer before hanging up on them. 
This might help others debug +// their implementations. +func (fr *Framer) connError(code ErrCode, reason string) error { + fr.errDetail = errors.New(reason) + return ConnectionError(code) +} + +// checkFrameOrder reports an error if f is an invalid frame to return +// next from ReadFrame. Mostly it checks whether HEADERS and +// CONTINUATION frames are contiguous. +func (fr *Framer) checkFrameOrder(f Frame) error { + last := fr.lastFrame + fr.lastFrame = f + if fr.AllowIllegalReads { + return nil + } + + fh := f.Header() + if fr.lastHeaderStream != 0 { + if fh.Type != FrameContinuation { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", + fh.Type, fh.StreamID, + last.Header().Type, fr.lastHeaderStream)) + } + if fh.StreamID != fr.lastHeaderStream { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", + fh.StreamID, fr.lastHeaderStream)) + } + } else if fh.Type == FrameContinuation { + return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) + } + + switch fh.Type { + case FrameHeaders, FrameContinuation: + if fh.Flags.Has(FlagHeadersEndHeaders) { + fr.lastHeaderStream = 0 + } else { + fr.lastHeaderStream = fh.StreamID + } + } + + return nil +} + +// A DataFrame conveys arbitrary, variable-length sequences of octets +// associated with a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.1 +type DataFrame struct { + FrameHeader + data []byte +} + +func (f *DataFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagDataEndStream) +} + +// Data returns the frame's data octets, not including any padding +// size byte or padding suffix bytes. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *DataFrame) Data() []byte { + f.checkValid() + return f.data +} + +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} + } + f := fc.getDataFrame() + f.FrameHeader = fh + + var padSize byte + if fh.Flags.Has(FlagDataPadded) { + var err error + payload, padSize, err = readByte(payload) + if err != nil { + return nil, err + } + } + if int(padSize) > len(payload) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} + } + f.data = payload[:len(payload)-int(padSize)] + return f, nil +} + +var ( + errStreamID = errors.New("invalid stream ID") + errDepStreamID = errors.New("invalid dependent stream ID") + errPadLength = errors.New("pad length too large") + errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") +) + +func validStreamIDOrZero(streamID uint32) bool { + return streamID&(1<<31) == 0 +} + +func validStreamID(streamID uint32) bool { + return streamID != 0 && streamID&(1<<31) == 0 +} + +// WriteData writes a DATA frame. +// +// It will perform exactly one Write to the underlying Writer. 
+// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { + return f.WriteDataPadded(streamID, endStream, data, nil) +} + +// WriteData writes a DATA frame with optional padding. +// +// If pad is nil, the padding bit is not sent. +// The length of pad must not exceed 255 bytes. +// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if len(pad) > 0 { + if len(pad) > 255 { + return errPadLength + } + if !f.AllowIllegalWrites { + for _, b := range pad { + if b != 0 { + // "Padding octets MUST be set to zero when sending." + return errPadBytes + } + } + } + } + var flags Flags + if endStream { + flags |= FlagDataEndStream + } + if pad != nil { + flags |= FlagDataPadded + } + f.startWrite(FrameData, flags, streamID) + if pad != nil { + f.wbuf = append(f.wbuf, byte(len(pad))) + } + f.wbuf = append(f.wbuf, data...) + f.wbuf = append(f.wbuf, pad...) + return f.endWrite() +} + +// A SettingsFrame conveys configuration parameters that affect how +// endpoints communicate, such as preferences and constraints on peer +// behavior. +// +// See http://http2.github.io/http2-spec/#SETTINGS +type SettingsFrame struct { + FrameHeader + p []byte +} + +func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { + // When this (ACK 0x1) bit is set, the payload of the + // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame with the ACK flag set and a length + // field value other than 0 MUST be treated as a + // connection error (Section 5.4.1) of type + // FRAME_SIZE_ERROR. + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + // SETTINGS frames always apply to a connection, + // never a single stream. The stream identifier for a + // SETTINGS frame MUST be zero (0x0). If an endpoint + // receives a SETTINGS frame whose stream identifier + // field is anything other than 0x0, the endpoint MUST + // respond with a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p)%6 != 0 { + // Expecting even number of 6 byte settings. + return nil, ConnectionError(ErrCodeFrameSize) + } + f := &SettingsFrame{FrameHeader: fh, p: p} + if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { + // Values above the maximum flow control window size of 2^31 - 1 MUST + // be treated as a connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + return nil, ConnectionError(ErrCodeFlowControl) + } + return f, nil +} + +func (f *SettingsFrame) IsAck() bool { + return f.FrameHeader.Flags.Has(FlagSettingsAck) +} + +func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { + f.checkValid() + buf := f.p + for len(buf) > 0 { + settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) + if settingID == s { + return binary.BigEndian.Uint32(buf[2:6]), true + } + buf = buf[6:] + } + return 0, false +} + +// ForeachSetting runs fn for each setting. +// It stops and returns the first error. 
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { + f.checkValid() + buf := f.p + for len(buf) > 0 { + if err := fn(Setting{ + SettingID(binary.BigEndian.Uint16(buf[:2])), + binary.BigEndian.Uint32(buf[2:6]), + }); err != nil { + return err + } + buf = buf[6:] + } + return nil +} + +// WriteSettings writes a SETTINGS frame with zero or more settings +// specified and the ACK bit not set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettings(settings ...Setting) error { + f.startWrite(FrameSettings, 0, 0) + for _, s := range settings { + f.writeUint16(uint16(s.ID)) + f.writeUint32(s.Val) + } + return f.endWrite() +} + +// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettingsAck() error { + f.startWrite(FrameSettings, FlagSettingsAck, 0) + return f.endWrite() +} + +// A PingFrame is a mechanism for measuring a minimal round trip time +// from the sender, as well as determining whether an idle connection +// is still functional. +// See http://http2.github.io/http2-spec/#rfc.section.6.7 +type PingFrame struct { + FrameHeader + Data [8]byte +} + +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if len(payload) != 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + f := &PingFrame{FrameHeader: fh} + copy(f.Data[:], payload) + return f, nil +} + +func (f *Framer) WritePing(ack bool, data [8]byte) error { + var flags Flags + if ack { + flags = FlagPingAck + } + f.startWrite(FramePing, flags, 0) + f.writeBytes(data[:]) + return f.endWrite() +} + +// A GoAwayFrame informs the remote peer to stop creating streams on this connection. +// See http://http2.github.io/http2-spec/#rfc.section.6.8 +type GoAwayFrame struct { + FrameHeader + LastStreamID uint32 + ErrCode ErrCode + debugData []byte +} + +// DebugData returns any debug data in the GOAWAY frame. Its contents +// are not defined. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *GoAwayFrame) DebugData() []byte { + f.checkValid() + return f.debugData +} + +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p) < 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + return &GoAwayFrame{ + FrameHeader: fh, + LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), + ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), + debugData: p[8:], + }, nil +} + +func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { + f.startWrite(FrameGoAway, 0, 0) + f.writeUint32(maxStreamID & (1<<31 - 1)) + f.writeUint32(uint32(code)) + f.writeBytes(debugData) + return f.endWrite() +} + +// An UnknownFrame is the frame type returned when the frame type is unknown +// or no specific frame type parser exists. +type UnknownFrame struct { + FrameHeader + p []byte +} + +// Payload returns the frame's payload (after the header). 
It is not +// valid to call this method after a subsequent call to +// Framer.ReadFrame, nor is it valid to retain the returned slice. +// The memory is owned by the Framer and is invalidated when the next +// frame is read. +func (f *UnknownFrame) Payload() []byte { + f.checkValid() + return f.p +} + +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + return &UnknownFrame{fh, p}, nil +} + +// A WindowUpdateFrame is used to implement flow control. +// See http://http2.github.io/http2-spec/#rfc.section.6.9 +type WindowUpdateFrame struct { + FrameHeader + Increment uint32 // never read with high bit set +} + +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit + if inc == 0 { + // A receiver MUST treat the receipt of a + // WINDOW_UPDATE frame with an flow control window + // increment of 0 as a stream error (Section 5.4.2) of + // type PROTOCOL_ERROR; errors on the connection flow + // control window MUST be treated as a connection + // error (Section 5.4.1). + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + return &WindowUpdateFrame{ + FrameHeader: fh, + Increment: inc, + }, nil +} + +// WriteWindowUpdate writes a WINDOW_UPDATE frame. +// The increment value must be between 1 and 2,147,483,647, inclusive. +// If the Stream ID is zero, the window update applies to the +// connection as a whole. +func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { + // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." + if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { + return errors.New("illegal window increment value") + } + f.startWrite(FrameWindowUpdate, 0, streamID) + f.writeUint32(incr) + return f.endWrite() +} + +// A HeadersFrame is used to open a stream and additionally carries a +// header block fragment. +type HeadersFrame struct { + FrameHeader + + // Priority is set if FlagHeadersPriority is set in the FrameHeader. + Priority PriorityParam + + headerFragBuf []byte // not owned +} + +func (f *HeadersFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *HeadersFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) +} + +func (f *HeadersFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndStream) +} + +func (f *HeadersFrame) HasPriority() bool { + return f.FrameHeader.Flags.Has(FlagHeadersPriority) +} + +func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { + hf := &HeadersFrame{ + FrameHeader: fh, + } + if fh.StreamID == 0 { + // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // is received whose stream identifier field is 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. 
+ return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} + } + var padLength uint8 + if fh.Flags.Has(FlagHeadersPadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + if fh.Flags.Has(FlagHeadersPriority) { + var v uint32 + p, v, err = readUint32(p) + if err != nil { + return nil, err + } + hf.Priority.StreamDep = v & 0x7fffffff + hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set + p, hf.Priority.Weight, err = readByte(p) + if err != nil { + return nil, err + } + } + if len(p)-int(padLength) <= 0 { + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + hf.headerFragBuf = p[:len(p)-int(padLength)] + return hf, nil +} + +// HeadersFrameParam are the parameters for writing a HEADERS frame. +type HeadersFrameParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndStream indicates that the header block is the last that + // the endpoint will send for the identified stream. Setting + // this flag causes the stream to enter one of "half closed" + // states. + EndStream bool + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 + + // Priority, if non-zero, includes stream priority information + // in the HEADER frame. + Priority PriorityParam +} + +// WriteHeaders writes a single HEADERS frame. +// +// This is a low-level header writing method. Encoding headers and +// splitting them into any necessary CONTINUATION frames is handled +// elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteHeaders(p HeadersFrameParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagHeadersPadded + } + if p.EndStream { + flags |= FlagHeadersEndStream + } + if p.EndHeaders { + flags |= FlagHeadersEndHeaders + } + if !p.Priority.IsZero() { + flags |= FlagHeadersPriority + } + f.startWrite(FrameHeaders, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !p.Priority.IsZero() { + v := p.Priority.StreamDep + if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { + return errDepStreamID + } + if p.Priority.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Priority.Weight) + } + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// A PriorityFrame specifies the sender-advised priority of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.3 +type PriorityFrame struct { + FrameHeader + PriorityParam +} + +// PriorityParam are the stream prioritzation parameters. +type PriorityParam struct { + // StreamDep is a 31-bit stream identifier for the + // stream that this stream depends on. Zero means no + // dependency. + StreamDep uint32 + + // Exclusive is whether the dependency is exclusive. + Exclusive bool + + // Weight is the stream's zero-indexed weight. It should be + // set together with StreamDep, or neither should be set. Per + // the spec, "Add one to the value to obtain a weight between + // 1 and 256." 
+ Weight uint8 +} + +func (p PriorityParam) IsZero() bool { + return p == PriorityParam{} +} + +func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} + } + if len(payload) != 5 { + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} + } + v := binary.BigEndian.Uint32(payload[:4]) + streamID := v & 0x7fffffff // mask off high bit + return &PriorityFrame{ + FrameHeader: fh, + PriorityParam: PriorityParam{ + Weight: payload[4], + StreamDep: streamID, + Exclusive: streamID != v, // was high bit set? + }, + }, nil +} + +// WritePriority writes a PRIORITY frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if !validStreamIDOrZero(p.StreamDep) { + return errDepStreamID + } + f.startWrite(FramePriority, 0, streamID) + v := p.StreamDep + if p.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Weight) + return f.endWrite() +} + +// A RSTStreamFrame allows for abnormal termination of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.4 +type RSTStreamFrame struct { + FrameHeader + ErrCode ErrCode +} + +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil +} + +// WriteRSTStream writes a RST_STREAM frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + f.startWrite(FrameRSTStream, 0, streamID) + f.writeUint32(uint32(code)) + return f.endWrite() +} + +// A ContinuationFrame is used to continue a sequence of header block fragments. +// See http://http2.github.io/http2-spec/#rfc.section.6.10 +type ContinuationFrame struct { + FrameHeader + headerFragBuf []byte +} + +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } + return &ContinuationFrame{fh, p}, nil +} + +func (f *ContinuationFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *ContinuationFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) +} + +// WriteContinuation writes a CONTINUATION frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endHeaders { + flags |= FlagContinuationEndHeaders + } + f.startWrite(FrameContinuation, flags, streamID) + f.wbuf = append(f.wbuf, headerBlockFragment...) 
+ return f.endWrite() +} + +// A PushPromiseFrame is used to initiate a server stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.6 +type PushPromiseFrame struct { + FrameHeader + PromiseID uint32 + headerFragBuf []byte // not owned +} + +func (f *PushPromiseFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *PushPromiseFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) +} + +func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { + pp := &PushPromiseFrame{ + FrameHeader: fh, + } + if pp.StreamID == 0 { + // PUSH_PROMISE frames MUST be associated with an existing, + // peer-initiated stream. The stream identifier of a + // PUSH_PROMISE frame indicates the stream it is associated + // with. If the stream identifier field specifies the value + // 0x0, a recipient MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + // The PUSH_PROMISE frame includes optional padding. + // Padding fields and flags are identical to those defined for DATA frames + var padLength uint8 + if fh.Flags.Has(FlagPushPromisePadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + + p, pp.PromiseID, err = readUint32(p) + if err != nil { + return + } + pp.PromiseID = pp.PromiseID & (1<<31 - 1) + + if int(padLength) > len(p) { + // like the DATA frame, error out if padding is longer than the body. + return nil, ConnectionError(ErrCodeProtocol) + } + pp.headerFragBuf = p[:len(p)-int(padLength)] + return pp, nil +} + +// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. +type PushPromiseParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + + // PromiseID is the required Stream ID which this + // Push Promises + PromiseID uint32 + + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 +} + +// WritePushPromise writes a single PushPromise Frame. +// +// As with Header Frames, This is the low level call for writing +// individual frames. Continuation frames are handled elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePushPromise(p PushPromiseParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagPushPromisePadded + } + if p.EndHeaders { + flags |= FlagPushPromiseEndHeaders + } + f.startWrite(FramePushPromise, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { + return errStreamID + } + f.writeUint32(p.PromiseID) + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// WriteRawFrame writes a raw frame. This can be used to write +// extension frames unknown to this package. 
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { + f.startWrite(t, flags, streamID) + f.writeBytes(payload) + return f.endWrite() +} + +func readByte(p []byte) (remain []byte, b byte, err error) { + if len(p) == 0 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[1:], p[0], nil +} + +func readUint32(p []byte) (remain []byte, v uint32, err error) { + if len(p) < 4 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[4:], binary.BigEndian.Uint32(p[:4]), nil +} + +type streamEnder interface { + StreamEnded() bool +} + +type headersEnder interface { + HeadersEnded() bool +} + +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. + // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. +func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. 
+ for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) + } + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if VerboseLogs && fr.logReads { + fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) + } + if !httplex.ValidHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validWireHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} + } + if err := mh.checkPseudos(); err != nil { + fr.errDetail = err + if VerboseLogs { + log.Printf("http2: invalid pseudo headers: %v", err) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} + } + return mh, nil +} + +func summarizeFrame(f Frame) string { + var buf bytes.Buffer + f.Header().writeDebug(&buf) + switch f := f.(type) { + case *SettingsFrame: + n := 0 + f.ForeachSetting(func(s Setting) error { + n++ + if n == 1 { + buf.WriteString(", settings:") + } + fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) + return nil + }) + if n > 0 { + buf.Truncate(buf.Len() - 1) // remove trailing comma + } + case *DataFrame: + data := f.Data() + const max = 256 + if len(data) > max { + data = data[:max] + } + fmt.Fprintf(&buf, " data=%q", data) + if len(f.Data()) > max { + fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) + } + case *WindowUpdateFrame: + if f.StreamID == 0 { + buf.WriteString(" (conn)") 
+ } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go new file mode 100644 index 000000000..2b72855f5 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go16.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.6 + +package http2 + +import ( + "crypto/tls" + "net/http" + "time" +) + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return t1.ExpectContinueTimeout +} + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +func isBadCipher(cipher uint16) bool { + switch cipher { + case tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + // Reject cipher suites from Appendix A. + // "This list includes those cipher suites that do not + // offer an ephemeral key exchange and those that are + // based on the TLS null, stream or block cipher type" + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go new file mode 100644 index 000000000..47b7fae08 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go17.go @@ -0,0 +1,106 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
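// An assumed usage sketch for the Framer defined in frame.go above: write a
// few frames into a bytes.Buffer and read them back. It relies only on
// exported identifiers shown in this file (NewFramer, WriteSettings,
// WriteWindowUpdate, WriteData, ReadFrame) plus the package's Setting type.
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf) // write to and read from the same buffer

	if err := fr.WriteSettings(http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14}); err != nil {
		log.Fatal(err)
	}
	if err := fr.WriteWindowUpdate(0, 1<<20); err != nil { // stream 0: connection-level update
		log.Fatal(err)
	}
	if err := fr.WriteData(1, true, []byte("hello")); err != nil {
		log.Fatal(err)
	}

	for {
		f, err := fr.ReadFrame()
		if err != nil {
			break // io.EOF once the buffer is drained
		}
		h := f.Header()
		fmt.Printf("stream=%d type=%v len=%d\n", h.StreamID, h.Type, h.Length)
	}
}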
+ +// +build go1.7 + +package http2 + +import ( + "context" + "net" + "net/http" + "net/http/httptrace" + "time" +) + +type contextContext interface { + context.Context +} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + ctx, cancel = context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return context.WithCancel(ctx) +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req.WithContext(ctx) +} + +type clientTrace httptrace.ClientTrace + +func reqContext(r *http.Request) context.Context { return r.Context() } + +func (t *Transport) idleConnTimeout() time.Duration { + if t.t1 != nil { + return t.t1.IdleConnTimeout + } + return 0 +} + +func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } + +func traceGotConn(req *http.Request, cc *ClientConn) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + cc.mu.Lock() + ci.Reused = cc.nextStreamID > 1 + ci.WasIdle = len(cc.streams) == 0 && ci.Reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func traceWroteHeaders(trace *clientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceGot100Continue(trace *clientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceWait100Continue(trace *clientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func traceWroteRequest(trace *clientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceFirstResponseByte(trace *clientTrace) { + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} + +func requestTrace(req *http.Request) *clientTrace { + trace := httptrace.ContextClientTrace(req.Context()) + return (*clientTrace)(trace) +} + +// Ping sends a PING frame to the server and waits for the ack. +func (cc *ClientConn) Ping(ctx context.Context) error { + return cc.ping(ctx) +} diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go new file mode 100644 index 000000000..b4c52ecec --- /dev/null +++ b/vendor/golang.org/x/net/http2/go17_not18.go @@ -0,0 +1,36 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
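// The trace helpers in go17.go above fan out to hooks registered through
// net/http/httptrace. An assumed client-side sketch of installing those hooks
// (the URL is a placeholder):
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptrace"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}
	trace := &httptrace.ClientTrace{
		GotConn: func(info httptrace.GotConnInfo) {
			fmt.Printf("got conn: reused=%v wasIdle=%v\n", info.Reused, info.WasIdle)
		},
		GotFirstResponseByte: func() {
			fmt.Println("first response byte")
		},
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	res.Body.Close()
	fmt.Println("status:", res.Status)
}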
+ +// +build go1.7,!go1.8 + +package http2 + +import "crypto/tls" + +// temporary copy of Go 1.7's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go new file mode 100644 index 000000000..73cc2381f --- /dev/null +++ b/vendor/golang.org/x/net/http2/go18.go @@ -0,0 +1,54 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package http2 + +import ( + "crypto/tls" + "io" + "net/http" +) + +func cloneTLSConfig(c *tls.Config) *tls.Config { + c2 := c.Clone() + c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 + return c2 +} + +var _ http.Pusher = (*responseWriter)(nil) + +// Push implements http.Pusher. +func (w *responseWriter) Push(target string, opts *http.PushOptions) error { + internalOpts := pushOptions{} + if opts != nil { + internalOpts.Method = opts.Method + internalOpts.Header = opts.Header + } + return w.push(target, internalOpts) +} + +func configureServer18(h1 *http.Server, h2 *Server) error { + if h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } + } + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil && panicValue != http.ErrAbortHandler +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return req.GetBody +} + +func reqBodyIsNoBody(body io.ReadCloser) bool { + return body == http.NoBody +} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go new file mode 100644 index 000000000..9933c9f8c --- /dev/null +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Defensive debug-only utility to track that functions run on the +// goroutine that they're supposed to. 
+
+package http2
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"strconv"
+	"sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+	if !DebugGoroutines {
+		return 0
+	}
+	return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() != uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+func (g goroutineLock) checkNotOn() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() == uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+	bp := littleBuf.Get().(*[]byte)
+	defer littleBuf.Put(bp)
+	b := *bp
+	b = b[:runtime.Stack(b, false)]
+	// Parse the 4707 out of "goroutine 4707 ["
+	b = bytes.TrimPrefix(b, goroutineSpace)
+	i := bytes.IndexByte(b, ' ')
+	if i < 0 {
+		panic(fmt.Sprintf("No space found in %q", b))
+	}
+	b = b[:i]
+	n, err := parseUintBytes(b, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+	}
+	return n
+}
+
+var littleBuf = sync.Pool{
+	New: func() interface{} {
+		buf := make([]byte, 64)
+		return &buf
+	},
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+	var cutoff, maxVal uint64
+
+	if bitSize == 0 {
+		bitSize = int(strconv.IntSize)
+	}
+
+	s0 := s
+	switch {
+	case len(s) < 1:
+		err = strconv.ErrSyntax
+		goto Error
+
+	case 2 <= base && base <= 36:
+		// valid base; nothing to do
+
+	case base == 0:
+		// Look for octal, hex prefix.
+		switch {
+		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+			base = 16
+			s = s[2:]
+			if len(s) < 1 {
+				err = strconv.ErrSyntax
+				goto Error
+			}
+		case s[0] == '0':
+			base = 8
+		default:
+			base = 10
+		}
+
+	default:
+		err = errors.New("invalid base " + strconv.Itoa(base))
+		goto Error
+	}
+
+	n = 0
+	cutoff = cutoff64(base)
+	maxVal = 1<<uint(bitSize) - 1
+
+	for i := 0; i < len(s); i++ {
+		var v byte
+		d := s[i]
+		switch {
+		case '0' <= d && d <= '9':
+			v = d - '0'
+		case 'a' <= d && d <= 'z':
+			v = d - 'a' + 10
+		case 'A' <= d && d <= 'Z':
+			v = d - 'A' + 10
+		default:
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+		if int(v) >= base {
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+
+		if n >= cutoff {
+			// n*base overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n *= uint64(base)
+
+		n1 := n + uint64(v)
+		if n1 < n || n1 > maxVal {
+			// n+v overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n = n1
+	}
+
+	return n, nil
+
+Error:
+	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+	if base < 2 {
+		return 0
+	}
+	return (1<<64-1)/uint64(base) + 1
+}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 000000000..c2805f6ac
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package http2 + +import ( + "net/http" + "strings" +) + +var ( + commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case + commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case +) + +func init() { + for _, v := range []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 000000000..54726c2a3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -0,0 +1,240 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "io" +) + +const ( + uint32Max = ^uint32(0) + initialHeaderTableSize = 4096 +) + +type Encoder struct { + dynTab dynamicTable + // minSize is the minimum table size set by + // SetMaxDynamicTableSize after the previous Header Table Size + // Update. + minSize uint32 + // maxSizeLimit is the maximum table size this encoder + // supports. This will protect the encoder from too large + // size. + maxSizeLimit uint32 + // tableSizeUpdate indicates whether "Header Table Size + // Update" is required. + tableSizeUpdate bool + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs HPACK encoding. An +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + minSize: uint32Max, + maxSizeLimit: initialHeaderTableSize, + tableSizeUpdate: false, + w: w, + } + e.dynTab.table.init() + e.dynTab.setMaxSize(initialHeaderTableSize) + return e +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for "Header Table Size Update" +// if necessary. If produced, it is done before encoding f. 
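A minimal usage sketch of the encoder API documented above, assuming the package is vendored as `golang.org/x/net/http2/hpack`; the snippet is illustrative and not part of the vendored files:

```
// Sketch: encode a few header fields with the hpack Encoder.
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	// Each WriteField call results in exactly one Write to buf, preceded by a
	// "Header Table Size Update" if one is pending.
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	// Sensitive fields are emitted with the "Never Indexed" representation.
	enc.WriteField(hpack.HeaderField{Name: "authorization", Value: "secret", Sensitive: true})
	fmt.Printf("%d bytes of HPACK output\n", buf.Len())
}
```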
+func (e *Encoder) WriteField(f HeaderField) error { + e.buf = e.buf[:0] + + if e.tableSizeUpdate { + e.tableSizeUpdate = false + if e.minSize < e.dynTab.maxSize { + e.buf = appendTableSize(e.buf, e.minSize) + } + e.minSize = uint32Max + e.buf = appendTableSize(e.buf, e.dynTab.maxSize) + } + + idx, nameValueMatch := e.searchTable(f) + if nameValueMatch { + e.buf = appendIndexed(e.buf, idx) + } else { + indexing := e.shouldIndex(f) + if indexing { + e.dynTab.add(f) + } + + if idx == 0 { + e.buf = appendNewName(e.buf, f, indexing) + } else { + e.buf = appendIndexedName(e.buf, f, idx, indexing) + } + } + n, err := e.w.Write(e.buf) + if err == nil && n != len(e.buf) { + err = io.ErrShortWrite + } + return err +} + +// searchTable searches f in both stable and dynamic header tables. +// The static header table is searched first. Only when there is no +// exact match for both name and value, the dynamic header table is +// then searched. If there is no match, i is 0. If both name and value +// match, i is the matched index and nameValueMatch becomes true. If +// only name matches, i points to that index and nameValueMatch +// becomes false. +func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true + } + + j, nameValueMatch := e.dynTab.table.search(f) + if nameValueMatch || (i == 0 && j != 0) { + return j + uint64(staticTable.len()), nameValueMatch + } + + return i, false +} + +// SetMaxDynamicTableSize changes the dynamic header table size to v. +// The actual size is bounded by the value passed to +// SetMaxDynamicTableSizeLimit. +func (e *Encoder) SetMaxDynamicTableSize(v uint32) { + if v > e.maxSizeLimit { + v = e.maxSizeLimit + } + if v < e.minSize { + e.minSize = v + } + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) +} + +// SetMaxDynamicTableSizeLimit changes the maximum value that can be +// specified in SetMaxDynamicTableSize to v. By default, it is set to +// 4096, which is the same size of the default dynamic header table +// size described in HPACK specification. If the current maximum +// dynamic header table size is strictly greater than v, "Header Table +// Size Update" will be done in the next WriteField call and the +// maximum dynamic header table size is truncated to v. +func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { + e.maxSizeLimit = v + if e.dynTab.maxSize > v { + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) + } +} + +// shouldIndex reports whether f should be indexed. +func (e *Encoder) shouldIndex(f HeaderField) bool { + return !f.Sensitive && f.Size() <= e.dynTab.maxSize +} + +// appendIndexed appends index i, as encoded in "Indexed Header Field" +// representation, to dst and returns the extended buffer. +func appendIndexed(dst []byte, i uint64) []byte { + first := len(dst) + dst = appendVarInt(dst, 7, i) + dst[first] |= 0x80 + return dst +} + +// appendNewName appends f, as encoded in one of "Literal Header field +// - New Name" representation variants, to dst and returns the +// extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Inremental Indexing" +// representation is used. 
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { + dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) + dst = appendHpackString(dst, f.Name) + return appendHpackString(dst, f.Value) +} + +// appendIndexedName appends f and index i referring indexed name +// entry, as encoded in one of "Literal Header field - Indexed Name" +// representation variants, to dst and returns the extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { + first := len(dst) + var n byte + if indexing { + n = 6 + } else { + n = 4 + } + dst = appendVarInt(dst, n, i) + dst[first] |= encodeTypeByte(indexing, f.Sensitive) + return appendHpackString(dst, f.Value) +} + +// appendTableSize appends v, as encoded in "Header Table Size Update" +// representation, to dst and returns the extended buffer. +func appendTableSize(dst []byte, v uint32) []byte { + first := len(dst) + dst = appendVarInt(dst, 5, uint64(v)) + dst[first] |= 0x20 + return dst +} + +// appendVarInt appends i, as encoded in variable integer form using n +// bit prefix, to dst and returns the extended buffer. +// +// See +// http://http2.github.io/http2-spec/compression.html#integer.representation +func appendVarInt(dst []byte, n byte, i uint64) []byte { + k := uint64((1 << n) - 1) + if i < k { + return append(dst, byte(i)) + } + dst = append(dst, byte(k)) + i -= k + for ; i >= 128; i >>= 7 { + dst = append(dst, byte(0x80|(i&0x7f))) + } + return append(dst, byte(i)) +} + +// appendHpackString appends s, as encoded in "String Literal" +// representation, to dst and returns the the extended buffer. +// +// s will be encoded in Huffman codes only when it produces strictly +// shorter byte string. +func appendHpackString(dst []byte, s string) []byte { + huffmanLength := HuffmanEncodeLength(s) + if huffmanLength < uint64(len(s)) { + first := len(dst) + dst = appendVarInt(dst, 7, huffmanLength) + dst = AppendHuffmanString(dst, s) + dst[first] |= 0x80 + } else { + dst = appendVarInt(dst, 7, uint64(len(s))) + dst = append(dst, s...) + } + return dst +} + +// encodeTypeByte returns type byte. If sensitive is true, type byte +// for "Never Indexed" representation is returned. If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned. +func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 000000000..176644acd --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,490 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. +// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. 
+type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. + saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + } + d.dynTab.table.init() + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. 
+// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + table headerFieldTable + size uint32 // in bytes + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +func (dt *dynamicTable) add(f HeaderField) { + dt.table.addEntry(f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff. +func (dt *dynamicTable) evict() { + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ + } + dt.table.evictOldest(n) +} + +func (d *Decoder) maxTableIndex() int { + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + // See Section 2.3.3. + if i == 0 { + return + } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } + if i > uint64(d.maxTableIndex()) { + return + } + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. 
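A hypothetical round trip through the decoder described above (not part of the patch); `DecodeFull` is the convenience entry point, while `Write` feeds the decoder incrementally:

```
// Sketch: encode a header block, then decode it back into HeaderFields.
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/plain"})

	dec := hpack.NewDecoder(4096, nil) // emit func is unused by DecodeFull
	fields, err := dec.DecodeFull(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range fields {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	}
}
```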
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + return d.parseFieldIndexed() + case b&192 == 64: + // 6.2.1 Literal Header Field with Incremental Indexing + // 0b10xxxxxx: top two bits are 10 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + return d.parseFieldLiteral(6, indexedTrue) + case b&240 == 0: + // 6.2.2 Literal Header Field without Indexing + // 0b0000xxxx: top four bits are 0000 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + return d.parseFieldLiteral(4, indexedFalse) + case b&240 == 16: + // 6.2.3 Literal Header Field never Indexed + // 0b0001xxxx: top four bits are 0001 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + return d.parseFieldLiteral(4, indexedNever) + case b&224 == 32: + // 6.3 Dynamic Table Size Update + // Top three bits are '001'. 
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+		return d.parseDynamicTableSizeUpdate()
+	}
+
+	return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+	buf := d.buf
+	idx, buf, err := readVarInt(7, buf)
+	if err != nil {
+		return err
+	}
+	hf, ok := d.at(idx)
+	if !ok {
+		return DecodingError{InvalidIndexError(idx)}
+	}
+	d.buf = buf
+	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+	buf := d.buf
+	nameIdx, buf, err := readVarInt(n, buf)
+	if err != nil {
+		return err
+	}
+
+	var hf HeaderField
+	wantStr := d.emitEnabled || it.indexed()
+	if nameIdx > 0 {
+		ihf, ok := d.at(nameIdx)
+		if !ok {
+			return DecodingError{InvalidIndexError(nameIdx)}
+		}
+		hf.Name = ihf.Name
+	} else {
+		hf.Name, buf, err = d.readString(buf, wantStr)
+		if err != nil {
+			return err
+		}
+	}
+	hf.Value, buf, err = d.readString(buf, wantStr)
+	if err != nil {
+		return err
+	}
+	d.buf = buf
+	if it.indexed() {
+		d.dynTab.add(hf)
+	}
+	hf.Sensitive = it.sensitive()
+	return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+	if d.maxStrLen != 0 {
+		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+			return ErrStringLength
+		}
+	}
+	if d.emitEnabled {
+		d.emit(hf)
+	}
+	return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+	buf := d.buf
+	size, buf, err := readVarInt(5, buf)
+	if err != nil {
+		return err
+	}
+	if size > uint64(d.dynTab.allowedMaxSize) {
+		return DecodingError{errors.New("dynamic table size update too large")}
+	}
+	d.dynTab.setMaxSize(uint32(size))
+	d.buf = buf
+	return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	origP := p
+	p = p[1:]
+	var m uint
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
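The prefix-integer coding that readVarInt consumes (RFC 7541 section 5.1) is easiest to see from the encoding side. The following standalone sketch mirrors the scheme described above; it is illustrative only and not part of the vendored code:

```
// Sketch: encode an integer with an n-bit prefix, the inverse of readVarInt.
package main

import "fmt"

// encodePrefixInt appends i using an n-bit prefix: values smaller than
// 2^n-1 fit in the prefix, larger values spill into 7-bit continuation
// bytes with the high bit marking "more follows".
func encodePrefixInt(dst []byte, n byte, i uint64) []byte {
	k := uint64(1)<<n - 1
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k))
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f)))
	}
	return append(dst, byte(i))
}

func main() {
	// 1337 with a 5-bit prefix encodes as 31, 154, 10 (the RFC's own example).
	fmt.Println(encodePrefixInt(nil, 5, 1337)) // [31 154 10]
}
```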
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { + if len(p) == 0 { + return "", p, errNeedMore + } + isHuff := p[0]&128 != 0 + strLen, p, err := readVarInt(7, p) + if err != nil { + return "", p, err + } + if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { + return "", nil, ErrStringLength + } + if uint64(len(p)) < strLen { + return "", p, errNeedMore + } + if !isHuff { + if wantStr { + s = string(p[:strLen]) + } + return s, p[strLen:], nil + } + + if wantStr { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + defer bufPool.Put(buf) + if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { + buf.Reset() + return "", nil, err + } + s = buf.String() + buf.Reset() // be nice to GC + } + return s, p[strLen:], nil +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 000000000..8850e3946 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,212 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. +func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + n := rootHuffmanNode + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. 
+	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		cbits += 8
+		sbits += 8
+		for cbits >= 8 {
+			idx := byte(cur >> (cbits - 8))
+			n = n.children[idx]
+			if n == nil {
+				return ErrInvalidHuffman
+			}
+			if n.children == nil {
+				if maxLen != 0 && buf.Len() == maxLen {
+					return ErrStringLength
+				}
+				buf.WriteByte(n.sym)
+				cbits -= n.codeLen
+				n = rootHuffmanNode
+				sbits = cbits
+			} else {
+				cbits -= 8
+			}
+		}
+	}
+	for cbits > 0 {
+		n = n.children[byte(cur<<(8-cbits))]
+		if n == nil {
+			return ErrInvalidHuffman
+		}
+		if n.children != nil || n.codeLen > cbits {
+			break
+		}
+		if maxLen != 0 && buf.Len() == maxLen {
+			return ErrStringLength
+		}
+		buf.WriteByte(n.sym)
+		cbits -= n.codeLen
+		n = rootHuffmanNode
+		sbits = cbits
+	}
+	if sbits > 7 {
+		// Either there was an incomplete symbol, or overlong padding.
+		// Both are decoding errors per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+	if mask := uint(1<<cbits - 1); cur&mask != mask {
+		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+
+	return nil
+}
+
+type node struct {
+	// children is non-nil for internal nodes
+	children []*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // number of bits that led to the output of sym
+	sym     byte  // output symbol
+}
+
+func newInternalNode() *node {
+	return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+	if len(huffmanCodes) != 256 {
+		panic("unexpected size")
+	}
+	for i, code := range huffmanCodes {
+		addDecoderNode(byte(i), code, huffmanCodeLen[i])
+	}
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+	cur := rootHuffmanNode
+	for codeLen > 8 {
+		codeLen -= 8
+		i := uint8(code >> codeLen)
+		if cur.children[i] == nil {
+			cur.children[i] = newInternalNode()
+		}
+		cur = cur.children[i]
+	}
+	shift := 8 - codeLen
+	start, end := int(uint8(code<<shift)), int(1<<shift)
+	for i := start; i < start+end; i++ {
+		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is round up to byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst is given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 000000000..31bd5a553
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,478 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+	// For static tables, entries are never evicted.
+	//
+	// For dynamic tables, entries are evicted from ents[0] and added to the end.
+	// Each entry has a unique id that starts at one and increments for each
+	// entry that is added. This unique id is stable across evictions, meaning
+	// it can be used as a pointer to a specific entry. As in hpack, unique ids
+	// are 1-based. The unique id for ents[k] is k + evictCount + 1.
+	//
+	// Zero is not a valid unique id.
+	//
+	// evictCount should not overflow in any remotely practical situation. In
+	// practice, we will have one dynamic table per HTTP/2 connection.
If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. +func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. 
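The id-to-index arithmetic described above can be illustrated with a small standalone sketch; the helper below is hypothetical and not part of the vendored table implementation:

```
// Sketch: map a stable unique id to a 1-based HPACK index in a dynamic table.
package main

import "fmt"

func dynamicIndex(id, evictCount, tableLen uint64) uint64 {
	k := id - evictCount - 1 // position in ents: the entry lives at ents[k]
	return tableLen - k      // HPACK index; 1 always refers to the newest entry
}

func main() {
	// A table that has evicted 2 entries and currently holds 3:
	// ids 3, 4, 5 occupy ents[0..2]; the newest (id 5) has HPACK index 1.
	for id := uint64(3); id <= 5; id++ {
		fmt.Println(id, "->", dynamicIndex(id, 2, 3))
	}
}
```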
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = newStaticTable() + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + t.addEntry(pair(":authority", "")) + t.addEntry(pair(":method", "GET")) + t.addEntry(pair(":method", "POST")) + t.addEntry(pair(":path", "/")) + t.addEntry(pair(":path", "/index.html")) + t.addEntry(pair(":scheme", "http")) + t.addEntry(pair(":scheme", "https")) + t.addEntry(pair(":status", "200")) + t.addEntry(pair(":status", "204")) + t.addEntry(pair(":status", "206")) + t.addEntry(pair(":status", "304")) + t.addEntry(pair(":status", "400")) + t.addEntry(pair(":status", "404")) + t.addEntry(pair(":status", "500")) + t.addEntry(pair("accept-charset", "")) + t.addEntry(pair("accept-encoding", "gzip, deflate")) + t.addEntry(pair("accept-language", "")) + t.addEntry(pair("accept-ranges", "")) + t.addEntry(pair("accept", "")) + t.addEntry(pair("access-control-allow-origin", "")) + t.addEntry(pair("age", "")) + t.addEntry(pair("allow", "")) + t.addEntry(pair("authorization", "")) + t.addEntry(pair("cache-control", "")) + t.addEntry(pair("content-disposition", "")) + t.addEntry(pair("content-encoding", "")) + t.addEntry(pair("content-language", "")) + t.addEntry(pair("content-length", "")) + t.addEntry(pair("content-location", "")) + t.addEntry(pair("content-range", "")) + t.addEntry(pair("content-type", "")) + t.addEntry(pair("cookie", "")) + t.addEntry(pair("date", "")) + t.addEntry(pair("etag", "")) + t.addEntry(pair("expect", "")) + t.addEntry(pair("expires", "")) + t.addEntry(pair("from", "")) + t.addEntry(pair("host", "")) + t.addEntry(pair("if-match", "")) + t.addEntry(pair("if-modified-since", "")) + t.addEntry(pair("if-none-match", "")) + t.addEntry(pair("if-range", "")) + t.addEntry(pair("if-unmodified-since", "")) + t.addEntry(pair("last-modified", "")) + t.addEntry(pair("link", "")) + t.addEntry(pair("location", "")) + t.addEntry(pair("max-forwards", "")) + t.addEntry(pair("proxy-authenticate", "")) + t.addEntry(pair("proxy-authorization", "")) + t.addEntry(pair("range", "")) + t.addEntry(pair("referer", "")) + t.addEntry(pair("refresh", "")) + t.addEntry(pair("retry-after", "")) + t.addEntry(pair("server", "")) + t.addEntry(pair("set-cookie", "")) + t.addEntry(pair("strict-transport-security", "")) + t.addEntry(pair("transfer-encoding", "")) + t.addEntry(pair("user-agent", "")) + t.addEntry(pair("vary", "")) + t.addEntry(pair("via", "")) + t.addEntry(pair("www-authenticate", "")) + return t +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 
0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 000000000..b6b0f9ad1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,387 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. +// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/lex/httplex" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. 
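A small, hypothetical use of the exported Setting type and the Valid check documented above (not part of the patch):

```
// Sketch: construct a SETTINGS parameter and validate it.
package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	s := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 20}
	fmt.Println(s)         // [MAX_FRAME_SIZE = 1048576]
	fmt.Println(s.Valid()) // nil: 1<<20 lies within the allowed 16384..1<<24-1 range
}
```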
+func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httplex.ValidHeaderName for the base rules. +// +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validWireHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !httplex.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { + return false + } + } + return true +} + +var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) + +func init() { + for i := 100; i <= 999; i++ { + if v := http.StatusText(i); v != "" { + httpCodeStringCommon[i] = strconv.Itoa(i) + } + } +} + +func httpCodeString(code int) string { + if s, ok := httpCodeStringCommon[code]; ok { + return s + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. 
+type bufferedWriter struct { + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) + }, +} + +func (w *bufferedWriter) Available() int { + if w.bw == nil { + return bufWriterPoolBufferSize + } + return w.bw.Available() +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 2616, section 4.4. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owns, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/', but not with with "//", +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. 
+// See golang.org/issue/16847 +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*" +} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 000000000..efd2e1282 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,46 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.6 + +package http2 + +import ( + "crypto/tls" + "net/http" + "time" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return 0 + +} + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +func isBadCipher(cipher uint16) bool { + switch cipher { + case tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + // Reject cipher suites from Appendix A. + // "This list includes those cipher suites that do not + // offer an ephemeral key exchange and those that are + // based on the TLS null, stream or block cipher type" + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 000000000..140434a79 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package http2 + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +type contextContext interface { + Done() <-chan struct{} + Err() error +} + +type fakeContext struct{} + +func (fakeContext) Done() <-chan struct{} { return nil } +func (fakeContext) Err() error { panic("should not be called") } + +func reqContext(r *http.Request) fakeContext { + return fakeContext{} +} + +func setResponseUncompressed(res *http.Response) { + // Nothing. 
+} + +type clientTrace struct{} + +func requestTrace(*http.Request) *clientTrace { return nil } +func traceGotConn(*http.Request, *ClientConn) {} +func traceFirstResponseByte(*clientTrace) {} +func traceWroteHeaders(*clientTrace) {} +func traceWroteRequest(*clientTrace, error) {} +func traceGot100Continue(trace *clientTrace) {} +func traceWait100Continue(trace *clientTrace) {} + +func nop() {} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + return nil, nop +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return ctx, nop +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req +} + +// temporary copy of Go 1.6's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} + +func (cc *ClientConn) Ping(ctx contextContext) error { + return cc.ping(ctx) +} + +func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go new file mode 100644 index 000000000..efbf83c32 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go18.go @@ -0,0 +1,27 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package http2 + +import ( + "io" + "net/http" +) + +func configureServer18(h1 *http.Server, h2 *Server) error { + // No IdleTimeout to sync prior to Go 1.8. + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return nil +} + +func reqBodyIsNoBody(io.ReadCloser) bool { return false } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 000000000..914aaf8a7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,153 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer + err error // read error once empty. non-nil means closed. 
+ breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 000000000..029ed958a --- /dev/null +++ b/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2778 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
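The pipe above coordinates a single buffer between the frame-reading goroutine and a handler using a mutex and a sync.Cond rather than channels, so Read can block until data, a terminal error, or a break error arrives. The toy version below keeps only that wait/signal shape; miniPipe and its methods are invented for illustration and omit breakErr, donec, readFn and the pluggable pipeBuffer:

```
package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// miniPipe is a stripped-down illustration of the cond-based pattern:
// Read blocks until data arrives or the pipe is closed with an error.
type miniPipe struct {
	mu  sync.Mutex
	c   sync.Cond // c.L set to &mu in newMiniPipe
	b   bytes.Buffer
	err error // read error once the buffer is empty
}

func newMiniPipe() *miniPipe {
	p := &miniPipe{}
	p.c.L = &p.mu
	return p
}

func (p *miniPipe) Write(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	defer p.c.Signal() // wake a blocked reader
	if p.err != nil {
		return 0, p.err
	}
	return p.b.Write(d)
}

func (p *miniPipe) CloseWithError(err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	defer p.c.Signal()
	if p.err == nil {
		p.err = err
	}
}

func (p *miniPipe) Read(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for {
		if p.b.Len() > 0 {
			return p.b.Read(d)
		}
		if p.err != nil {
			return 0, p.err
		}
		p.c.Wait()
	}
}

func main() {
	p := newMiniPipe()
	go func() {
		p.Write([]byte("request body"))
		p.CloseWithError(io.EOF)
	}()
	b, _ := io.ReadAll(p) // io.ReadAll treats io.EOF as a clean end
	fmt.Println(string(b))
}
```

Because the reader re-checks its conditions under the lock before calling Wait, a Signal sent while it is copying data cannot be lost.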
+ +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. 
PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen. + NewWriteScheduler func() WriteScheduler +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. +func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + if err := configureServer18(s, conf); err != nil { + return err + } + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256. + const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + if cs == requiredCipher { + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. 
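ConfigureServer and the Server fields above are the knobs most callers touch. A hedged usage sketch, assuming the package is imported as golang.org/x/net/http2; the address, certificate paths and limits are placeholders, and zero values fall back to the defaults described in the comments above:

```
package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

	// Wire up HTTP/2 with a few of the options documented above.
	if err := http2.ConfigureServer(srv, &http2.Server{
		MaxConcurrentStreams: 250,
		MaxReadFrameSize:     1 << 20,         // 1 MiB, within the 16 KiB..16 MiB range
		IdleTimeout:          5 * time.Minute, // idle connections get a GOAWAY after this
	}); err != nil {
		log.Fatal(err)
	}

	// cert.pem and key.pem are placeholder paths for this sketch.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```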
+ + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + wantStartPushCh: make(chan startPushRequest, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. 
+ if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. + } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. 
+ sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx contextContext + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wantStartPushCh chan startPushRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + testHookCh chan func(int) // code to run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? + clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimerCh <-chan time.Time // nil until used + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + idleTimerCh <-chan time.Time // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. 
Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. +type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx contextContext + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. 
+func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
+ if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + }, + }) + sc.unackedSettings++ + + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout) + defer sc.idleTimer.Stop() + sc.idleTimerCh = sc.idleTimer.C + } + + var gracefulShutdownCh <-chan struct{} + if sc.hs != nil { + gracefulShutdownCh = h1ServerShutdownChan(sc.hs) + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.NewTimer(firstSettingsTimeout) + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) + case spr := <-sc.wantStartPushCh: + sc.startPush(spr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer.C != nil { + settingsTimer.Stop() + settingsTimer.C = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case <-settingsTimer.C: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case <-gracefulShutdownCh: + gracefulShutdownCh = nil + sc.startGracefulShutdown() + case <-sc.shutdownTimerCh: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case <-sc.idleTimerCh: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case fn := <-sc.testHookCh: + fn(loopNum) + } + + if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { + return + } + } +} + +// readPreface reads the ClientPreface greeting from the peer +// or returns an error on timeout or an invalid greeting. 
+func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errors.New("timeout waiting for client preface") + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. +// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. 
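readPreface above bounds a blocking read by running it in a separate goroutine and racing the result against a timer. The same shape, extracted into a generic helper (readFullWithTimeout is an illustrative name, not part of this package):

```
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"time"
)

// readFullWithTimeout does the blocking read in its own goroutine and
// races it against a timer, so a silent peer cannot hold the caller forever.
func readFullWithTimeout(r io.Reader, n int, d time.Duration) ([]byte, error) {
	errc := make(chan error, 1) // buffered so the goroutine can always deliver
	buf := make([]byte, n)
	go func() {
		_, err := io.ReadFull(r, buf)
		errc <- err
	}()
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-timer.C:
		return nil, errors.New("timeout waiting for peer")
	case err := <-errc:
		if err != nil {
			return nil, err
		}
		return buf, nil
	}
}

func main() {
	b, err := readFullWithTimeout(strings.NewReader("PRI * HTTP/2.0"), 3, time.Second)
	fmt.Println(string(b), err) // PRI <nil>
}
```

The error channel is buffered so the reading goroutine can still deliver its result even if the timeout has already won the select.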
+ // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. (We never send PRIORITY from the server, so that is not checked.) + default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) + } + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr, err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// the main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. 
+func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + sc.writingFrameAsync = false + + wr := res.wr + + if writeEndsStream(wr.write) { + st := wr.stream + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } + } + + // Reply (if requested) to unblock the ServeHTTP goroutine. + wr.replyToWriter(res.err) + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. +// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. +// +// If a frame isn't being written we need to send one, the best frame +// to send is selected, preferring first things that aren't +// stream-specific (e.g. ACKing settings), and then finding the +// highest priority stream. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. +func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame || sc.inFrameScheduleLoop { + return + } + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break + } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the +// client we're gracefully shutting down. The connection isn't closed +// until all current streams are done. 
+func (sc *serverConn) startGracefulShutdown() { + sc.goAwayIn(ErrCodeNo, 0) +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + var forceCloseIn time.Duration + if code != ErrCodeNo { + forceCloseIn = 250 * time.Millisecond + } else { + // TODO: configurable + forceCloseIn = 1 * time.Second + } + sc.goAwayIn(code, forceCloseIn) +} + +func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { + sc.serveG.check() + if sc.inGoAway { + return + } + if forceCloseIn != 0 { + sc.shutDownIn(forceCloseIn) + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.NewTimer(d) + sc.shutdownTimerCh = sc.shutdownTimer.C +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(FrameWriteRequest{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.resetQueued = true + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. 
Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." + return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdown() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. 
+ // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." 
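processSettingInitialWindowSize above re-bases every open stream's send window by the difference between the new and old SETTINGS_INITIAL_WINDOW_SIZE, which may be negative. A few concrete, purely illustrative numbers:

```
package main

import "fmt"

func main() {
	// Every open stream's send window is credited (or debited) by the
	// difference between the new and old initial window size.
	old := int32(65535)     // RFC 7540 default
	newVal := int32(131072) // value from the peer's SETTINGS frame
	growth := newVal - old  // +65537; negative if the peer shrinks the window

	streamWindows := []int32{10000, 65535} // partially consumed stream windows
	for i := range streamWindows {
		streamWindows[i] += growth
	}
	fmt.Println(streamWindows) // [75537 131072]
}
```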
+ return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + return streamError(id, ErrCodeStreamClosed) + } + if f.Length > 0 { + // Check whether the client has flow control quota. + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) + } + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdown() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. 
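endStream above closes the request body pipe and arranges for copyTrailersToHandlerRequest (just below) to copy pre-declared trailers into Request.Trailer right before the handler's body read returns io.EOF. From a handler's point of view, trailers are therefore only visible after the body has been read to EOF; a minimal sketch with a placeholder route and address:

```
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Request trailers are only populated after the body is read to EOF;
	// for HTTP/2 that is when the pre-declared trailer keys get copied in.
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "read %d body bytes\n", len(body))
	for k, vv := range r.Trailer {
		fmt.Fprintf(w, "trailer %s: %v\n", k, vv)
	}
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(":8080", nil)) // placeholder address
}
```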
+func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://tools.ietf.org/html/rfc7540#section-5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxClientStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxClientStreamID = id + + if sc.idleTimer != nil { + sc.idleTimer.Stop() + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. 
+ handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. 
+ return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := contextWithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." 
+ // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = requestWithContext(req, st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +// Run on its own goroutine. 
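+// It recovers from panics in the Handler, resets the affected stream
+// with RST_STREAM, and logs the panic the same way net/http does.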
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + sc.writeFrameFromHandler(FrameWriteRequest{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + // Same as net/http: + if shouldLogPanic(e) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "
<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(FrameWriteRequest{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { + sc.serveG.checkNotOn() // NOT on + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. 
+type requestBody struct { + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !ValidTrailerHeader(k) { + // Forbidden by RFC 2616 14.40. + rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. +// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. 
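+// On that first chunk it also fills in Content-Length and Content-Type
+// where they can be inferred and decides whether the HEADERS frame can
+// carry END_STREAM.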
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 2616 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. 
That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclarnig them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + +func (w *responseWriter) Flush() { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.bw.Buffered() > 0 { + if err := rws.bw.Flush(); err != nil { + // Ignore the error. The frame writer already knows. + return + } + } else { + // The bufio.Writer won't call chunkWriter.Write + // (writeChunk with zero bytes, so we have to do it + // ourselves to force the HTTP response header and/or + // final DATA frame (with END_STREAM) to be sent. + rws.writeChunk(nil) + } +} + +func (w *responseWriter) CloseNotify() <-chan bool { + rws := w.rws + if rws == nil { + panic("CloseNotify called after Handler finished") + } + rws.closeNotifierMu.Lock() + ch := rws.closeNotifierCh + if ch == nil { + ch = make(chan bool, 1) + rws.closeNotifierCh = ch + cw := rws.stream.cw + go func() { + cw.Wait() // wait for close + ch <- true + }() + } + rws.closeNotifierMu.Unlock() + return ch +} + +func (w *responseWriter) Header() http.Header { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.handlerHeader == nil { + rws.handlerHeader = make(http.Header) + } + return rws.handlerHeader +} + +func (w *responseWriter) WriteHeader(code int) { + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler migth call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. 
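+// write enforces any Content-Length the handler declared before buffering
+// the data into rws.bw.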
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + rws.handlerDone = true + w.Flush() + w.rws = nil + responseWriterStatePool.Put(rws) +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +// pushOptions is the internal version of http.PushOptions, which we +// cannot include here because it's only defined in Go 1.8 and later. +type pushOptions struct { + Method string + Header http.Header +} + +func (w *responseWriter) push(target string, opts pushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. + u, err := url.Parse(target) + if err != nil { + return err + } + if u.Scheme == "" { + if !strings.HasPrefix(target, "/") { + return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) + } + u.Scheme = wantScheme + u.Host = w.rws.req.Host + } else { + if u.Scheme != wantScheme { + return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) + } + if u.Host == "" { + return errors.New("URL must have a host") + } + } + for k := range opts.Header { + if strings.HasPrefix(k, ":") { + return fmt.Errorf("promised request headers cannot include pseudo header %q", k) + } + // These headers are meaningful only if the request has a body, + // but PUSH_PROMISE requests cannot have a body. + // http://tools.ietf.org/html/rfc7540#section-8.2 + // Also disallow Host, since the promised URL must be absolute. 
+ switch strings.ToLower(k) { + case "content-length", "content-encoding", "trailer", "te", "expect", "host": + return fmt.Errorf("promised request headers cannot include %q", k) + } + } + if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { + return err + } + + // The RFC effectively limits promised requests to GET and HEAD: + // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" + // http://tools.ietf.org/html/rfc7540#section-8.2 + if opts.Method != "GET" && opts.Method != "HEAD" { + return fmt.Errorf("method %q must be GET or HEAD", opts.Method) + } + + msg := startPushRequest{ + parent: st, + method: opts.Method, + url: u, + header: cloneHeader(opts.Header), + done: errChanPool.Get().(chan error), + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case sc.wantStartPushCh <- msg: + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case err := <-msg.done: + errChanPool.Put(msg.done) + return err + } +} + +type startPushRequest struct { + parent *stream + method string + url *url.URL + header http.Header + done chan error +} + +func (sc *serverConn) startPush(msg startPushRequest) { + sc.serveG.check() + + // http://tools.ietf.org/html/rfc7540#section-6.6. + // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that + // is in either the "open" or "half-closed (remote)" state. + if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { + // responseWriter.Push checks that the stream is peer-initiaed. + msg.done <- errStreamClosed + return + } + + // http://tools.ietf.org/html/rfc7540#section-6.6. + if !sc.pushEnabled { + msg.done <- http.ErrNotSupported + return + } + + // PUSH_PROMISE frames must be sent in increasing order by stream ID, so + // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE + // is written. Once the ID is allocated, we start the request handler. + allocatePromisedID := func() (uint32, error) { + sc.serveG.check() + + // Check this again, just in case. Technically, we might have received + // an updated SETTINGS by the time we got around to writing this frame. + if !sc.pushEnabled { + return 0, http.ErrNotSupported + } + // http://tools.ietf.org/html/rfc7540#section-6.5.2. + if sc.curPushedStreams+1 > sc.clientMaxStreams { + return 0, ErrPushLimitReached + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.1. + // Streams initiated by the server MUST use even-numbered identifiers. + // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdown() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. 
+ promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 2616 section 2.1 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. +// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 +func ValidTrailerHeader(name string) bool { + name = http.CanonicalHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} + +// h1ServerShutdownChan returns a channel that will be closed when the +// provided *http.Server wants to shut down. +// +// This is a somewhat hacky way to get at http1 innards. It works +// when the http2 code is bundled into the net/http package in the +// standard library. The alternatives ended up making the cmd/go tool +// depend on http Servers. This is the lightest option for now. +// This is tested via the TestServeShutdown* tests in net/http. 
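+// When bundled into net/http, the interface assertion below matches the
+// unexported getDoneChan method on *http.Server; otherwise it fails and
+// the function falls back to returning nil.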
+func h1ServerShutdownChan(hs *http.Server) <-chan struct{} { + if fn := testh1ServerShutdownChan; fn != nil { + return fn(hs) + } + var x interface{} = hs + type I interface { + getDoneChan() <-chan struct{} + } + if hs, ok := x.(I); ok { + return hs.getDoneChan() + } + return nil +} + +// optional test hook for h1ServerShutdownChan. +var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 000000000..84d042d46 --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,2128 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "net" + "net/http" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" + "golang.org/x/net/lex/httplex" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. 
+ AllowHTTP bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allow. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). + t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It requires Go 1.6 or later and returns an error if the net/http package is too old +// or if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + singleUse bool // whether being used for a single http.Request + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) + maxFrameSize uint32 + maxConcurrentStreams uint32 + initialWindowSize uint32 + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. 
+type clientStream struct { + cc *ClientConn + req *http.Request + trace *clientTrace // or nil + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + startedWrite bool // started request body write; guarded by cc.mu + requestedGzip bool + on100 func() // optional code to run if get a 100 continue response + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel runs in its own goroutine and waits for the user +// to cancel a RoundTrip request, its context to expire, or for the +// request to be done (any way it might be removed from the cc.streams +// map: peer reset, successful completion, TCP connection breakage, +// etc) +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + ctx := reqContext(req) + if req.Cancel == nil && ctx.Done() == nil { + return + } + select { + case <-req.Cancel: + cs.cancelStream() + cs.bufPipe.CloseWithError(errRequestCanceled) + case <-ctx.Done(): + cs.cancelStream() + cs.bufPipe.CloseWithError(ctx.Err()) + case <-cs.done: + } +} + +func (cs *clientStream) cancelStream() { + cs.cc.mu.Lock() + didReset := cs.didReset + cs.didReset = true + cs.cc.mu.Unlock() + + if !didReset { + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } +} + +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete. +func (cs *clientStream) checkResetOrDone() error { + select { + case <-cs.peerReset: + return cs.resetErr + case <-cs.done: + return errStreamClosed + default: + return nil + } +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +var ErrNoCachedConn = errors.New("http2: no cached connection was available") + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. + OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. 
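+// For example, with scheme "https", "example.com" becomes "example.com:443"
+// and "[::1]" becomes "[::1]:443".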
+func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + for { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + traceGotConn(req, cc) + res, err := cc.RoundTrip(req) + if err != nil { + if req, err = shouldRetryRequest(req, err); err == nil { + continue + } + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written") +) + +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { + switch err { + default: + return nil, err + case errClientConnUnusable, errClientConnGotGoAway: + return req, nil + case errClientConnGotGoAwayAfterSomeReqBody: + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return req, nil + } + // Otherwise we depend on the Request having its GetBody + // func defined. 
+ getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody + if getBody == nil { + return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error") + } + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil + } +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *cloneTLSConfig(t.TLSClientConfig) + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return transportExpectContinueTimeout(t.t1) +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. 
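+	// stickyErrWriter records the first write error in cc.werr so that
+	// subsequent writes fail fast with the same error.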
+ cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + + old := cc.goAway + cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } +} + +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + if cc.singleUse && cc.nextStreamID > 1 { + return false + } + return cc.goAway == nil && !cc.closed && + int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && + cc.nextStreamID < math.MaxInt32 +} + +// onIdleTimeout is called from a time.AfterFunc goroutine. It will +// only be called when we're idle, but because we're coming from a new +// goroutine, there could be a new request coming in at the same time, +// so this simply calls the synchronized closeIfIdle to shut down this +// connection. The timer could just call closeIfIdle, but this is more +// clear. +func (cc *ClientConn) onIdleTimeout() { + cc.closeIfIdle() +} + +func (cc *ClientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + nextID := cc.nextStreamID + // TODO: do clients send GOAWAY too? maybe? Just Close: + cc.mu.Unlock() + + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) + } + cc.tconn.Close() +} + +const maxAllocFrameSize = 512 << 10 + +// frameBuffer returns a scratch buffer suitable for writing DATA frames. +// They're capped at the min of the peer's max frame size or 512KB +// (kinda arbitrarily), but definitely capped so we don't allocate 4GB +// bufers. 
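+// Returned buffers are recycled via putFrameScratchBuffer, which keeps at
+// most four of them per connection.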
+func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. + if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. +var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. +func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) + } + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) + } + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) + } + return nil +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := checkConnHeaders(req); err != nil { + return nil, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + cc.mu.Unlock() + return nil, errClientConnUnusable + } + + body := req.Body + hasBody := body != nil + contentLen := actualContentLength(req) + + // TODO(bradfitz): this is a copy of the logic in net/http. 
Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = requestTrace(req) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := reqContext(req) + + handleReadLoopResponse := func(re resAndError) (*http.Response, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. 
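+			// For example, a 404 that arrives while a large request body is
+			// still being streamed aborts the rest of the body write, whereas
+			// a 200 lets the body keep flowing (full-duplex streaming).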
+ bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + if re.err == errClientConnGotGoAway { + cc.mu.Lock() + if cs.startedWrite { + re.err = errClientConnGotGoAwayAfterSomeReqBody + } + cc.mu.Unlock() + } + cc.forgetStreamID(cs.ID) + return nil, re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errTimeout + case <-ctx.Done(): + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, ctx.Err() + case <-req.Cancel: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + return nil, err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + frameSize := int(cc.maxFrameSize) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > frameSize { + chunk = chunk[:frameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. 
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + traceWroteRequest(cs.trace, err) + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf) + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagel-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + defer cc.mu.Unlock() + trls = cc.encodeTrailers(req) + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. 
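+// encodeHeaders emits the pseudo-header fields (:authority, :method, and for
+// non-CONNECT requests :path and :scheme) before the lower-cased regular
+// header fields, as RFC 7540 Section 8.1.2.1 requires, and returns the
+// resulting HPACK-encoded block.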
+func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httplex.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httplex.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + cc.writeHeader(":authority", host) + cc.writeHeader(":method", req.Method) + if req.Method != "CONNECT" { + cc.writeHeader(":path", path) + cc.writeHeader(":scheme", req.URL.Scheme) + } + if trailers != "" { + cc.writeHeader("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + switch lowKey { + case "host", "content-length": + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + case "user-agent": + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + cc.writeHeader("accept-encoding", "gzip") + } + if !didUA { + cc.writeHeader("user-agent", defaultUserAgent) + } + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. 
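+	// For example, an empty-body POST or PUT still sends "content-length: 0",
+	// while an empty-body GET sends no content-length header at all.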
+ switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { + cc.hbuf.Reset() + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filter at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes() +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. +func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() + delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } + close(cs.done) + cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + activeRes map[uint32]*clientStream // keyed by streamID + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{ + cc: cc, + activeRes: make(map[uint32]*clientStream), + } + + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. 
+ err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range rl.activeRes { + cs.bufPipe.CloseWithError(err) + } + for _, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. 
+ return nil + } + if res.Body != noBody { + rl.activeRes[cs.ID] = cs + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + setResponseUncompressed(res) + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. 
+ return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. + v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() + } + + cs.bufPipe.BreakWithError(errClosedResponseBody) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. 
+ + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if f.Length > 0 { + if len(data) > 0 && cs.bufPipe.b == nil { + // Data frame after it's already closed? + cc.logf("http2: Transport received DATA frame for closed stream; closing connection") + return ConnectionError(ErrCodeProtocol) + } + + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + cs.inflow.add(pad) + cc.inflow.add(pad) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(pad)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) + cc.bw.Flush() + cc.wmu.Unlock() + } + didReset := cs.didReset + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + cs.bufPipe.closeWithErrorAndCode(err, code) + delete(rl.activeRes, cs.ID) + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
+ cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + cc.fr.WriteSettingsAck() + cc.bw.Flush() + return cc.werr +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. + default: + err := streamError(cs.ID, f.ErrCode) + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + } + delete(rl.activeRes, cs.ID) + return nil +} + +// Ping sends a PING frame to the server and waits for the ack. +// Public implementation is in go17.go and not_go17.go +func (cc *ClientConn) ping(ctx contextContext) error { + c := make(chan struct{}) + // Generate a random payload + var p [8]byte + for { + if _, err := rand.Read(p[:]); err != nil { + return err + } + cc.mu.Lock() + // check for dup before insert + if _, found := cc.pings[p]; !found { + cc.pings[p] = c + cc.mu.Unlock() + break + } + cc.mu.Unlock() + } + cc.wmu.Lock() + if err := cc.fr.WritePing(false, p); err != nil { + cc.wmu.Unlock() + return err + } + if err := cc.bw.Flush(); err != nil { + cc.wmu.Unlock() + return err + } + cc.wmu.Unlock() + select { + case <-c: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: + // connection closed + return cc.readerErr + } +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + // If ack, notify listener if any + if c, ok := cc.pings[f.Data]; ok { + close(c) + delete(cc.pings, f.Data) + } + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). 
+ cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httplex.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. + const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. 
+ return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 000000000..6b0dfae31 --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,370 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + "time" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. 
+ panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) +} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + if p.code != 0 { + ctx.Flush() // ignore error: we're hanging up on them anyway + time.Sleep(50 * time.Millisecond) + ctx.CloseConn() + } + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. 
+ const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. + endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // uppper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. 
+ allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only only if k is in keys. +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 000000000..4fe307307 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,242 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. 
+ // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. 
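+//
+// For example, consuming n=1000 from a queued 3000-byte DATA frame (assuming
+// the stream's flow-control window and max frame size both allow it) returns
+// a 1000-byte 'consumed' frame, a 2000-byte 'rest' frame, and 2.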
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { + return + } + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) + } + wr.write = nil // prevent use (assume it's tainted after wr.done send) +} + +// writeQueue is used by implementations of WriteScheduler. +type writeQueue struct { + s []FrameWriteRequest +} + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) +} + +func (q *writeQueue) shift() FrameWriteRequest { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr +} + +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false + } + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. 
+func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 000000000..01132721b --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7340 Section 5.3. +// If cfg is nil, default options are used. 
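+//
+// Illustrative use with the defaults (a sketch, not an upstream example):
+//
+//	ws := NewPriorityWriteScheduler(nil) // retains up to 10 closed and 10 idle nodes, no write throttling
+//	ws.OpenStream(1, OpenStreamOptions{})
+//	ws.AdjustStream(1, PriorityParam{Weight: 31}) // effective weight 32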
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteScheduler{ + nodes: make(map[uint32]*priorityNode), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeState int + +const ( + priorityNodeOpen priorityNodeState = iota + priorityNodeClosed + priorityNodeIdle +) + +// priorityNode is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNode struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeState // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNode + kids *priorityNode // start of the kids list + prev, next *priorityNode // doubly-linked list of siblings +} + +func (n *priorityNode) setParent(parent *priorityNode) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNode) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this funcion returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpen) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. 
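+	// For example, siblings with stored weights 255 and 63 have effective
+	// weights 256 and 64; with equal non-zero subtreeBytes, the sort below
+	// places the weight-256 node first, giving it roughly 4x the bandwidth
+	// over time.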
+ w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." 
+ parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 000000000..36d7919f1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. + // When a stream is idle or closed, it's deleted from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. 
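+	// Go randomizes map iteration order, so successive calls visit the
+	// per-stream queues in an unpredictable order; that is the only sense
+	// in which this scheduler is "random".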
+ for _, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 000000000..ee2dbda6d --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,668 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in http://www.unicode.org/reports/tr46. +// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. 
+func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissable ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (string, error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of a IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. +// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. 
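+//
+// A minimal sketch, using only options defined in this file:
+//
+//	p := New(MapForLookup(), BidiRule(), VerifyDNSLength(true))
+//	a, err := p.ToASCII("bücher.example.com") // "xn--bcher-kva.example.com"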
+func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. + Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see http://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + if p.mapping != nil { + s, err = p.mapping(p, s) + } + // Remove leading empty labels. 
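+	// e.g. "..example.com" becomes "example.com" before the labels are walked.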
+ for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. + if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (string, error) { + return norm.NFC.String(s), nil +} + +func validateRegistration(p *Profile, s string) (string, error) { + if !norm.NFC.IsNormalString(s) { + return s, &labelError{s, "V1"} + } + var err error + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[i:]) + err = runeError(r) + } + } + } + return s, err +} + +func validateAndMap(p *Profile, s string) (string, error) { + var ( + err error + b []byte + k int + ) + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[i:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + s = norm.NFC.String(s) + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, err +} + +// A labelIter allows iterating over domain name labels. 
+type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. 
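+// For example, with ValidateLabels enabled, a label with hyphens in the 3rd
+// and 4th positions (such as "ab--cd") is rejected with code V2, and a label
+// that starts or ends with a hyphen (such as "-abc") is rejected with code V3.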
+func (p *Profile) validateLabel(s string) error { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if p.bidirule != nil && !p.bidirule(s) { + return &labelError{s, "B"} + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. + if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 000000000..02c7d59af --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,203 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +func punyError(s string) error { return &labelError{s, "A3"} } + +// decode decodes a string as specified in section 6.2. 
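+// For example, decode("bcher-kva") is "bücher" (the ACE prefix "xn--" has
+// already been stripped by the caller), and decode("") is "".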
+func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", punyError(encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", punyError(encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", punyError(encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", punyError(encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", punyError(encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", punyError(encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. +func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", punyError(s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", punyError(s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. 
+func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables.go new file mode 100644 index 000000000..d2819345f --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables.go @@ -0,0 +1,4477 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +var mappings string = "" + // Size: 8176 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" 
+ + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + 
"\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" 
+ + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" 
+ + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + 
"\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := idnaIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := idnaIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = idnaIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := idnaIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = idnaIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = idnaIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return idnaValues[c0]
+	}
+	i := idnaIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// idnaTrie. Total size: 28496 bytes (27.83 KiB). Checksum: 43288b883596640e.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+	return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	case n < 123:
+		return uint16(idnaValues[n<<6+uint32(b)])
+	default:
+		n -= 123
+		return uint16(idnaSparse.lookup(n, b))
+	}
+}
+
+// idnaValues: 125 blocks, 8000 entries, 16000 bytes
+// The third block is the zero block.
+var idnaValues = [8000]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x1308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x1308, 0x287: 0x1308, 0x288: 0x1308, 0x289: 0x1308, 0x28a: 0x1308, 0x28b: 0x1308, + 0x28c: 0x1308, 0x28d: 0x1308, 0x28e: 0x1308, 0x28f: 0x13c0, 0x290: 0x1308, 0x291: 0x1308, + 0x292: 0x1308, 0x293: 0x1308, 0x294: 0x1308, 0x295: 0x1308, 0x296: 0x1308, 0x297: 0x1308, + 0x298: 0x1308, 0x299: 0x1308, 0x29a: 0x1308, 0x29b: 0x1308, 0x29c: 0x1308, 0x29d: 0x1308, + 0x29e: 0x1308, 0x29f: 0x1308, 0x2a0: 0x1308, 0x2a1: 0x1308, 0x2a2: 0x1308, 0x2a3: 0x1308, + 0x2a4: 0x1308, 0x2a5: 0x1308, 0x2a6: 0x1308, 0x2a7: 0x1308, 0x2a8: 0x1308, 0x2a9: 0x1308, + 0x2aa: 0x1308, 0x2ab: 0x1308, 0x2ac: 0x1308, 0x2ad: 0x1308, 0x2ae: 0x1308, 0x2af: 0x1308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x1308, 0x384: 0x1308, 0x385: 0x1308, + 0x386: 0x1308, 0x387: 0x1308, 0x388: 0x1318, 0x389: 0x1318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0040, 0x441: 0x0040, 0x442: 0x0040, 0x443: 0x0040, 0x444: 0x0040, 0x445: 0x0040, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0018, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0018, + 0x44c: 0x0018, 0x44d: 0x0018, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x1308, 0x451: 0x1308, + 0x452: 0x1308, 0x453: 0x1308, 0x454: 0x1308, 0x455: 0x1308, 0x456: 0x1308, 0x457: 0x1308, + 0x458: 0x1308, 0x459: 0x1308, 0x45a: 0x1308, 0x45b: 0x0018, 0x45c: 0x0340, 0x45d: 0x0040, + 0x45e: 0x0018, 0x45f: 0x0018, 0x460: 0x0208, 0x461: 0x0008, 0x462: 0x0408, 0x463: 0x0408, + 0x464: 0x0408, 0x465: 0x0408, 0x466: 0x0208, 0x467: 0x0408, 0x468: 0x0208, 0x469: 0x0408, + 0x46a: 0x0208, 0x46b: 0x0208, 0x46c: 0x0208, 0x46d: 0x0208, 0x46e: 0x0208, 0x46f: 0x0408, + 0x470: 0x0408, 0x471: 0x0408, 0x472: 0x0408, 0x473: 0x0208, 0x474: 0x0208, 0x475: 0x0208, + 0x476: 0x0208, 0x477: 0x0208, 0x478: 0x0208, 0x479: 0x0208, 0x47a: 0x0208, 0x47b: 0x0208, + 0x47c: 0x0208, 0x47d: 0x0208, 0x47e: 0x0208, 0x47f: 0x0208, + // Block 0x12, offset 0x480 + 0x480: 0x0408, 0x481: 0x0208, 0x482: 0x0208, 0x483: 0x0408, 0x484: 0x0408, 0x485: 0x0408, + 0x486: 0x0408, 0x487: 0x0408, 0x488: 0x0408, 0x489: 0x0408, 0x48a: 0x0408, 0x48b: 0x0408, + 0x48c: 0x0208, 0x48d: 0x0408, 0x48e: 0x0208, 0x48f: 0x0408, 0x490: 0x0208, 0x491: 0x0208, + 0x492: 0x0408, 0x493: 0x0408, 0x494: 0x0018, 0x495: 0x0408, 0x496: 0x1308, 0x497: 0x1308, + 0x498: 0x1308, 0x499: 0x1308, 0x49a: 0x1308, 0x49b: 0x1308, 0x49c: 0x1308, 0x49d: 0x0040, + 0x49e: 0x0018, 0x49f: 0x1308, 0x4a0: 0x1308, 0x4a1: 0x1308, 0x4a2: 0x1308, 0x4a3: 0x1308, + 0x4a4: 0x1308, 0x4a5: 0x0008, 0x4a6: 0x0008, 0x4a7: 0x1308, 0x4a8: 0x1308, 0x4a9: 0x0018, + 0x4aa: 0x1308, 0x4ab: 0x1308, 0x4ac: 
0x1308, 0x4ad: 0x1308, 0x4ae: 0x0408, 0x4af: 0x0408, + 0x4b0: 0x0008, 0x4b1: 0x0008, 0x4b2: 0x0008, 0x4b3: 0x0008, 0x4b4: 0x0008, 0x4b5: 0x0008, + 0x4b6: 0x0008, 0x4b7: 0x0008, 0x4b8: 0x0008, 0x4b9: 0x0008, 0x4ba: 0x0208, 0x4bb: 0x0208, + 0x4bc: 0x0208, 0x4bd: 0x0008, 0x4be: 0x0008, 0x4bf: 0x0208, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0018, 0x4c1: 0x0018, 0x4c2: 0x0018, 0x4c3: 0x0018, 0x4c4: 0x0018, 0x4c5: 0x0018, + 0x4c6: 0x0018, 0x4c7: 0x0018, 0x4c8: 0x0018, 0x4c9: 0x0018, 0x4ca: 0x0018, 0x4cb: 0x0018, + 0x4cc: 0x0018, 0x4cd: 0x0018, 0x4ce: 0x0040, 0x4cf: 0x0340, 0x4d0: 0x0408, 0x4d1: 0x1308, + 0x4d2: 0x0208, 0x4d3: 0x0208, 0x4d4: 0x0208, 0x4d5: 0x0408, 0x4d6: 0x0408, 0x4d7: 0x0408, + 0x4d8: 0x0408, 0x4d9: 0x0408, 0x4da: 0x0208, 0x4db: 0x0208, 0x4dc: 0x0208, 0x4dd: 0x0208, + 0x4de: 0x0408, 0x4df: 0x0208, 0x4e0: 0x0208, 0x4e1: 0x0208, 0x4e2: 0x0208, 0x4e3: 0x0208, + 0x4e4: 0x0208, 0x4e5: 0x0208, 0x4e6: 0x0208, 0x4e7: 0x0208, 0x4e8: 0x0408, 0x4e9: 0x0208, + 0x4ea: 0x0408, 0x4eb: 0x0208, 0x4ec: 0x0408, 0x4ed: 0x0208, 0x4ee: 0x0208, 0x4ef: 0x0408, + 0x4f0: 0x1308, 0x4f1: 0x1308, 0x4f2: 0x1308, 0x4f3: 0x1308, 0x4f4: 0x1308, 0x4f5: 0x1308, + 0x4f6: 0x1308, 0x4f7: 0x1308, 0x4f8: 0x1308, 0x4f9: 0x1308, 0x4fa: 0x1308, 0x4fb: 0x1308, + 0x4fc: 0x1308, 0x4fd: 0x1308, 0x4fe: 0x1308, 0x4ff: 0x1308, + // Block 0x14, offset 0x500 + 0x500: 0x1008, 0x501: 0x1308, 0x502: 0x1308, 0x503: 0x1308, 0x504: 0x1308, 0x505: 0x1308, + 0x506: 0x1308, 0x507: 0x1308, 0x508: 0x1308, 0x509: 0x1008, 0x50a: 0x1008, 0x50b: 0x1008, + 0x50c: 0x1008, 0x50d: 0x1b08, 0x50e: 0x1008, 0x50f: 0x1008, 0x510: 0x0008, 0x511: 0x1308, + 0x512: 0x1308, 0x513: 0x1308, 0x514: 0x1308, 0x515: 0x1308, 0x516: 0x1308, 0x517: 0x1308, + 0x518: 0x04c9, 0x519: 0x0501, 0x51a: 0x0539, 0x51b: 0x0571, 0x51c: 0x05a9, 0x51d: 0x05e1, + 0x51e: 0x0619, 0x51f: 0x0651, 0x520: 0x0008, 0x521: 0x0008, 0x522: 0x1308, 0x523: 0x1308, + 0x524: 0x0018, 0x525: 0x0018, 0x526: 0x0008, 0x527: 0x0008, 0x528: 0x0008, 0x529: 0x0008, + 0x52a: 0x0008, 0x52b: 0x0008, 0x52c: 0x0008, 0x52d: 0x0008, 0x52e: 0x0008, 0x52f: 0x0008, + 0x530: 0x0018, 0x531: 0x0008, 0x532: 0x0008, 0x533: 0x0008, 0x534: 0x0008, 0x535: 0x0008, + 0x536: 0x0008, 0x537: 0x0008, 0x538: 0x0008, 0x539: 0x0008, 0x53a: 0x0008, 0x53b: 0x0008, + 0x53c: 0x0008, 0x53d: 0x0008, 0x53e: 0x0008, 0x53f: 0x0008, + // Block 0x15, offset 0x540 + 0x540: 0x0008, 0x541: 0x1308, 0x542: 0x1008, 0x543: 0x1008, 0x544: 0x0040, 0x545: 0x0008, + 0x546: 0x0008, 0x547: 0x0008, 0x548: 0x0008, 0x549: 0x0008, 0x54a: 0x0008, 0x54b: 0x0008, + 0x54c: 0x0008, 0x54d: 0x0040, 0x54e: 0x0040, 0x54f: 0x0008, 0x550: 0x0008, 0x551: 0x0040, + 0x552: 0x0040, 0x553: 0x0008, 0x554: 0x0008, 0x555: 0x0008, 0x556: 0x0008, 0x557: 0x0008, + 0x558: 0x0008, 0x559: 0x0008, 0x55a: 0x0008, 0x55b: 0x0008, 0x55c: 0x0008, 0x55d: 0x0008, + 0x55e: 0x0008, 0x55f: 0x0008, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x0008, 0x563: 0x0008, + 0x564: 0x0008, 0x565: 0x0008, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0040, + 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, + 0x570: 0x0008, 0x571: 0x0040, 0x572: 0x0008, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x1308, 0x57d: 0x0008, 0x57e: 0x1008, 0x57f: 0x1008, + // Block 0x16, offset 0x580 + 0x580: 0x1008, 0x581: 0x1308, 0x582: 0x1308, 0x583: 0x1308, 0x584: 0x1308, 0x585: 0x0040, + 0x586: 0x0040, 0x587: 0x1008, 0x588: 0x1008, 0x589: 0x0040, 0x58a: 0x0040, 0x58b: 0x1008, + 
0x58c: 0x1008, 0x58d: 0x1b08, 0x58e: 0x0008, 0x58f: 0x0040, 0x590: 0x0040, 0x591: 0x0040, + 0x592: 0x0040, 0x593: 0x0040, 0x594: 0x0040, 0x595: 0x0040, 0x596: 0x0040, 0x597: 0x1008, + 0x598: 0x0040, 0x599: 0x0040, 0x59a: 0x0040, 0x59b: 0x0040, 0x59c: 0x0689, 0x59d: 0x06c1, + 0x59e: 0x0040, 0x59f: 0x06f9, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x1308, 0x5a3: 0x1308, + 0x5a4: 0x0040, 0x5a5: 0x0040, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0008, 0x5b1: 0x0008, 0x5b2: 0x0018, 0x5b3: 0x0018, 0x5b4: 0x0018, 0x5b5: 0x0018, + 0x5b6: 0x0018, 0x5b7: 0x0018, 0x5b8: 0x0018, 0x5b9: 0x0018, 0x5ba: 0x0018, 0x5bb: 0x0018, + 0x5bc: 0x0040, 0x5bd: 0x0040, 0x5be: 0x0040, 0x5bf: 0x0040, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0040, 0x5c1: 0x1308, 0x5c2: 0x1308, 0x5c3: 0x1008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0040, + 0x5cc: 0x0040, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0731, 0x5f4: 0x0040, 0x5f5: 0x0008, + 0x5f6: 0x0769, 0x5f7: 0x0040, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x1308, 0x5fd: 0x0040, 0x5fe: 0x1008, 0x5ff: 0x1008, + // Block 0x18, offset 0x600 + 0x600: 0x1008, 0x601: 0x1308, 0x602: 0x1308, 0x603: 0x0040, 0x604: 0x0040, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x1308, 0x608: 0x1308, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x1308, + 0x60c: 0x1308, 0x60d: 0x1b08, 0x60e: 0x0040, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x1308, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x0040, + 0x618: 0x0040, 0x619: 0x07a1, 0x61a: 0x07d9, 0x61b: 0x0811, 0x61c: 0x0008, 0x61d: 0x0040, + 0x61e: 0x0849, 0x61f: 0x0040, 0x620: 0x0040, 0x621: 0x0040, 0x622: 0x0040, 0x623: 0x0040, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x1308, 0x631: 0x1308, 0x632: 0x0008, 0x633: 0x0008, 0x634: 0x0008, 0x635: 0x1308, + 0x636: 0x0040, 0x637: 0x0040, 0x638: 0x0040, 0x639: 0x0040, 0x63a: 0x0040, 0x63b: 0x0040, + 0x63c: 0x0040, 0x63d: 0x0040, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x1308, 0x642: 0x1308, 0x643: 0x1008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0008, + 0x64c: 0x0008, 0x64d: 0x0008, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0008, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0008, 0x677: 0x0008, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x1308, 0x67d: 0x0008, 0x67e: 0x1008, 0x67f: 0x1008, + // Block 0x1a, offset 0x680 + 0x680: 0x1008, 0x681: 0x1308, 0x682: 0x1308, 0x683: 0x1308, 0x684: 0x1308, 0x685: 0x1308, + 0x686: 0x0040, 0x687: 0x1308, 0x688: 0x1308, 0x689: 0x1008, 0x68a: 0x0040, 0x68b: 0x1008, + 0x68c: 0x1008, 0x68d: 0x1b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0008, 0x691: 0x0040, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x0040, 0x69a: 0x0040, 0x69b: 0x0040, 0x69c: 0x0040, 0x69d: 0x0040, + 0x69e: 0x0040, 0x69f: 0x0040, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x1308, 0x6a3: 0x1308, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x0018, 0x6b1: 0x0018, 0x6b2: 0x0040, 0x6b3: 0x0040, 0x6b4: 0x0040, 0x6b5: 0x0040, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x1308, 0x6c2: 0x1008, 0x6c3: 0x1008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0040, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0040, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x1308, 0x6fd: 0x0008, 0x6fe: 0x1008, 0x6ff: 0x1308, + // Block 0x1c, offset 0x700 + 0x700: 0x1008, 0x701: 0x1308, 0x702: 0x1308, 0x703: 0x1308, 0x704: 0x1308, 0x705: 0x0040, + 0x706: 0x0040, 0x707: 0x1008, 0x708: 0x1008, 0x709: 0x0040, 0x70a: 0x0040, 0x70b: 0x1008, + 0x70c: 0x1008, 0x70d: 0x1b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0040, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x1308, 0x717: 0x1008, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0881, 0x71d: 0x08b9, + 0x71e: 0x0040, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x1308, 0x723: 0x1308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0008, 0x732: 0x0018, 0x733: 0x0018, 0x734: 0x0018, 0x735: 0x0018, + 0x736: 0x0018, 0x737: 0x0018, 0x738: 0x0040, 0x739: 0x0040, 0x73a: 0x0040, 0x73b: 0x0040, + 0x73c: 0x0040, 0x73d: 0x0040, 0x73e: 0x0040, 0x73f: 0x0040, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x0040, 0x742: 0x1308, 0x743: 0x0008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0040, + 0x74c: 0x0040, 
0x74d: 0x0040, 0x74e: 0x0008, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0008, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0040, 0x757: 0x0040, + 0x758: 0x0040, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0040, 0x75c: 0x0008, 0x75d: 0x0040, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0040, 0x761: 0x0040, 0x762: 0x0040, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0040, 0x766: 0x0040, 0x767: 0x0040, 0x768: 0x0008, 0x769: 0x0008, + 0x76a: 0x0008, 0x76b: 0x0040, 0x76c: 0x0040, 0x76d: 0x0040, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0008, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0008, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x1008, 0x77f: 0x1008, + // Block 0x1e, offset 0x780 + 0x780: 0x1308, 0x781: 0x1008, 0x782: 0x1008, 0x783: 0x1008, 0x784: 0x1008, 0x785: 0x0040, + 0x786: 0x1308, 0x787: 0x1308, 0x788: 0x1308, 0x789: 0x0040, 0x78a: 0x1308, 0x78b: 0x1308, + 0x78c: 0x1308, 0x78d: 0x1b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x1308, 0x796: 0x1308, 0x797: 0x0040, + 0x798: 0x0008, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0040, 0x79d: 0x0040, + 0x79e: 0x0040, 0x79f: 0x0040, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x1308, 0x7a3: 0x1308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0040, 0x7b1: 0x0040, 0x7b2: 0x0040, 0x7b3: 0x0040, 0x7b4: 0x0040, 0x7b5: 0x0040, + 0x7b6: 0x0040, 0x7b7: 0x0040, 0x7b8: 0x0018, 0x7b9: 0x0018, 0x7ba: 0x0018, 0x7bb: 0x0018, + 0x7bc: 0x0018, 0x7bd: 0x0018, 0x7be: 0x0018, 0x7bf: 0x0018, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0008, 0x7c1: 0x1308, 0x7c2: 0x1008, 0x7c3: 0x1008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0008, + 0x7cc: 0x0008, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0008, 0x7d7: 0x0008, + 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0008, 0x7dc: 0x0008, 0x7dd: 0x0008, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x0008, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0008, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0040, + 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0040, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x1308, 0x7fd: 0x0008, 0x7fe: 0x1008, 0x7ff: 0x1308, + // Block 0x20, offset 0x800 + 0x800: 0x1008, 0x801: 0x1008, 0x802: 0x1008, 0x803: 0x1008, 0x804: 0x1008, 0x805: 0x0040, + 0x806: 0x1308, 0x807: 0x1008, 0x808: 0x1008, 0x809: 0x0040, 0x80a: 0x1008, 0x80b: 0x1008, + 0x80c: 0x1308, 0x80d: 0x1b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x1008, 0x816: 0x1008, 0x817: 0x0040, + 0x818: 0x0040, 0x819: 0x0040, 0x81a: 0x0040, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0008, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x1308, 0x823: 0x1308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0040, 0x839: 0x0040, 0x83a: 0x0040, 0x83b: 0x0040, + 0x83c: 0x0040, 0x83d: 0x0040, 0x83e: 0x0040, 0x83f: 0x0040, + // Block 0x21, offset 0x840 + 0x840: 0x1008, 0x841: 0x1308, 0x842: 0x1308, 0x843: 0x1308, 0x844: 0x1308, 0x845: 0x0040, + 0x846: 0x1008, 0x847: 0x1008, 0x848: 0x1008, 0x849: 0x0040, 0x84a: 0x1008, 0x84b: 0x1008, + 0x84c: 0x1008, 0x84d: 0x1b08, 0x84e: 0x0008, 0x84f: 0x0018, 0x850: 0x0040, 0x851: 0x0040, + 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x1008, + 0x858: 0x0018, 0x859: 0x0018, 0x85a: 0x0018, 0x85b: 0x0018, 0x85c: 0x0018, 0x85d: 0x0018, + 0x85e: 0x0018, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x1308, 0x863: 0x1308, + 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0018, 0x871: 0x0018, 0x872: 0x0018, 0x873: 0x0018, 0x874: 0x0018, 0x875: 0x0018, + 0x876: 0x0018, 0x877: 0x0018, 0x878: 0x0018, 0x879: 0x0018, 0x87a: 0x0008, 0x87b: 0x0008, + 0x87c: 0x0008, 0x87d: 0x0008, 0x87e: 0x0008, 0x87f: 0x0008, + // Block 0x22, offset 0x880 + 0x880: 0x0040, 0x881: 0x0008, 0x882: 0x0008, 0x883: 0x0040, 0x884: 0x0008, 0x885: 0x0040, + 0x886: 0x0040, 0x887: 0x0008, 0x888: 0x0008, 0x889: 0x0040, 0x88a: 0x0008, 0x88b: 0x0040, + 0x88c: 0x0040, 0x88d: 0x0008, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x0008, + 0x898: 0x0040, 0x899: 0x0008, 0x89a: 0x0008, 0x89b: 0x0008, 0x89c: 0x0008, 0x89d: 0x0008, + 0x89e: 0x0008, 0x89f: 0x0008, 0x8a0: 0x0040, 0x8a1: 0x0008, 0x8a2: 0x0008, 0x8a3: 0x0008, + 0x8a4: 0x0040, 0x8a5: 0x0008, 0x8a6: 0x0040, 0x8a7: 0x0008, 0x8a8: 0x0040, 0x8a9: 0x0040, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0040, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0008, 0x8b1: 0x1308, 0x8b2: 0x0008, 0x8b3: 0x0929, 0x8b4: 0x1308, 0x8b5: 0x1308, + 0x8b6: 0x1308, 0x8b7: 0x1308, 0x8b8: 0x1308, 0x8b9: 0x1308, 0x8ba: 0x0040, 0x8bb: 0x1308, + 0x8bc: 0x1308, 0x8bd: 0x0008, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0008, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x09d1, 0x8c4: 0x0008, 0x8c5: 0x0008, + 0x8c6: 0x0008, 0x8c7: 0x0008, 0x8c8: 0x0040, 0x8c9: 0x0008, 0x8ca: 0x0008, 0x8cb: 0x0008, + 0x8cc: 0x0008, 0x8cd: 0x0a09, 0x8ce: 0x0008, 0x8cf: 0x0008, 0x8d0: 0x0008, 0x8d1: 0x0008, + 0x8d2: 0x0a41, 0x8d3: 0x0008, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0a79, + 0x8d8: 0x0008, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0ab1, 0x8dd: 0x0008, + 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, + 0x8e4: 0x0008, 0x8e5: 0x0008, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0ae9, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0040, 0x8ee: 0x0040, 0x8ef: 0x0040, + 0x8f0: 0x0040, 0x8f1: 0x1308, 0x8f2: 0x1308, 0x8f3: 0x0b21, 0x8f4: 0x1308, 0x8f5: 0x0b59, + 0x8f6: 0x0b91, 0x8f7: 0x0bc9, 0x8f8: 0x0c19, 0x8f9: 0x0c51, 0x8fa: 0x1308, 0x8fb: 0x1308, + 0x8fc: 0x1308, 0x8fd: 0x1308, 0x8fe: 0x1308, 0x8ff: 0x1008, + // Block 0x24, offset 0x900 + 0x900: 0x1308, 0x901: 0x0ca1, 0x902: 0x1308, 0x903: 0x1308, 0x904: 0x1b08, 0x905: 0x0018, + 0x906: 0x1308, 0x907: 0x1308, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, + 0x90c: 0x0008, 0x90d: 0x1308, 
0x90e: 0x1308, 0x90f: 0x1308, 0x910: 0x1308, 0x911: 0x1308, + 0x912: 0x1308, 0x913: 0x0cd9, 0x914: 0x1308, 0x915: 0x1308, 0x916: 0x1308, 0x917: 0x1308, + 0x918: 0x0040, 0x919: 0x1308, 0x91a: 0x1308, 0x91b: 0x1308, 0x91c: 0x1308, 0x91d: 0x0d11, + 0x91e: 0x1308, 0x91f: 0x1308, 0x920: 0x1308, 0x921: 0x1308, 0x922: 0x0d49, 0x923: 0x1308, + 0x924: 0x1308, 0x925: 0x1308, 0x926: 0x1308, 0x927: 0x0d81, 0x928: 0x1308, 0x929: 0x1308, + 0x92a: 0x1308, 0x92b: 0x1308, 0x92c: 0x0db9, 0x92d: 0x1308, 0x92e: 0x1308, 0x92f: 0x1308, + 0x930: 0x1308, 0x931: 0x1308, 0x932: 0x1308, 0x933: 0x1308, 0x934: 0x1308, 0x935: 0x1308, + 0x936: 0x1308, 0x937: 0x1308, 0x938: 0x1308, 0x939: 0x0df1, 0x93a: 0x1308, 0x93b: 0x1308, + 0x93c: 0x1308, 0x93d: 0x0040, 0x93e: 0x0018, 0x93f: 0x0018, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0008, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0008, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0008, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0008, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0008, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0008, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0039, 0x96d: 0x0ed1, 0x96e: 0x0ee9, 0x96f: 0x0008, + 0x970: 0x0ef9, 0x971: 0x0f09, 0x972: 0x0f19, 0x973: 0x0f31, 0x974: 0x0249, 0x975: 0x0f41, + 0x976: 0x0259, 0x977: 0x0f51, 0x978: 0x0359, 0x979: 0x0f61, 0x97a: 0x0f71, 0x97b: 0x0008, + 0x97c: 0x00d9, 0x97d: 0x0f81, 0x97e: 0x0f99, 0x97f: 0x0269, + // Block 0x26, offset 0x980 + 0x980: 0x0fa9, 0x981: 0x0fb9, 0x982: 0x0279, 0x983: 0x0039, 0x984: 0x0fc9, 0x985: 0x0fe1, + 0x986: 0x059d, 0x987: 0x0ee9, 0x988: 0x0ef9, 0x989: 0x0f09, 0x98a: 0x0ff9, 0x98b: 0x1011, + 0x98c: 0x1029, 0x98d: 0x0f31, 0x98e: 0x0008, 0x98f: 0x0f51, 0x990: 0x0f61, 0x991: 0x1041, + 0x992: 0x00d9, 0x993: 0x1059, 0x994: 0x05b5, 0x995: 0x05b5, 0x996: 0x0f99, 0x997: 0x0fa9, + 0x998: 0x0fb9, 0x999: 0x059d, 0x99a: 0x1071, 0x99b: 0x1089, 0x99c: 0x05cd, 0x99d: 0x1099, + 0x99e: 0x10b1, 0x99f: 0x10c9, 0x9a0: 0x10e1, 0x9a1: 0x10f9, 0x9a2: 0x0f41, 0x9a3: 0x0269, + 0x9a4: 0x0fb9, 0x9a5: 0x1089, 0x9a6: 0x1099, 0x9a7: 0x10b1, 0x9a8: 0x1111, 0x9a9: 0x10e1, + 0x9aa: 0x10f9, 0x9ab: 0x0008, 0x9ac: 0x0008, 0x9ad: 0x0008, 0x9ae: 0x0008, 0x9af: 0x0008, + 0x9b0: 0x0008, 0x9b1: 0x0008, 0x9b2: 0x0008, 0x9b3: 0x0008, 0x9b4: 0x0008, 0x9b5: 0x0008, + 0x9b6: 0x0008, 0x9b7: 0x0008, 0x9b8: 0x1129, 0x9b9: 0x0008, 0x9ba: 0x0008, 0x9bb: 0x0008, + 0x9bc: 0x0008, 0x9bd: 0x0008, 0x9be: 0x0008, 0x9bf: 0x0008, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x1141, 0x9dc: 0x1159, 0x9dd: 0x1169, + 0x9de: 0x1181, 0x9df: 0x1029, 0x9e0: 0x1199, 0x9e1: 0x11a9, 0x9e2: 0x11c1, 0x9e3: 0x11d9, + 0x9e4: 0x11f1, 0x9e5: 0x1209, 0x9e6: 0x1221, 0x9e7: 0x05e5, 0x9e8: 0x1239, 0x9e9: 0x1251, + 0x9ea: 0xe17d, 0x9eb: 0x1269, 0x9ec: 0x1281, 0x9ed: 0x1299, 0x9ee: 0x12b1, 0x9ef: 
0x12c9, + 0x9f0: 0x12e1, 0x9f1: 0x12f9, 0x9f2: 0x1311, 0x9f3: 0x1329, 0x9f4: 0x1341, 0x9f5: 0x1359, + 0x9f6: 0x1371, 0x9f7: 0x1389, 0x9f8: 0x05fd, 0x9f9: 0x13a1, 0x9fa: 0x13b9, 0x9fb: 0x13d1, + 0x9fc: 0x13e1, 0x9fd: 0x13f9, 0x9fe: 0x1411, 0x9ff: 0x1429, + // Block 0x28, offset 0xa00 + 0xa00: 0xe00d, 0xa01: 0x0008, 0xa02: 0xe00d, 0xa03: 0x0008, 0xa04: 0xe00d, 0xa05: 0x0008, + 0xa06: 0xe00d, 0xa07: 0x0008, 0xa08: 0xe00d, 0xa09: 0x0008, 0xa0a: 0xe00d, 0xa0b: 0x0008, + 0xa0c: 0xe00d, 0xa0d: 0x0008, 0xa0e: 0xe00d, 0xa0f: 0x0008, 0xa10: 0xe00d, 0xa11: 0x0008, + 0xa12: 0xe00d, 0xa13: 0x0008, 0xa14: 0xe00d, 0xa15: 0x0008, 0xa16: 0xe00d, 0xa17: 0x0008, + 0xa18: 0xe00d, 0xa19: 0x0008, 0xa1a: 0xe00d, 0xa1b: 0x0008, 0xa1c: 0xe00d, 0xa1d: 0x0008, + 0xa1e: 0xe00d, 0xa1f: 0x0008, 0xa20: 0xe00d, 0xa21: 0x0008, 0xa22: 0xe00d, 0xa23: 0x0008, + 0xa24: 0xe00d, 0xa25: 0x0008, 0xa26: 0xe00d, 0xa27: 0x0008, 0xa28: 0xe00d, 0xa29: 0x0008, + 0xa2a: 0xe00d, 0xa2b: 0x0008, 0xa2c: 0xe00d, 0xa2d: 0x0008, 0xa2e: 0xe00d, 0xa2f: 0x0008, + 0xa30: 0xe00d, 0xa31: 0x0008, 0xa32: 0xe00d, 0xa33: 0x0008, 0xa34: 0xe00d, 0xa35: 0x0008, + 0xa36: 0xe00d, 0xa37: 0x0008, 0xa38: 0xe00d, 0xa39: 0x0008, 0xa3a: 0xe00d, 0xa3b: 0x0008, + 0xa3c: 0xe00d, 0xa3d: 0x0008, 0xa3e: 0xe00d, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, + 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, + 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, + 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0615, 0xa5b: 0x0635, 0xa5c: 0x0008, 0xa5d: 0x0008, + 0xa5e: 0x1441, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, + 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, + 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, + 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, + 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, + 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0008, 0xa81: 0x0008, 0xa82: 0x0008, 0xa83: 0x0008, 0xa84: 0x0008, 0xa85: 0x0008, + 0xa86: 0x0040, 0xa87: 0x0040, 0xa88: 0xe045, 0xa89: 0xe045, 0xa8a: 0xe045, 0xa8b: 0xe045, + 0xa8c: 0xe045, 0xa8d: 0xe045, 0xa8e: 0x0040, 0xa8f: 0x0040, 0xa90: 0x0008, 0xa91: 0x0008, + 0xa92: 0x0008, 0xa93: 0x0008, 0xa94: 0x0008, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, + 0xa98: 0x0040, 0xa99: 0xe045, 0xa9a: 0x0040, 0xa9b: 0xe045, 0xa9c: 0x0040, 0xa9d: 0xe045, + 0xa9e: 0x0040, 0xa9f: 0xe045, 0xaa0: 0x0008, 0xaa1: 0x0008, 0xaa2: 0x0008, 0xaa3: 0x0008, + 0xaa4: 0x0008, 0xaa5: 0x0008, 0xaa6: 0x0008, 0xaa7: 0x0008, 0xaa8: 0xe045, 0xaa9: 0xe045, + 0xaaa: 0xe045, 0xaab: 0xe045, 0xaac: 0xe045, 0xaad: 0xe045, 0xaae: 0xe045, 0xaaf: 0xe045, + 0xab0: 0x0008, 0xab1: 0x1459, 0xab2: 0x0008, 0xab3: 0x1471, 0xab4: 0x0008, 0xab5: 0x1489, + 0xab6: 0x0008, 0xab7: 0x14a1, 0xab8: 0x0008, 0xab9: 0x14b9, 0xaba: 0x0008, 0xabb: 0x14d1, + 0xabc: 0x0008, 0xabd: 0x14e9, 0xabe: 0x0040, 0xabf: 0x0040, + // Block 0x2b, offset 0xac0 + 0xac0: 0x1501, 0xac1: 0x1531, 0xac2: 0x1561, 0xac3: 0x1591, 0xac4: 0x15c1, 0xac5: 0x15f1, + 0xac6: 0x1621, 0xac7: 0x1651, 0xac8: 0x1501, 0xac9: 0x1531, 0xaca: 0x1561, 0xacb: 0x1591, + 0xacc: 0x15c1, 0xacd: 0x15f1, 0xace: 0x1621, 
0xacf: 0x1651, 0xad0: 0x1681, 0xad1: 0x16b1, + 0xad2: 0x16e1, 0xad3: 0x1711, 0xad4: 0x1741, 0xad5: 0x1771, 0xad6: 0x17a1, 0xad7: 0x17d1, + 0xad8: 0x1681, 0xad9: 0x16b1, 0xada: 0x16e1, 0xadb: 0x1711, 0xadc: 0x1741, 0xadd: 0x1771, + 0xade: 0x17a1, 0xadf: 0x17d1, 0xae0: 0x1801, 0xae1: 0x1831, 0xae2: 0x1861, 0xae3: 0x1891, + 0xae4: 0x18c1, 0xae5: 0x18f1, 0xae6: 0x1921, 0xae7: 0x1951, 0xae8: 0x1801, 0xae9: 0x1831, + 0xaea: 0x1861, 0xaeb: 0x1891, 0xaec: 0x18c1, 0xaed: 0x18f1, 0xaee: 0x1921, 0xaef: 0x1951, + 0xaf0: 0x0008, 0xaf1: 0x0008, 0xaf2: 0x1981, 0xaf3: 0x19b1, 0xaf4: 0x19d9, 0xaf5: 0x0040, + 0xaf6: 0x0008, 0xaf7: 0x1a01, 0xaf8: 0xe045, 0xaf9: 0xe045, 0xafa: 0x064d, 0xafb: 0x1459, + 0xafc: 0x19b1, 0xafd: 0x0666, 0xafe: 0x1a31, 0xaff: 0x0686, + // Block 0x2c, offset 0xb00 + 0xb00: 0x06a6, 0xb01: 0x1a4a, 0xb02: 0x1a79, 0xb03: 0x1aa9, 0xb04: 0x1ad1, 0xb05: 0x0040, + 0xb06: 0x0008, 0xb07: 0x1af9, 0xb08: 0x06c5, 0xb09: 0x1471, 0xb0a: 0x06dd, 0xb0b: 0x1489, + 0xb0c: 0x1aa9, 0xb0d: 0x1b2a, 0xb0e: 0x1b5a, 0xb0f: 0x1b8a, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x1bb9, 0xb14: 0x0040, 0xb15: 0x0040, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0xe045, 0xb19: 0xe045, 0xb1a: 0x06f5, 0xb1b: 0x14a1, 0xb1c: 0x0040, 0xb1d: 0x1bd2, + 0xb1e: 0x1c02, 0xb1f: 0x1c32, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x1c61, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0x070d, 0xb2b: 0x14d1, 0xb2c: 0xe04d, 0xb2d: 0x1c7a, 0xb2e: 0x03d2, 0xb2f: 0x1caa, + 0xb30: 0x0040, 0xb31: 0x0040, 0xb32: 0x1cb9, 0xb33: 0x1ce9, 0xb34: 0x1d11, 0xb35: 0x0040, + 0xb36: 0x0008, 0xb37: 0x1d39, 0xb38: 0x0725, 0xb39: 0x14b9, 0xb3a: 0x0515, 0xb3b: 0x14e9, + 0xb3c: 0x1ce9, 0xb3d: 0x073e, 0xb3e: 0x075e, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x000a, 0xb41: 0x000a, 0xb42: 0x000a, 0xb43: 0x000a, 0xb44: 0x000a, 0xb45: 0x000a, + 0xb46: 0x000a, 0xb47: 0x000a, 0xb48: 0x000a, 0xb49: 0x000a, 0xb4a: 0x000a, 0xb4b: 0x03c0, + 0xb4c: 0x0003, 0xb4d: 0x0003, 0xb4e: 0x0340, 0xb4f: 0x0340, 0xb50: 0x0018, 0xb51: 0xe00d, + 0xb52: 0x0018, 0xb53: 0x0018, 0xb54: 0x0018, 0xb55: 0x0018, 0xb56: 0x0018, 0xb57: 0x077e, + 0xb58: 0x0018, 0xb59: 0x0018, 0xb5a: 0x0018, 0xb5b: 0x0018, 0xb5c: 0x0018, 0xb5d: 0x0018, + 0xb5e: 0x0018, 0xb5f: 0x0018, 0xb60: 0x0018, 0xb61: 0x0018, 0xb62: 0x0018, 0xb63: 0x0018, + 0xb64: 0x0040, 0xb65: 0x0040, 0xb66: 0x0040, 0xb67: 0x0018, 0xb68: 0x0040, 0xb69: 0x0040, + 0xb6a: 0x0340, 0xb6b: 0x0340, 0xb6c: 0x0340, 0xb6d: 0x0340, 0xb6e: 0x0340, 0xb6f: 0x000a, + 0xb70: 0x0018, 0xb71: 0x0018, 0xb72: 0x0018, 0xb73: 0x1d69, 0xb74: 0x1da1, 0xb75: 0x0018, + 0xb76: 0x1df1, 0xb77: 0x1e29, 0xb78: 0x0018, 0xb79: 0x0018, 0xb7a: 0x0018, 0xb7b: 0x0018, + 0xb7c: 0x1e7a, 0xb7d: 0x0018, 0xb7e: 0x079e, 0xb7f: 0x0018, + // Block 0x2e, offset 0xb80 + 0xb80: 0x0018, 0xb81: 0x0018, 0xb82: 0x0018, 0xb83: 0x0018, 0xb84: 0x0018, 0xb85: 0x0018, + 0xb86: 0x0018, 0xb87: 0x1e92, 0xb88: 0x1eaa, 0xb89: 0x1ec2, 0xb8a: 0x0018, 0xb8b: 0x0018, + 0xb8c: 0x0018, 0xb8d: 0x0018, 0xb8e: 0x0018, 0xb8f: 0x0018, 0xb90: 0x0018, 0xb91: 0x0018, + 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x1ed9, + 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, + 0xb9e: 0x0018, 0xb9f: 0x000a, 0xba0: 0x03c0, 0xba1: 0x0340, 0xba2: 0x0340, 0xba3: 0x0340, + 0xba4: 0x03c0, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0040, 0xba8: 0x0040, 0xba9: 0x0040, + 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x0340, + 0xbb0: 
0x1f41, 0xbb1: 0x0f41, 0xbb2: 0x0040, 0xbb3: 0x0040, 0xbb4: 0x1f51, 0xbb5: 0x1f61, + 0xbb6: 0x1f71, 0xbb7: 0x1f81, 0xbb8: 0x1f91, 0xbb9: 0x1fa1, 0xbba: 0x1fb2, 0xbbb: 0x07bd, + 0xbbc: 0x1fc2, 0xbbd: 0x1fd2, 0xbbe: 0x1fe2, 0xbbf: 0x0f71, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1f41, 0xbc1: 0x00c9, 0xbc2: 0x0069, 0xbc3: 0x0079, 0xbc4: 0x1f51, 0xbc5: 0x1f61, + 0xbc6: 0x1f71, 0xbc7: 0x1f81, 0xbc8: 0x1f91, 0xbc9: 0x1fa1, 0xbca: 0x1fb2, 0xbcb: 0x07d5, + 0xbcc: 0x1fc2, 0xbcd: 0x1fd2, 0xbce: 0x1fe2, 0xbcf: 0x0040, 0xbd0: 0x0039, 0xbd1: 0x0f09, + 0xbd2: 0x00d9, 0xbd3: 0x0369, 0xbd4: 0x0ff9, 0xbd5: 0x0249, 0xbd6: 0x0f51, 0xbd7: 0x0359, + 0xbd8: 0x0f61, 0xbd9: 0x0f71, 0xbda: 0x0f99, 0xbdb: 0x01d9, 0xbdc: 0x0fa9, 0xbdd: 0x0040, + 0xbde: 0x0040, 0xbdf: 0x0040, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0018, 0xbe5: 0x0018, 0xbe6: 0x0018, 0xbe7: 0x0018, 0xbe8: 0x1ff1, 0xbe9: 0x0018, + 0xbea: 0x0018, 0xbeb: 0x0018, 0xbec: 0x0018, 0xbed: 0x0018, 0xbee: 0x0018, 0xbef: 0x0018, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x0018, 0xbf4: 0x0018, 0xbf5: 0x0018, + 0xbf6: 0x0018, 0xbf7: 0x0018, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x0018, 0xbfd: 0x0018, 0xbfe: 0x0018, 0xbff: 0x0040, + // Block 0x30, offset 0xc00 + 0xc00: 0x07ee, 0xc01: 0x080e, 0xc02: 0x1159, 0xc03: 0x082d, 0xc04: 0x0018, 0xc05: 0x084e, + 0xc06: 0x086e, 0xc07: 0x1011, 0xc08: 0x0018, 0xc09: 0x088d, 0xc0a: 0x0f31, 0xc0b: 0x0249, + 0xc0c: 0x0249, 0xc0d: 0x0249, 0xc0e: 0x0249, 0xc0f: 0x2009, 0xc10: 0x0f41, 0xc11: 0x0f41, + 0xc12: 0x0359, 0xc13: 0x0359, 0xc14: 0x0018, 0xc15: 0x0f71, 0xc16: 0x2021, 0xc17: 0x0018, + 0xc18: 0x0018, 0xc19: 0x0f99, 0xc1a: 0x2039, 0xc1b: 0x0269, 0xc1c: 0x0269, 0xc1d: 0x0269, + 0xc1e: 0x0018, 0xc1f: 0x0018, 0xc20: 0x2049, 0xc21: 0x08ad, 0xc22: 0x2061, 0xc23: 0x0018, + 0xc24: 0x13d1, 0xc25: 0x0018, 0xc26: 0x2079, 0xc27: 0x0018, 0xc28: 0x13d1, 0xc29: 0x0018, + 0xc2a: 0x0f51, 0xc2b: 0x2091, 0xc2c: 0x0ee9, 0xc2d: 0x1159, 0xc2e: 0x0018, 0xc2f: 0x0f09, + 0xc30: 0x0f09, 0xc31: 0x1199, 0xc32: 0x0040, 0xc33: 0x0f61, 0xc34: 0x00d9, 0xc35: 0x20a9, + 0xc36: 0x20c1, 0xc37: 0x20d9, 0xc38: 0x20f1, 0xc39: 0x0f41, 0xc3a: 0x0018, 0xc3b: 0x08cd, + 0xc3c: 0x2109, 0xc3d: 0x10b1, 0xc3e: 0x10b1, 0xc3f: 0x2109, + // Block 0x31, offset 0xc40 + 0xc40: 0x08ed, 0xc41: 0x0018, 0xc42: 0x0018, 0xc43: 0x0018, 0xc44: 0x0018, 0xc45: 0x0ef9, + 0xc46: 0x0ef9, 0xc47: 0x0f09, 0xc48: 0x0f41, 0xc49: 0x0259, 0xc4a: 0x0018, 0xc4b: 0x0018, + 0xc4c: 0x0018, 0xc4d: 0x0018, 0xc4e: 0x0008, 0xc4f: 0x0018, 0xc50: 0x2121, 0xc51: 0x2151, + 0xc52: 0x2181, 0xc53: 0x21b9, 0xc54: 0x21e9, 0xc55: 0x2219, 0xc56: 0x2249, 0xc57: 0x2279, + 0xc58: 0x22a9, 0xc59: 0x22d9, 0xc5a: 0x2309, 0xc5b: 0x2339, 0xc5c: 0x2369, 0xc5d: 0x2399, + 0xc5e: 0x23c9, 0xc5f: 0x23f9, 0xc60: 0x0f41, 0xc61: 0x2421, 0xc62: 0x0905, 0xc63: 0x2439, + 0xc64: 0x1089, 0xc65: 0x2451, 0xc66: 0x0925, 0xc67: 0x2469, 0xc68: 0x2491, 0xc69: 0x0369, + 0xc6a: 0x24a9, 0xc6b: 0x0945, 0xc6c: 0x0359, 0xc6d: 0x1159, 0xc6e: 0x0ef9, 0xc6f: 0x0f61, + 0xc70: 0x0f41, 0xc71: 0x2421, 0xc72: 0x0965, 0xc73: 0x2439, 0xc74: 0x1089, 0xc75: 0x2451, + 0xc76: 0x0985, 0xc77: 0x2469, 0xc78: 0x2491, 0xc79: 0x0369, 0xc7a: 0x24a9, 0xc7b: 0x09a5, + 0xc7c: 0x0359, 0xc7d: 0x1159, 0xc7e: 0x0ef9, 0xc7f: 0x0f61, + // Block 0x32, offset 0xc80 + 0xc80: 0x0018, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0018, + 0xc86: 0x0018, 0xc87: 0x0018, 0xc88: 0x0018, 0xc89: 0x0018, 0xc8a: 0x0018, 0xc8b: 0x0040, + 0xc8c: 0x0040, 0xc8d: 0x0040, 0xc8e: 0x0040, 0xc8f: 0x0040, 
0xc90: 0x0040, 0xc91: 0x0040, + 0xc92: 0x0040, 0xc93: 0x0040, 0xc94: 0x0040, 0xc95: 0x0040, 0xc96: 0x0040, 0xc97: 0x0040, + 0xc98: 0x0040, 0xc99: 0x0040, 0xc9a: 0x0040, 0xc9b: 0x0040, 0xc9c: 0x0040, 0xc9d: 0x0040, + 0xc9e: 0x0040, 0xc9f: 0x0040, 0xca0: 0x00c9, 0xca1: 0x0069, 0xca2: 0x0079, 0xca3: 0x1f51, + 0xca4: 0x1f61, 0xca5: 0x1f71, 0xca6: 0x1f81, 0xca7: 0x1f91, 0xca8: 0x1fa1, 0xca9: 0x2601, + 0xcaa: 0x2619, 0xcab: 0x2631, 0xcac: 0x2649, 0xcad: 0x2661, 0xcae: 0x2679, 0xcaf: 0x2691, + 0xcb0: 0x26a9, 0xcb1: 0x26c1, 0xcb2: 0x26d9, 0xcb3: 0x26f1, 0xcb4: 0x0a06, 0xcb5: 0x0a26, + 0xcb6: 0x0a46, 0xcb7: 0x0a66, 0xcb8: 0x0a86, 0xcb9: 0x0aa6, 0xcba: 0x0ac6, 0xcbb: 0x0ae6, + 0xcbc: 0x0b06, 0xcbd: 0x270a, 0xcbe: 0x2732, 0xcbf: 0x275a, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x2782, 0xcc1: 0x27aa, 0xcc2: 0x27d2, 0xcc3: 0x27fa, 0xcc4: 0x2822, 0xcc5: 0x284a, + 0xcc6: 0x2872, 0xcc7: 0x289a, 0xcc8: 0x0040, 0xcc9: 0x0040, 0xcca: 0x0040, 0xccb: 0x0040, + 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, + 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, + 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0b26, 0xcdd: 0x0b46, + 0xcde: 0x0b66, 0xcdf: 0x0b86, 0xce0: 0x0ba6, 0xce1: 0x0bc6, 0xce2: 0x0be6, 0xce3: 0x0c06, + 0xce4: 0x0c26, 0xce5: 0x0c46, 0xce6: 0x0c66, 0xce7: 0x0c86, 0xce8: 0x0ca6, 0xce9: 0x0cc6, + 0xcea: 0x0ce6, 0xceb: 0x0d06, 0xcec: 0x0d26, 0xced: 0x0d46, 0xcee: 0x0d66, 0xcef: 0x0d86, + 0xcf0: 0x0da6, 0xcf1: 0x0dc6, 0xcf2: 0x0de6, 0xcf3: 0x0e06, 0xcf4: 0x0e26, 0xcf5: 0x0e46, + 0xcf6: 0x0039, 0xcf7: 0x0ee9, 0xcf8: 0x1159, 0xcf9: 0x0ef9, 0xcfa: 0x0f09, 0xcfb: 0x1199, + 0xcfc: 0x0f31, 0xcfd: 0x0249, 0xcfe: 0x0f41, 0xcff: 0x0259, + // Block 0x34, offset 0xd00 + 0xd00: 0x0f51, 0xd01: 0x0359, 0xd02: 0x0f61, 0xd03: 0x0f71, 0xd04: 0x00d9, 0xd05: 0x0f99, + 0xd06: 0x2039, 0xd07: 0x0269, 0xd08: 0x01d9, 0xd09: 0x0fa9, 0xd0a: 0x0fb9, 0xd0b: 0x1089, + 0xd0c: 0x0279, 0xd0d: 0x0369, 0xd0e: 0x0289, 0xd0f: 0x13d1, 0xd10: 0x0039, 0xd11: 0x0ee9, + 0xd12: 0x1159, 0xd13: 0x0ef9, 0xd14: 0x0f09, 0xd15: 0x1199, 0xd16: 0x0f31, 0xd17: 0x0249, + 0xd18: 0x0f41, 0xd19: 0x0259, 0xd1a: 0x0f51, 0xd1b: 0x0359, 0xd1c: 0x0f61, 0xd1d: 0x0f71, + 0xd1e: 0x00d9, 0xd1f: 0x0f99, 0xd20: 0x2039, 0xd21: 0x0269, 0xd22: 0x01d9, 0xd23: 0x0fa9, + 0xd24: 0x0fb9, 0xd25: 0x1089, 0xd26: 0x0279, 0xd27: 0x0369, 0xd28: 0x0289, 0xd29: 0x13d1, + 0xd2a: 0x1f41, 0xd2b: 0x0018, 0xd2c: 0x0018, 0xd2d: 0x0018, 0xd2e: 0x0018, 0xd2f: 0x0018, + 0xd30: 0x0018, 0xd31: 0x0018, 0xd32: 0x0018, 0xd33: 0x0018, 0xd34: 0x0018, 0xd35: 0x0018, + 0xd36: 0x0018, 0xd37: 0x0018, 0xd38: 0x0018, 0xd39: 0x0018, 0xd3a: 0x0018, 0xd3b: 0x0018, + 0xd3c: 0x0018, 0xd3d: 0x0018, 0xd3e: 0x0018, 0xd3f: 0x0018, + // Block 0x35, offset 0xd40 + 0xd40: 0x0008, 0xd41: 0x0008, 0xd42: 0x0008, 0xd43: 0x0008, 0xd44: 0x0008, 0xd45: 0x0008, + 0xd46: 0x0008, 0xd47: 0x0008, 0xd48: 0x0008, 0xd49: 0x0008, 0xd4a: 0x0008, 0xd4b: 0x0008, + 0xd4c: 0x0008, 0xd4d: 0x0008, 0xd4e: 0x0008, 0xd4f: 0x0008, 0xd50: 0x0008, 0xd51: 0x0008, + 0xd52: 0x0008, 0xd53: 0x0008, 0xd54: 0x0008, 0xd55: 0x0008, 0xd56: 0x0008, 0xd57: 0x0008, + 0xd58: 0x0008, 0xd59: 0x0008, 0xd5a: 0x0008, 0xd5b: 0x0008, 0xd5c: 0x0008, 0xd5d: 0x0008, + 0xd5e: 0x0008, 0xd5f: 0x0040, 0xd60: 0xe00d, 0xd61: 0x0008, 0xd62: 0x2971, 0xd63: 0x0ebd, + 0xd64: 0x2989, 0xd65: 0x0008, 0xd66: 0x0008, 0xd67: 0xe07d, 0xd68: 0x0008, 0xd69: 0xe01d, + 0xd6a: 0x0008, 0xd6b: 0xe03d, 0xd6c: 0x0008, 0xd6d: 0x0fe1, 0xd6e: 0x1281, 0xd6f: 0x0fc9, + 0xd70: 0x1141, 0xd71: 
0x0008, 0xd72: 0xe00d, 0xd73: 0x0008, 0xd74: 0x0008, 0xd75: 0xe01d, + 0xd76: 0x0008, 0xd77: 0x0008, 0xd78: 0x0008, 0xd79: 0x0008, 0xd7a: 0x0008, 0xd7b: 0x0008, + 0xd7c: 0x0259, 0xd7d: 0x1089, 0xd7e: 0x29a1, 0xd7f: 0x29b9, + // Block 0x36, offset 0xd80 + 0xd80: 0xe00d, 0xd81: 0x0008, 0xd82: 0xe00d, 0xd83: 0x0008, 0xd84: 0xe00d, 0xd85: 0x0008, + 0xd86: 0xe00d, 0xd87: 0x0008, 0xd88: 0xe00d, 0xd89: 0x0008, 0xd8a: 0xe00d, 0xd8b: 0x0008, + 0xd8c: 0xe00d, 0xd8d: 0x0008, 0xd8e: 0xe00d, 0xd8f: 0x0008, 0xd90: 0xe00d, 0xd91: 0x0008, + 0xd92: 0xe00d, 0xd93: 0x0008, 0xd94: 0xe00d, 0xd95: 0x0008, 0xd96: 0xe00d, 0xd97: 0x0008, + 0xd98: 0xe00d, 0xd99: 0x0008, 0xd9a: 0xe00d, 0xd9b: 0x0008, 0xd9c: 0xe00d, 0xd9d: 0x0008, + 0xd9e: 0xe00d, 0xd9f: 0x0008, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0xe00d, 0xda3: 0x0008, + 0xda4: 0x0008, 0xda5: 0x0018, 0xda6: 0x0018, 0xda7: 0x0018, 0xda8: 0x0018, 0xda9: 0x0018, + 0xdaa: 0x0018, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0xe01d, 0xdae: 0x0008, 0xdaf: 0x1308, + 0xdb0: 0x1308, 0xdb1: 0x1308, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0040, 0xdb5: 0x0040, + 0xdb6: 0x0040, 0xdb7: 0x0040, 0xdb8: 0x0040, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x26fd, 0xdc1: 0x271d, 0xdc2: 0x273d, 0xdc3: 0x275d, 0xdc4: 0x277d, 0xdc5: 0x279d, + 0xdc6: 0x27bd, 0xdc7: 0x27dd, 0xdc8: 0x27fd, 0xdc9: 0x281d, 0xdca: 0x283d, 0xdcb: 0x285d, + 0xdcc: 0x287d, 0xdcd: 0x289d, 0xdce: 0x28bd, 0xdcf: 0x28dd, 0xdd0: 0x28fd, 0xdd1: 0x291d, + 0xdd2: 0x293d, 0xdd3: 0x295d, 0xdd4: 0x297d, 0xdd5: 0x299d, 0xdd6: 0x0040, 0xdd7: 0x0040, + 0xdd8: 0x0040, 0xdd9: 0x0040, 0xdda: 0x0040, 0xddb: 0x0040, 0xddc: 0x0040, 0xddd: 0x0040, + 0xdde: 0x0040, 0xddf: 0x0040, 0xde0: 0x0040, 0xde1: 0x0040, 0xde2: 0x0040, 0xde3: 0x0040, + 0xde4: 0x0040, 0xde5: 0x0040, 0xde6: 0x0040, 0xde7: 0x0040, 0xde8: 0x0040, 0xde9: 0x0040, + 0xdea: 0x0040, 0xdeb: 0x0040, 0xdec: 0x0040, 0xded: 0x0040, 0xdee: 0x0040, 0xdef: 0x0040, + 0xdf0: 0x0040, 0xdf1: 0x0040, 0xdf2: 0x0040, 0xdf3: 0x0040, 0xdf4: 0x0040, 0xdf5: 0x0040, + 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0040, 0xdfa: 0x0040, 0xdfb: 0x0040, + 0xdfc: 0x0040, 0xdfd: 0x0040, 0xdfe: 0x0040, 0xdff: 0x0040, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, 0xe01: 0x0018, 0xe02: 0x29d1, 0xe03: 0x0018, 0xe04: 0x0018, 0xe05: 0x0008, + 0xe06: 0x0008, 0xe07: 0x0008, 0xe08: 0x0018, 0xe09: 0x0018, 0xe0a: 0x0018, 0xe0b: 0x0018, + 0xe0c: 0x0018, 0xe0d: 0x0018, 0xe0e: 0x0018, 0xe0f: 0x0018, 0xe10: 0x0018, 0xe11: 0x0018, + 0xe12: 0x0018, 0xe13: 0x0018, 0xe14: 0x0018, 0xe15: 0x0018, 0xe16: 0x0018, 0xe17: 0x0018, + 0xe18: 0x0018, 0xe19: 0x0018, 0xe1a: 0x0018, 0xe1b: 0x0018, 0xe1c: 0x0018, 0xe1d: 0x0018, + 0xe1e: 0x0018, 0xe1f: 0x0018, 0xe20: 0x0018, 0xe21: 0x0018, 0xe22: 0x0018, 0xe23: 0x0018, + 0xe24: 0x0018, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x1308, 0xe2b: 0x1308, 0xe2c: 0x1308, 0xe2d: 0x1308, 0xe2e: 0x1018, 0xe2f: 0x1018, + 0xe30: 0x0018, 0xe31: 0x0018, 0xe32: 0x0018, 0xe33: 0x0018, 0xe34: 0x0018, 0xe35: 0x0018, + 0xe36: 0xe125, 0xe37: 0x0018, 0xe38: 0x29bd, 0xe39: 0x29dd, 0xe3a: 0x29fd, 0xe3b: 0x0018, + 0xe3c: 0x0008, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x2b3d, 0xe41: 0x2b5d, 0xe42: 0x2b7d, 0xe43: 0x2b9d, 0xe44: 0x2bbd, 0xe45: 0x2bdd, + 0xe46: 0x2bdd, 0xe47: 0x2bdd, 0xe48: 0x2bfd, 0xe49: 0x2bfd, 0xe4a: 0x2bfd, 0xe4b: 0x2bfd, + 0xe4c: 0x2c1d, 0xe4d: 0x2c1d, 0xe4e: 0x2c1d, 0xe4f: 0x2c3d, 0xe50: 0x2c5d, 
0xe51: 0x2c5d, + 0xe52: 0x2a7d, 0xe53: 0x2a7d, 0xe54: 0x2c5d, 0xe55: 0x2c5d, 0xe56: 0x2c7d, 0xe57: 0x2c7d, + 0xe58: 0x2c5d, 0xe59: 0x2c5d, 0xe5a: 0x2a7d, 0xe5b: 0x2a7d, 0xe5c: 0x2c5d, 0xe5d: 0x2c5d, + 0xe5e: 0x2c3d, 0xe5f: 0x2c3d, 0xe60: 0x2c9d, 0xe61: 0x2c9d, 0xe62: 0x2cbd, 0xe63: 0x2cbd, + 0xe64: 0x0040, 0xe65: 0x2cdd, 0xe66: 0x2cfd, 0xe67: 0x2d1d, 0xe68: 0x2d1d, 0xe69: 0x2d3d, + 0xe6a: 0x2d5d, 0xe6b: 0x2d7d, 0xe6c: 0x2d9d, 0xe6d: 0x2dbd, 0xe6e: 0x2ddd, 0xe6f: 0x2dfd, + 0xe70: 0x2e1d, 0xe71: 0x2e3d, 0xe72: 0x2e3d, 0xe73: 0x2e5d, 0xe74: 0x2e7d, 0xe75: 0x2e7d, + 0xe76: 0x2e9d, 0xe77: 0x2ebd, 0xe78: 0x2e5d, 0xe79: 0x2edd, 0xe7a: 0x2efd, 0xe7b: 0x2edd, + 0xe7c: 0x2e5d, 0xe7d: 0x2f1d, 0xe7e: 0x2f3d, 0xe7f: 0x2f5d, + // Block 0x3a, offset 0xe80 + 0xe80: 0x2f7d, 0xe81: 0x2f9d, 0xe82: 0x2cfd, 0xe83: 0x2cdd, 0xe84: 0x2fbd, 0xe85: 0x2fdd, + 0xe86: 0x2ffd, 0xe87: 0x301d, 0xe88: 0x303d, 0xe89: 0x305d, 0xe8a: 0x307d, 0xe8b: 0x309d, + 0xe8c: 0x30bd, 0xe8d: 0x30dd, 0xe8e: 0x30fd, 0xe8f: 0x0040, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x311d, 0xe93: 0x313d, 0xe94: 0x315d, 0xe95: 0x317d, 0xe96: 0x319d, 0xe97: 0x31bd, + 0xe98: 0x31dd, 0xe99: 0x31fd, 0xe9a: 0x321d, 0xe9b: 0x323d, 0xe9c: 0x315d, 0xe9d: 0x325d, + 0xe9e: 0x327d, 0xe9f: 0x329d, 0xea0: 0x0008, 0xea1: 0x0008, 0xea2: 0x0008, 0xea3: 0x0008, + 0xea4: 0x0008, 0xea5: 0x0008, 0xea6: 0x0008, 0xea7: 0x0008, 0xea8: 0x0008, 0xea9: 0x0008, + 0xeaa: 0x0008, 0xeab: 0x0008, 0xeac: 0x0008, 0xead: 0x0008, 0xeae: 0x0008, 0xeaf: 0x0008, + 0xeb0: 0x0008, 0xeb1: 0x0008, 0xeb2: 0x0008, 0xeb3: 0x0008, 0xeb4: 0x0008, 0xeb5: 0x0008, + 0xeb6: 0x0008, 0xeb7: 0x0008, 0xeb8: 0x0008, 0xeb9: 0x0008, 0xeba: 0x0008, 0xebb: 0x0040, + 0xebc: 0x0040, 0xebd: 0x0040, 0xebe: 0x0040, 0xebf: 0x0040, + // Block 0x3b, offset 0xec0 + 0xec0: 0x36a2, 0xec1: 0x36d2, 0xec2: 0x3702, 0xec3: 0x3732, 0xec4: 0x32bd, 0xec5: 0x32dd, + 0xec6: 0x32fd, 0xec7: 0x331d, 0xec8: 0x0018, 0xec9: 0x0018, 0xeca: 0x0018, 0xecb: 0x0018, + 0xecc: 0x0018, 0xecd: 0x0018, 0xece: 0x0018, 0xecf: 0x0018, 0xed0: 0x333d, 0xed1: 0x3761, + 0xed2: 0x3779, 0xed3: 0x3791, 0xed4: 0x37a9, 0xed5: 0x37c1, 0xed6: 0x37d9, 0xed7: 0x37f1, + 0xed8: 0x3809, 0xed9: 0x3821, 0xeda: 0x3839, 0xedb: 0x3851, 0xedc: 0x3869, 0xedd: 0x3881, + 0xede: 0x3899, 0xedf: 0x38b1, 0xee0: 0x335d, 0xee1: 0x337d, 0xee2: 0x339d, 0xee3: 0x33bd, + 0xee4: 0x33dd, 0xee5: 0x33dd, 0xee6: 0x33fd, 0xee7: 0x341d, 0xee8: 0x343d, 0xee9: 0x345d, + 0xeea: 0x347d, 0xeeb: 0x349d, 0xeec: 0x34bd, 0xeed: 0x34dd, 0xeee: 0x34fd, 0xeef: 0x351d, + 0xef0: 0x353d, 0xef1: 0x355d, 0xef2: 0x357d, 0xef3: 0x359d, 0xef4: 0x35bd, 0xef5: 0x35dd, + 0xef6: 0x35fd, 0xef7: 0x361d, 0xef8: 0x363d, 0xef9: 0x365d, 0xefa: 0x367d, 0xefb: 0x369d, + 0xefc: 0x38c9, 0xefd: 0x3901, 0xefe: 0x36bd, 0xeff: 0x0018, + // Block 0x3c, offset 0xf00 + 0xf00: 0x36dd, 0xf01: 0x36fd, 0xf02: 0x371d, 0xf03: 0x373d, 0xf04: 0x375d, 0xf05: 0x377d, + 0xf06: 0x379d, 0xf07: 0x37bd, 0xf08: 0x37dd, 0xf09: 0x37fd, 0xf0a: 0x381d, 0xf0b: 0x383d, + 0xf0c: 0x385d, 0xf0d: 0x387d, 0xf0e: 0x389d, 0xf0f: 0x38bd, 0xf10: 0x38dd, 0xf11: 0x38fd, + 0xf12: 0x391d, 0xf13: 0x393d, 0xf14: 0x395d, 0xf15: 0x397d, 0xf16: 0x399d, 0xf17: 0x39bd, + 0xf18: 0x39dd, 0xf19: 0x39fd, 0xf1a: 0x3a1d, 0xf1b: 0x3a3d, 0xf1c: 0x3a5d, 0xf1d: 0x3a7d, + 0xf1e: 0x3a9d, 0xf1f: 0x3abd, 0xf20: 0x3add, 0xf21: 0x3afd, 0xf22: 0x3b1d, 0xf23: 0x3b3d, + 0xf24: 0x3b5d, 0xf25: 0x3b7d, 0xf26: 0x127d, 0xf27: 0x3b9d, 0xf28: 0x3bbd, 0xf29: 0x3bdd, + 0xf2a: 0x3bfd, 0xf2b: 0x3c1d, 0xf2c: 0x3c3d, 0xf2d: 0x3c5d, 0xf2e: 0x239d, 0xf2f: 0x3c7d, + 0xf30: 0x3c9d, 0xf31: 0x3939, 0xf32: 
0x3951, 0xf33: 0x3969, 0xf34: 0x3981, 0xf35: 0x3999, + 0xf36: 0x39b1, 0xf37: 0x39c9, 0xf38: 0x39e1, 0xf39: 0x39f9, 0xf3a: 0x3a11, 0xf3b: 0x3a29, + 0xf3c: 0x3a41, 0xf3d: 0x3a59, 0xf3e: 0x3a71, 0xf3f: 0x3a89, + // Block 0x3d, offset 0xf40 + 0xf40: 0x3aa1, 0xf41: 0x3ac9, 0xf42: 0x3af1, 0xf43: 0x3b19, 0xf44: 0x3b41, 0xf45: 0x3b69, + 0xf46: 0x3b91, 0xf47: 0x3bb9, 0xf48: 0x3be1, 0xf49: 0x3c09, 0xf4a: 0x3c39, 0xf4b: 0x3c69, + 0xf4c: 0x3c99, 0xf4d: 0x3cbd, 0xf4e: 0x3cb1, 0xf4f: 0x3cdd, 0xf50: 0x3cfd, 0xf51: 0x3d15, + 0xf52: 0x3d2d, 0xf53: 0x3d45, 0xf54: 0x3d5d, 0xf55: 0x3d5d, 0xf56: 0x3d45, 0xf57: 0x3d75, + 0xf58: 0x07bd, 0xf59: 0x3d8d, 0xf5a: 0x3da5, 0xf5b: 0x3dbd, 0xf5c: 0x3dd5, 0xf5d: 0x3ded, + 0xf5e: 0x3e05, 0xf5f: 0x3e1d, 0xf60: 0x3e35, 0xf61: 0x3e4d, 0xf62: 0x3e65, 0xf63: 0x3e7d, + 0xf64: 0x3e95, 0xf65: 0x3e95, 0xf66: 0x3ead, 0xf67: 0x3ead, 0xf68: 0x3ec5, 0xf69: 0x3ec5, + 0xf6a: 0x3edd, 0xf6b: 0x3ef5, 0xf6c: 0x3f0d, 0xf6d: 0x3f25, 0xf6e: 0x3f3d, 0xf6f: 0x3f3d, + 0xf70: 0x3f55, 0xf71: 0x3f55, 0xf72: 0x3f55, 0xf73: 0x3f6d, 0xf74: 0x3f85, 0xf75: 0x3f9d, + 0xf76: 0x3fb5, 0xf77: 0x3f9d, 0xf78: 0x3fcd, 0xf79: 0x3fe5, 0xf7a: 0x3f6d, 0xf7b: 0x3ffd, + 0xf7c: 0x4015, 0xf7d: 0x4015, 0xf7e: 0x4015, 0xf7f: 0x0040, + // Block 0x3e, offset 0xf80 + 0xf80: 0x3cc9, 0xf81: 0x3d31, 0xf82: 0x3d99, 0xf83: 0x3e01, 0xf84: 0x3e51, 0xf85: 0x3eb9, + 0xf86: 0x3f09, 0xf87: 0x3f59, 0xf88: 0x3fd9, 0xf89: 0x4041, 0xf8a: 0x4091, 0xf8b: 0x40e1, + 0xf8c: 0x4131, 0xf8d: 0x4199, 0xf8e: 0x4201, 0xf8f: 0x4251, 0xf90: 0x42a1, 0xf91: 0x42d9, + 0xf92: 0x4329, 0xf93: 0x4391, 0xf94: 0x43f9, 0xf95: 0x4431, 0xf96: 0x44b1, 0xf97: 0x4549, + 0xf98: 0x45c9, 0xf99: 0x4619, 0xf9a: 0x4699, 0xf9b: 0x4719, 0xf9c: 0x4781, 0xf9d: 0x47d1, + 0xf9e: 0x4821, 0xf9f: 0x4871, 0xfa0: 0x48d9, 0xfa1: 0x4959, 0xfa2: 0x49c1, 0xfa3: 0x4a11, + 0xfa4: 0x4a61, 0xfa5: 0x4ab1, 0xfa6: 0x4ae9, 0xfa7: 0x4b21, 0xfa8: 0x4b59, 0xfa9: 0x4b91, + 0xfaa: 0x4be1, 0xfab: 0x4c31, 0xfac: 0x4cb1, 0xfad: 0x4d01, 0xfae: 0x4d69, 0xfaf: 0x4de9, + 0xfb0: 0x4e39, 0xfb1: 0x4e71, 0xfb2: 0x4ea9, 0xfb3: 0x4f29, 0xfb4: 0x4f91, 0xfb5: 0x5011, + 0xfb6: 0x5061, 0xfb7: 0x50e1, 0xfb8: 0x5119, 0xfb9: 0x5169, 0xfba: 0x51b9, 0xfbb: 0x5209, + 0xfbc: 0x5259, 0xfbd: 0x52a9, 0xfbe: 0x5311, 0xfbf: 0x5361, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x5399, 0xfc1: 0x53e9, 0xfc2: 0x5439, 0xfc3: 0x5489, 0xfc4: 0x54f1, 0xfc5: 0x5541, + 0xfc6: 0x5591, 0xfc7: 0x55e1, 0xfc8: 0x5661, 0xfc9: 0x56c9, 0xfca: 0x5701, 0xfcb: 0x5781, + 0xfcc: 0x57b9, 0xfcd: 0x5821, 0xfce: 0x5889, 0xfcf: 0x58d9, 0xfd0: 0x5929, 0xfd1: 0x5979, + 0xfd2: 0x59e1, 0xfd3: 0x5a19, 0xfd4: 0x5a69, 0xfd5: 0x5ad1, 0xfd6: 0x5b09, 0xfd7: 0x5b89, + 0xfd8: 0x5bd9, 0xfd9: 0x5c01, 0xfda: 0x5c29, 0xfdb: 0x5c51, 0xfdc: 0x5c79, 0xfdd: 0x5ca1, + 0xfde: 0x5cc9, 0xfdf: 0x5cf1, 0xfe0: 0x5d19, 0xfe1: 0x5d41, 0xfe2: 0x5d69, 0xfe3: 0x5d99, + 0xfe4: 0x5dc9, 0xfe5: 0x5df9, 0xfe6: 0x5e29, 0xfe7: 0x5e59, 0xfe8: 0x5e89, 0xfe9: 0x5eb9, + 0xfea: 0x5ee9, 0xfeb: 0x5f19, 0xfec: 0x5f49, 0xfed: 0x5f79, 0xfee: 0x5fa9, 0xfef: 0x5fd9, + 0xff0: 0x6009, 0xff1: 0x402d, 0xff2: 0x6039, 0xff3: 0x6051, 0xff4: 0x404d, 0xff5: 0x6069, + 0xff6: 0x6081, 0xff7: 0x6099, 0xff8: 0x406d, 0xff9: 0x406d, 0xffa: 0x60b1, 0xffb: 0x60c9, + 0xffc: 0x6101, 0xffd: 0x6139, 0xffe: 0x6171, 0xfff: 0x61a9, + // Block 0x40, offset 0x1000 + 0x1000: 0x6211, 0x1001: 0x6229, 0x1002: 0x408d, 0x1003: 0x6241, 0x1004: 0x6259, 0x1005: 0x6271, + 0x1006: 0x6289, 0x1007: 0x62a1, 0x1008: 0x40ad, 0x1009: 0x62b9, 0x100a: 0x62e1, 0x100b: 0x62f9, + 0x100c: 0x40cd, 0x100d: 0x40cd, 0x100e: 0x6311, 0x100f: 0x6329, 0x1010: 0x6341, 
0x1011: 0x40ed, + 0x1012: 0x410d, 0x1013: 0x412d, 0x1014: 0x414d, 0x1015: 0x416d, 0x1016: 0x6359, 0x1017: 0x6371, + 0x1018: 0x6389, 0x1019: 0x63a1, 0x101a: 0x63b9, 0x101b: 0x418d, 0x101c: 0x63d1, 0x101d: 0x63e9, + 0x101e: 0x6401, 0x101f: 0x41ad, 0x1020: 0x41cd, 0x1021: 0x6419, 0x1022: 0x41ed, 0x1023: 0x420d, + 0x1024: 0x422d, 0x1025: 0x6431, 0x1026: 0x424d, 0x1027: 0x6449, 0x1028: 0x6479, 0x1029: 0x6211, + 0x102a: 0x426d, 0x102b: 0x428d, 0x102c: 0x42ad, 0x102d: 0x42cd, 0x102e: 0x64b1, 0x102f: 0x64f1, + 0x1030: 0x6539, 0x1031: 0x6551, 0x1032: 0x42ed, 0x1033: 0x6569, 0x1034: 0x6581, 0x1035: 0x6599, + 0x1036: 0x430d, 0x1037: 0x65b1, 0x1038: 0x65c9, 0x1039: 0x65b1, 0x103a: 0x65e1, 0x103b: 0x65f9, + 0x103c: 0x432d, 0x103d: 0x6611, 0x103e: 0x6629, 0x103f: 0x6611, + // Block 0x41, offset 0x1040 + 0x1040: 0x434d, 0x1041: 0x436d, 0x1042: 0x0040, 0x1043: 0x6641, 0x1044: 0x6659, 0x1045: 0x6671, + 0x1046: 0x6689, 0x1047: 0x0040, 0x1048: 0x66c1, 0x1049: 0x66d9, 0x104a: 0x66f1, 0x104b: 0x6709, + 0x104c: 0x6721, 0x104d: 0x6739, 0x104e: 0x6401, 0x104f: 0x6751, 0x1050: 0x6769, 0x1051: 0x6781, + 0x1052: 0x438d, 0x1053: 0x6799, 0x1054: 0x6289, 0x1055: 0x43ad, 0x1056: 0x43cd, 0x1057: 0x67b1, + 0x1058: 0x0040, 0x1059: 0x43ed, 0x105a: 0x67c9, 0x105b: 0x67e1, 0x105c: 0x67f9, 0x105d: 0x6811, + 0x105e: 0x6829, 0x105f: 0x6859, 0x1060: 0x6889, 0x1061: 0x68b1, 0x1062: 0x68d9, 0x1063: 0x6901, + 0x1064: 0x6929, 0x1065: 0x6951, 0x1066: 0x6979, 0x1067: 0x69a1, 0x1068: 0x69c9, 0x1069: 0x69f1, + 0x106a: 0x6a21, 0x106b: 0x6a51, 0x106c: 0x6a81, 0x106d: 0x6ab1, 0x106e: 0x6ae1, 0x106f: 0x6b11, + 0x1070: 0x6b41, 0x1071: 0x6b71, 0x1072: 0x6ba1, 0x1073: 0x6bd1, 0x1074: 0x6c01, 0x1075: 0x6c31, + 0x1076: 0x6c61, 0x1077: 0x6c91, 0x1078: 0x6cc1, 0x1079: 0x6cf1, 0x107a: 0x6d21, 0x107b: 0x6d51, + 0x107c: 0x6d81, 0x107d: 0x6db1, 0x107e: 0x6de1, 0x107f: 0x440d, + // Block 0x42, offset 0x1080 + 0x1080: 0xe00d, 0x1081: 0x0008, 0x1082: 0xe00d, 0x1083: 0x0008, 0x1084: 0xe00d, 0x1085: 0x0008, + 0x1086: 0xe00d, 0x1087: 0x0008, 0x1088: 0xe00d, 0x1089: 0x0008, 0x108a: 0xe00d, 0x108b: 0x0008, + 0x108c: 0xe00d, 0x108d: 0x0008, 0x108e: 0xe00d, 0x108f: 0x0008, 0x1090: 0xe00d, 0x1091: 0x0008, + 0x1092: 0xe00d, 0x1093: 0x0008, 0x1094: 0xe00d, 0x1095: 0x0008, 0x1096: 0xe00d, 0x1097: 0x0008, + 0x1098: 0xe00d, 0x1099: 0x0008, 0x109a: 0xe00d, 0x109b: 0x0008, 0x109c: 0xe00d, 0x109d: 0x0008, + 0x109e: 0xe00d, 0x109f: 0x0008, 0x10a0: 0xe00d, 0x10a1: 0x0008, 0x10a2: 0xe00d, 0x10a3: 0x0008, + 0x10a4: 0xe00d, 0x10a5: 0x0008, 0x10a6: 0xe00d, 0x10a7: 0x0008, 0x10a8: 0xe00d, 0x10a9: 0x0008, + 0x10aa: 0xe00d, 0x10ab: 0x0008, 0x10ac: 0xe00d, 0x10ad: 0x0008, 0x10ae: 0x0008, 0x10af: 0x1308, + 0x10b0: 0x1318, 0x10b1: 0x1318, 0x10b2: 0x1318, 0x10b3: 0x0018, 0x10b4: 0x1308, 0x10b5: 0x1308, + 0x10b6: 0x1308, 0x10b7: 0x1308, 0x10b8: 0x1308, 0x10b9: 0x1308, 0x10ba: 0x1308, 0x10bb: 0x1308, + 0x10bc: 0x1308, 0x10bd: 0x1308, 0x10be: 0x0018, 0x10bf: 0x0008, + // Block 0x43, offset 0x10c0 + 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, + 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, + 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, + 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, + 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0x0ea1, 0x10dd: 0x6e11, + 0x10de: 0x1308, 0x10df: 0x1308, 0x10e0: 0x0008, 0x10e1: 0x0008, 0x10e2: 0x0008, 0x10e3: 0x0008, + 0x10e4: 
0x0008, 0x10e5: 0x0008, 0x10e6: 0x0008, 0x10e7: 0x0008, 0x10e8: 0x0008, 0x10e9: 0x0008, + 0x10ea: 0x0008, 0x10eb: 0x0008, 0x10ec: 0x0008, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x0008, + 0x10f0: 0x0008, 0x10f1: 0x0008, 0x10f2: 0x0008, 0x10f3: 0x0008, 0x10f4: 0x0008, 0x10f5: 0x0008, + 0x10f6: 0x0008, 0x10f7: 0x0008, 0x10f8: 0x0008, 0x10f9: 0x0008, 0x10fa: 0x0008, 0x10fb: 0x0008, + 0x10fc: 0x0008, 0x10fd: 0x0008, 0x10fe: 0x0008, 0x10ff: 0x0008, + // Block 0x44, offset 0x1100 + 0x1100: 0x0018, 0x1101: 0x0018, 0x1102: 0x0018, 0x1103: 0x0018, 0x1104: 0x0018, 0x1105: 0x0018, + 0x1106: 0x0018, 0x1107: 0x0018, 0x1108: 0x0018, 0x1109: 0x0018, 0x110a: 0x0018, 0x110b: 0x0018, + 0x110c: 0x0018, 0x110d: 0x0018, 0x110e: 0x0018, 0x110f: 0x0018, 0x1110: 0x0018, 0x1111: 0x0018, + 0x1112: 0x0018, 0x1113: 0x0018, 0x1114: 0x0018, 0x1115: 0x0018, 0x1116: 0x0018, 0x1117: 0x0008, + 0x1118: 0x0008, 0x1119: 0x0008, 0x111a: 0x0008, 0x111b: 0x0008, 0x111c: 0x0008, 0x111d: 0x0008, + 0x111e: 0x0008, 0x111f: 0x0008, 0x1120: 0x0018, 0x1121: 0x0018, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0xe00d, 0x112f: 0x0008, + 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0xe00d, 0x1133: 0x0008, 0x1134: 0xe00d, 0x1135: 0x0008, + 0x1136: 0xe00d, 0x1137: 0x0008, 0x1138: 0xe00d, 0x1139: 0x0008, 0x113a: 0xe00d, 0x113b: 0x0008, + 0x113c: 0xe00d, 0x113d: 0x0008, 0x113e: 0xe00d, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0xe00d, 0x115d: 0x0008, + 0x115e: 0xe00d, 0x115f: 0x0008, 0x1160: 0xe00d, 0x1161: 0x0008, 0x1162: 0xe00d, 0x1163: 0x0008, + 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, + 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, + 0x1170: 0xe0fd, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0xe01d, 0x117a: 0x0008, 0x117b: 0xe03d, + 0x117c: 0x0008, 0x117d: 0x442d, 0x117e: 0xe00d, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, + 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0x0008, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0xe03d, + 0x118c: 0x0008, 0x118d: 0x11d9, 0x118e: 0x0008, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, + 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0x0008, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, + 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, + 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0x6e29, 0x11ab: 0x1029, 0x11ac: 0x11c1, 0x11ad: 0x6e41, 0x11ae: 0x1221, 0x11af: 0x0040, + 0x11b0: 0x6e59, 0x11b1: 0x6e71, 0x11b2: 0x1239, 0x11b3: 0x444d, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0x0040, 0x11b9: 0x0040, 0x11ba: 0x0040, 0x11bb: 0x0040, + 0x11bc: 0x0040, 0x11bd: 0x0040, 0x11be: 0x0040, 0x11bf: 0x0040, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x64d5, 0x11c1: 0x64f5, 0x11c2: 0x6515, 0x11c3: 0x6535, 0x11c4: 0x6555, 0x11c5: 0x6575, + 0x11c6: 0x6595, 0x11c7: 0x65b5, 0x11c8: 0x65d5, 0x11c9: 0x65f5, 0x11ca: 0x6615, 0x11cb: 0x6635, + 0x11cc: 0x6655, 0x11cd: 0x6675, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0x6695, 0x11d1: 0x0008, + 0x11d2: 0x66b5, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x66d5, 0x11d6: 0x66f5, 0x11d7: 0x6715, + 0x11d8: 0x6735, 0x11d9: 0x6755, 0x11da: 0x6775, 0x11db: 0x6795, 0x11dc: 0x67b5, 0x11dd: 0x67d5, + 0x11de: 0x67f5, 0x11df: 0x0008, 0x11e0: 0x6815, 0x11e1: 0x0008, 0x11e2: 0x6835, 0x11e3: 0x0008, + 0x11e4: 0x0008, 0x11e5: 0x6855, 0x11e6: 0x6875, 0x11e7: 0x0008, 0x11e8: 0x0008, 0x11e9: 0x0008, + 0x11ea: 0x6895, 0x11eb: 0x68b5, 0x11ec: 0x68d5, 0x11ed: 0x68f5, 0x11ee: 0x6915, 0x11ef: 0x6935, + 0x11f0: 0x6955, 0x11f1: 0x6975, 0x11f2: 0x6995, 0x11f3: 0x69b5, 0x11f4: 0x69d5, 0x11f5: 0x69f5, + 0x11f6: 0x6a15, 0x11f7: 0x6a35, 0x11f8: 0x6a55, 0x11f9: 0x6a75, 0x11fa: 0x6a95, 0x11fb: 0x6ab5, + 0x11fc: 0x6ad5, 0x11fd: 0x6af5, 0x11fe: 0x6b15, 0x11ff: 0x6b35, + // Block 0x48, offset 0x1200 + 0x1200: 0x7a95, 0x1201: 0x7ab5, 0x1202: 0x7ad5, 0x1203: 0x7af5, 0x1204: 0x7b15, 0x1205: 0x7b35, + 0x1206: 0x7b55, 0x1207: 0x7b75, 0x1208: 0x7b95, 0x1209: 0x7bb5, 0x120a: 0x7bd5, 0x120b: 0x7bf5, + 0x120c: 0x7c15, 0x120d: 0x7c35, 0x120e: 0x7c55, 0x120f: 0x6ec9, 0x1210: 0x6ef1, 0x1211: 0x6f19, + 0x1212: 0x7c75, 0x1213: 0x7c95, 0x1214: 0x7cb5, 0x1215: 0x6f41, 0x1216: 0x6f69, 0x1217: 0x6f91, + 0x1218: 0x7cd5, 0x1219: 0x7cf5, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040, + 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040, + 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 0x0040, 0x1228: 0x0040, 0x1229: 0x0040, + 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040, + 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x0040, 0x1233: 0x0040, 0x1234: 0x0040, 0x1235: 0x0040, + 0x1236: 0x0040, 0x1237: 0x0040, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x6fb9, 0x1241: 0x6fd1, 0x1242: 0x6fe9, 0x1243: 0x7d15, 0x1244: 0x7d35, 0x1245: 0x7001, + 0x1246: 0x7001, 0x1247: 0x0040, 0x1248: 0x0040, 0x1249: 0x0040, 0x124a: 0x0040, 0x124b: 0x0040, + 0x124c: 0x0040, 0x124d: 0x0040, 0x124e: 0x0040, 0x124f: 0x0040, 0x1250: 0x0040, 0x1251: 0x0040, + 0x1252: 0x0040, 0x1253: 0x7019, 0x1254: 0x7041, 0x1255: 0x7069, 0x1256: 0x7091, 0x1257: 0x70b9, + 0x1258: 0x0040, 0x1259: 0x0040, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x70e1, + 0x125e: 0x1308, 0x125f: 0x7109, 0x1260: 0x7131, 0x1261: 0x20a9, 0x1262: 0x20f1, 0x1263: 0x7149, + 0x1264: 0x7161, 0x1265: 0x7179, 0x1266: 0x7191, 0x1267: 0x71a9, 0x1268: 0x71c1, 0x1269: 0x1fb2, + 0x126a: 0x71d9, 0x126b: 0x7201, 0x126c: 0x7229, 0x126d: 0x7261, 0x126e: 0x7299, 0x126f: 0x72c1, + 0x1270: 0x72e9, 0x1271: 0x7311, 0x1272: 0x7339, 0x1273: 0x7361, 0x1274: 0x7389, 0x1275: 0x73b1, + 0x1276: 0x73d9, 0x1277: 0x0040, 0x1278: 0x7401, 0x1279: 0x7429, 0x127a: 0x7451, 0x127b: 0x7479, + 0x127c: 0x74a1, 0x127d: 0x0040, 0x127e: 0x74c9, 0x127f: 0x0040, + // Block 0x4a, offset 0x1280 + 0x1280: 0x74f1, 0x1281: 0x7519, 0x1282: 0x0040, 0x1283: 0x7541, 0x1284: 0x7569, 0x1285: 0x0040, + 0x1286: 0x7591, 0x1287: 0x75b9, 0x1288: 0x75e1, 0x1289: 
0x7609, 0x128a: 0x7631, 0x128b: 0x7659, + 0x128c: 0x7681, 0x128d: 0x76a9, 0x128e: 0x76d1, 0x128f: 0x76f9, 0x1290: 0x7721, 0x1291: 0x7721, + 0x1292: 0x7739, 0x1293: 0x7739, 0x1294: 0x7739, 0x1295: 0x7739, 0x1296: 0x7751, 0x1297: 0x7751, + 0x1298: 0x7751, 0x1299: 0x7751, 0x129a: 0x7769, 0x129b: 0x7769, 0x129c: 0x7769, 0x129d: 0x7769, + 0x129e: 0x7781, 0x129f: 0x7781, 0x12a0: 0x7781, 0x12a1: 0x7781, 0x12a2: 0x7799, 0x12a3: 0x7799, + 0x12a4: 0x7799, 0x12a5: 0x7799, 0x12a6: 0x77b1, 0x12a7: 0x77b1, 0x12a8: 0x77b1, 0x12a9: 0x77b1, + 0x12aa: 0x77c9, 0x12ab: 0x77c9, 0x12ac: 0x77c9, 0x12ad: 0x77c9, 0x12ae: 0x77e1, 0x12af: 0x77e1, + 0x12b0: 0x77e1, 0x12b1: 0x77e1, 0x12b2: 0x77f9, 0x12b3: 0x77f9, 0x12b4: 0x77f9, 0x12b5: 0x77f9, + 0x12b6: 0x7811, 0x12b7: 0x7811, 0x12b8: 0x7811, 0x12b9: 0x7811, 0x12ba: 0x7829, 0x12bb: 0x7829, + 0x12bc: 0x7829, 0x12bd: 0x7829, 0x12be: 0x7841, 0x12bf: 0x7841, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x7841, 0x12c1: 0x7841, 0x12c2: 0x7859, 0x12c3: 0x7859, 0x12c4: 0x7871, 0x12c5: 0x7871, + 0x12c6: 0x7889, 0x12c7: 0x7889, 0x12c8: 0x78a1, 0x12c9: 0x78a1, 0x12ca: 0x78b9, 0x12cb: 0x78b9, + 0x12cc: 0x78d1, 0x12cd: 0x78d1, 0x12ce: 0x78e9, 0x12cf: 0x78e9, 0x12d0: 0x78e9, 0x12d1: 0x78e9, + 0x12d2: 0x7901, 0x12d3: 0x7901, 0x12d4: 0x7901, 0x12d5: 0x7901, 0x12d6: 0x7919, 0x12d7: 0x7919, + 0x12d8: 0x7919, 0x12d9: 0x7919, 0x12da: 0x7931, 0x12db: 0x7931, 0x12dc: 0x7931, 0x12dd: 0x7931, + 0x12de: 0x7949, 0x12df: 0x7949, 0x12e0: 0x7961, 0x12e1: 0x7961, 0x12e2: 0x7961, 0x12e3: 0x7961, + 0x12e4: 0x7979, 0x12e5: 0x7979, 0x12e6: 0x7991, 0x12e7: 0x7991, 0x12e8: 0x7991, 0x12e9: 0x7991, + 0x12ea: 0x79a9, 0x12eb: 0x79a9, 0x12ec: 0x79a9, 0x12ed: 0x79a9, 0x12ee: 0x79c1, 0x12ef: 0x79c1, + 0x12f0: 0x79d9, 0x12f1: 0x79d9, 0x12f2: 0x0018, 0x12f3: 0x0018, 0x12f4: 0x0018, 0x12f5: 0x0018, + 0x12f6: 0x0018, 0x12f7: 0x0018, 0x12f8: 0x0018, 0x12f9: 0x0018, 0x12fa: 0x0018, 0x12fb: 0x0018, + 0x12fc: 0x0018, 0x12fd: 0x0018, 0x12fe: 0x0018, 0x12ff: 0x0018, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0018, 0x1301: 0x0018, 0x1302: 0x0040, 0x1303: 0x0040, 0x1304: 0x0040, 0x1305: 0x0040, + 0x1306: 0x0040, 0x1307: 0x0040, 0x1308: 0x0040, 0x1309: 0x0040, 0x130a: 0x0040, 0x130b: 0x0040, + 0x130c: 0x0040, 0x130d: 0x0040, 0x130e: 0x0040, 0x130f: 0x0040, 0x1310: 0x0040, 0x1311: 0x0040, + 0x1312: 0x0040, 0x1313: 0x79f1, 0x1314: 0x79f1, 0x1315: 0x79f1, 0x1316: 0x79f1, 0x1317: 0x7a09, + 0x1318: 0x7a09, 0x1319: 0x7a21, 0x131a: 0x7a21, 0x131b: 0x7a39, 0x131c: 0x7a39, 0x131d: 0x0479, + 0x131e: 0x7a51, 0x131f: 0x7a51, 0x1320: 0x7a69, 0x1321: 0x7a69, 0x1322: 0x7a81, 0x1323: 0x7a81, + 0x1324: 0x7a99, 0x1325: 0x7a99, 0x1326: 0x7a99, 0x1327: 0x7a99, 0x1328: 0x7ab1, 0x1329: 0x7ab1, + 0x132a: 0x7ac9, 0x132b: 0x7ac9, 0x132c: 0x7af1, 0x132d: 0x7af1, 0x132e: 0x7b19, 0x132f: 0x7b19, + 0x1330: 0x7b41, 0x1331: 0x7b41, 0x1332: 0x7b69, 0x1333: 0x7b69, 0x1334: 0x7b91, 0x1335: 0x7b91, + 0x1336: 0x7bb9, 0x1337: 0x7bb9, 0x1338: 0x7bb9, 0x1339: 0x7be1, 0x133a: 0x7be1, 0x133b: 0x7be1, + 0x133c: 0x7c09, 0x133d: 0x7c09, 0x133e: 0x7c09, 0x133f: 0x7c09, + // Block 0x4d, offset 0x1340 + 0x1340: 0x85f9, 0x1341: 0x8621, 0x1342: 0x8649, 0x1343: 0x8671, 0x1344: 0x8699, 0x1345: 0x86c1, + 0x1346: 0x86e9, 0x1347: 0x8711, 0x1348: 0x8739, 0x1349: 0x8761, 0x134a: 0x8789, 0x134b: 0x87b1, + 0x134c: 0x87d9, 0x134d: 0x8801, 0x134e: 0x8829, 0x134f: 0x8851, 0x1350: 0x8879, 0x1351: 0x88a1, + 0x1352: 0x88c9, 0x1353: 0x88f1, 0x1354: 0x8919, 0x1355: 0x8941, 0x1356: 0x8969, 0x1357: 0x8991, + 0x1358: 0x89b9, 0x1359: 0x89e1, 0x135a: 0x8a09, 0x135b: 0x8a31, 0x135c: 0x8a59, 
0x135d: 0x8a81, + 0x135e: 0x8aaa, 0x135f: 0x8ada, 0x1360: 0x8b0a, 0x1361: 0x8b3a, 0x1362: 0x8b6a, 0x1363: 0x8b9a, + 0x1364: 0x8bc9, 0x1365: 0x8bf1, 0x1366: 0x7c71, 0x1367: 0x8c19, 0x1368: 0x7be1, 0x1369: 0x7c99, + 0x136a: 0x8c41, 0x136b: 0x8c69, 0x136c: 0x7d39, 0x136d: 0x8c91, 0x136e: 0x7d61, 0x136f: 0x7d89, + 0x1370: 0x8cb9, 0x1371: 0x8ce1, 0x1372: 0x7e29, 0x1373: 0x8d09, 0x1374: 0x7e51, 0x1375: 0x7e79, + 0x1376: 0x8d31, 0x1377: 0x8d59, 0x1378: 0x7ec9, 0x1379: 0x8d81, 0x137a: 0x7ef1, 0x137b: 0x7f19, + 0x137c: 0x83a1, 0x137d: 0x83c9, 0x137e: 0x8441, 0x137f: 0x8469, + // Block 0x4e, offset 0x1380 + 0x1380: 0x8491, 0x1381: 0x8531, 0x1382: 0x8559, 0x1383: 0x8581, 0x1384: 0x85a9, 0x1385: 0x8649, + 0x1386: 0x8671, 0x1387: 0x8699, 0x1388: 0x8da9, 0x1389: 0x8739, 0x138a: 0x8dd1, 0x138b: 0x8df9, + 0x138c: 0x8829, 0x138d: 0x8e21, 0x138e: 0x8851, 0x138f: 0x8879, 0x1390: 0x8a81, 0x1391: 0x8e49, + 0x1392: 0x8e71, 0x1393: 0x89b9, 0x1394: 0x8e99, 0x1395: 0x89e1, 0x1396: 0x8a09, 0x1397: 0x7c21, + 0x1398: 0x7c49, 0x1399: 0x8ec1, 0x139a: 0x7c71, 0x139b: 0x8ee9, 0x139c: 0x7cc1, 0x139d: 0x7ce9, + 0x139e: 0x7d11, 0x139f: 0x7d39, 0x13a0: 0x8f11, 0x13a1: 0x7db1, 0x13a2: 0x7dd9, 0x13a3: 0x7e01, + 0x13a4: 0x7e29, 0x13a5: 0x8f39, 0x13a6: 0x7ec9, 0x13a7: 0x7f41, 0x13a8: 0x7f69, 0x13a9: 0x7f91, + 0x13aa: 0x7fb9, 0x13ab: 0x7fe1, 0x13ac: 0x8031, 0x13ad: 0x8059, 0x13ae: 0x8081, 0x13af: 0x80a9, + 0x13b0: 0x80d1, 0x13b1: 0x80f9, 0x13b2: 0x8f61, 0x13b3: 0x8121, 0x13b4: 0x8149, 0x13b5: 0x8171, + 0x13b6: 0x8199, 0x13b7: 0x81c1, 0x13b8: 0x81e9, 0x13b9: 0x8239, 0x13ba: 0x8261, 0x13bb: 0x8289, + 0x13bc: 0x82b1, 0x13bd: 0x82d9, 0x13be: 0x8301, 0x13bf: 0x8329, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x8351, 0x13c1: 0x8379, 0x13c2: 0x83f1, 0x13c3: 0x8419, 0x13c4: 0x84b9, 0x13c5: 0x84e1, + 0x13c6: 0x8509, 0x13c7: 0x8531, 0x13c8: 0x8559, 0x13c9: 0x85d1, 0x13ca: 0x85f9, 0x13cb: 0x8621, + 0x13cc: 0x8649, 0x13cd: 0x8f89, 0x13ce: 0x86c1, 0x13cf: 0x86e9, 0x13d0: 0x8711, 0x13d1: 0x8739, + 0x13d2: 0x87b1, 0x13d3: 0x87d9, 0x13d4: 0x8801, 0x13d5: 0x8829, 0x13d6: 0x8fb1, 0x13d7: 0x88a1, + 0x13d8: 0x88c9, 0x13d9: 0x8fd9, 0x13da: 0x8941, 0x13db: 0x8969, 0x13dc: 0x8991, 0x13dd: 0x89b9, + 0x13de: 0x9001, 0x13df: 0x7c71, 0x13e0: 0x8ee9, 0x13e1: 0x7d39, 0x13e2: 0x8f11, 0x13e3: 0x7e29, + 0x13e4: 0x8f39, 0x13e5: 0x7ec9, 0x13e6: 0x9029, 0x13e7: 0x80d1, 0x13e8: 0x9051, 0x13e9: 0x9079, + 0x13ea: 0x90a1, 0x13eb: 0x8531, 0x13ec: 0x8559, 0x13ed: 0x8649, 0x13ee: 0x8829, 0x13ef: 0x8fb1, + 0x13f0: 0x89b9, 0x13f1: 0x9001, 0x13f2: 0x90c9, 0x13f3: 0x9101, 0x13f4: 0x9139, 0x13f5: 0x9171, + 0x13f6: 0x9199, 0x13f7: 0x91c1, 0x13f8: 0x91e9, 0x13f9: 0x9211, 0x13fa: 0x9239, 0x13fb: 0x9261, + 0x13fc: 0x9289, 0x13fd: 0x92b1, 0x13fe: 0x92d9, 0x13ff: 0x9301, + // Block 0x50, offset 0x1400 + 0x1400: 0x9329, 0x1401: 0x9351, 0x1402: 0x9379, 0x1403: 0x93a1, 0x1404: 0x93c9, 0x1405: 0x93f1, + 0x1406: 0x9419, 0x1407: 0x9441, 0x1408: 0x9469, 0x1409: 0x9491, 0x140a: 0x94b9, 0x140b: 0x94e1, + 0x140c: 0x9079, 0x140d: 0x9509, 0x140e: 0x9531, 0x140f: 0x9559, 0x1410: 0x9581, 0x1411: 0x9171, + 0x1412: 0x9199, 0x1413: 0x91c1, 0x1414: 0x91e9, 0x1415: 0x9211, 0x1416: 0x9239, 0x1417: 0x9261, + 0x1418: 0x9289, 0x1419: 0x92b1, 0x141a: 0x92d9, 0x141b: 0x9301, 0x141c: 0x9329, 0x141d: 0x9351, + 0x141e: 0x9379, 0x141f: 0x93a1, 0x1420: 0x93c9, 0x1421: 0x93f1, 0x1422: 0x9419, 0x1423: 0x9441, + 0x1424: 0x9469, 0x1425: 0x9491, 0x1426: 0x94b9, 0x1427: 0x94e1, 0x1428: 0x9079, 0x1429: 0x9509, + 0x142a: 0x9531, 0x142b: 0x9559, 0x142c: 0x9581, 0x142d: 0x9491, 0x142e: 0x94b9, 0x142f: 0x94e1, + 0x1430: 
0x9079, 0x1431: 0x9051, 0x1432: 0x90a1, 0x1433: 0x8211, 0x1434: 0x8059, 0x1435: 0x8081, + 0x1436: 0x80a9, 0x1437: 0x9491, 0x1438: 0x94b9, 0x1439: 0x94e1, 0x143a: 0x8211, 0x143b: 0x8239, + 0x143c: 0x95a9, 0x143d: 0x95a9, 0x143e: 0x0018, 0x143f: 0x0018, + // Block 0x51, offset 0x1440 + 0x1440: 0x0040, 0x1441: 0x0040, 0x1442: 0x0040, 0x1443: 0x0040, 0x1444: 0x0040, 0x1445: 0x0040, + 0x1446: 0x0040, 0x1447: 0x0040, 0x1448: 0x0040, 0x1449: 0x0040, 0x144a: 0x0040, 0x144b: 0x0040, + 0x144c: 0x0040, 0x144d: 0x0040, 0x144e: 0x0040, 0x144f: 0x0040, 0x1450: 0x95d1, 0x1451: 0x9609, + 0x1452: 0x9609, 0x1453: 0x9641, 0x1454: 0x9679, 0x1455: 0x96b1, 0x1456: 0x96e9, 0x1457: 0x9721, + 0x1458: 0x9759, 0x1459: 0x9759, 0x145a: 0x9791, 0x145b: 0x97c9, 0x145c: 0x9801, 0x145d: 0x9839, + 0x145e: 0x9871, 0x145f: 0x98a9, 0x1460: 0x98a9, 0x1461: 0x98e1, 0x1462: 0x9919, 0x1463: 0x9919, + 0x1464: 0x9951, 0x1465: 0x9951, 0x1466: 0x9989, 0x1467: 0x99c1, 0x1468: 0x99c1, 0x1469: 0x99f9, + 0x146a: 0x9a31, 0x146b: 0x9a31, 0x146c: 0x9a69, 0x146d: 0x9a69, 0x146e: 0x9aa1, 0x146f: 0x9ad9, + 0x1470: 0x9ad9, 0x1471: 0x9b11, 0x1472: 0x9b11, 0x1473: 0x9b49, 0x1474: 0x9b81, 0x1475: 0x9bb9, + 0x1476: 0x9bf1, 0x1477: 0x9bf1, 0x1478: 0x9c29, 0x1479: 0x9c61, 0x147a: 0x9c99, 0x147b: 0x9cd1, + 0x147c: 0x9d09, 0x147d: 0x9d09, 0x147e: 0x9d41, 0x147f: 0x9d79, + // Block 0x52, offset 0x1480 + 0x1480: 0xa949, 0x1481: 0xa981, 0x1482: 0xa9b9, 0x1483: 0xa8a1, 0x1484: 0x9bb9, 0x1485: 0x9989, + 0x1486: 0xa9f1, 0x1487: 0xaa29, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, + 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x0040, 0x1491: 0x0040, + 0x1492: 0x0040, 0x1493: 0x0040, 0x1494: 0x0040, 0x1495: 0x0040, 0x1496: 0x0040, 0x1497: 0x0040, + 0x1498: 0x0040, 0x1499: 0x0040, 0x149a: 0x0040, 0x149b: 0x0040, 0x149c: 0x0040, 0x149d: 0x0040, + 0x149e: 0x0040, 0x149f: 0x0040, 0x14a0: 0x0040, 0x14a1: 0x0040, 0x14a2: 0x0040, 0x14a3: 0x0040, + 0x14a4: 0x0040, 0x14a5: 0x0040, 0x14a6: 0x0040, 0x14a7: 0x0040, 0x14a8: 0x0040, 0x14a9: 0x0040, + 0x14aa: 0x0040, 0x14ab: 0x0040, 0x14ac: 0x0040, 0x14ad: 0x0040, 0x14ae: 0x0040, 0x14af: 0x0040, + 0x14b0: 0xaa61, 0x14b1: 0xaa99, 0x14b2: 0xaad1, 0x14b3: 0xab19, 0x14b4: 0xab61, 0x14b5: 0xaba9, + 0x14b6: 0xabf1, 0x14b7: 0xac39, 0x14b8: 0xac81, 0x14b9: 0xacc9, 0x14ba: 0xad02, 0x14bb: 0xae12, + 0x14bc: 0xae91, 0x14bd: 0x0018, 0x14be: 0x0040, 0x14bf: 0x0040, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x13c0, 0x14c1: 0x13c0, 0x14c2: 0x13c0, 0x14c3: 0x13c0, 0x14c4: 0x13c0, 0x14c5: 0x13c0, + 0x14c6: 0x13c0, 0x14c7: 0x13c0, 0x14c8: 0x13c0, 0x14c9: 0x13c0, 0x14ca: 0x13c0, 0x14cb: 0x13c0, + 0x14cc: 0x13c0, 0x14cd: 0x13c0, 0x14ce: 0x13c0, 0x14cf: 0x13c0, 0x14d0: 0xaeda, 0x14d1: 0x7d55, + 0x14d2: 0x0040, 0x14d3: 0xaeea, 0x14d4: 0x03c2, 0x14d5: 0xaefa, 0x14d6: 0xaf0a, 0x14d7: 0x7d75, + 0x14d8: 0x7d95, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, + 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x1308, 0x14e1: 0x1308, 0x14e2: 0x1308, 0x14e3: 0x1308, + 0x14e4: 0x1308, 0x14e5: 0x1308, 0x14e6: 0x1308, 0x14e7: 0x1308, 0x14e8: 0x1308, 0x14e9: 0x1308, + 0x14ea: 0x1308, 0x14eb: 0x1308, 0x14ec: 0x1308, 0x14ed: 0x1308, 0x14ee: 0x1308, 0x14ef: 0x1308, + 0x14f0: 0x0040, 0x14f1: 0x7db5, 0x14f2: 0x7dd5, 0x14f3: 0xaf1a, 0x14f4: 0xaf1a, 0x14f5: 0x1fd2, + 0x14f6: 0x1fe2, 0x14f7: 0xaf2a, 0x14f8: 0xaf3a, 0x14f9: 0x7df5, 0x14fa: 0x7e15, 0x14fb: 0x7e35, + 0x14fc: 0x7df5, 0x14fd: 0x7e55, 0x14fe: 0x7e75, 0x14ff: 0x7e55, + // Block 0x54, offset 0x1500 + 0x1500: 0x7e95, 0x1501: 0x7eb5, 
0x1502: 0x7ed5, 0x1503: 0x7eb5, 0x1504: 0x7ef5, 0x1505: 0x0018, + 0x1506: 0x0018, 0x1507: 0xaf4a, 0x1508: 0xaf5a, 0x1509: 0x7f16, 0x150a: 0x7f36, 0x150b: 0x7f56, + 0x150c: 0x7f76, 0x150d: 0xaf1a, 0x150e: 0xaf1a, 0x150f: 0xaf1a, 0x1510: 0xaeda, 0x1511: 0x7f95, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x03c2, 0x1515: 0xaeea, 0x1516: 0xaf0a, 0x1517: 0xaefa, + 0x1518: 0x7fb5, 0x1519: 0x1fd2, 0x151a: 0x1fe2, 0x151b: 0xaf2a, 0x151c: 0xaf3a, 0x151d: 0x7e95, + 0x151e: 0x7ef5, 0x151f: 0xaf6a, 0x1520: 0xaf7a, 0x1521: 0xaf8a, 0x1522: 0x1fb2, 0x1523: 0xaf99, + 0x1524: 0xafaa, 0x1525: 0xafba, 0x1526: 0x1fc2, 0x1527: 0x0040, 0x1528: 0xafca, 0x1529: 0xafda, + 0x152a: 0xafea, 0x152b: 0xaffa, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0x7fd6, 0x1531: 0xb009, 0x1532: 0x7ff6, 0x1533: 0x0008, 0x1534: 0x8016, 0x1535: 0x0040, + 0x1536: 0x8036, 0x1537: 0xb031, 0x1538: 0x8056, 0x1539: 0xb059, 0x153a: 0x8076, 0x153b: 0xb081, + 0x153c: 0x8096, 0x153d: 0xb0a9, 0x153e: 0x80b6, 0x153f: 0xb0d1, + // Block 0x55, offset 0x1540 + 0x1540: 0xb0f9, 0x1541: 0xb111, 0x1542: 0xb111, 0x1543: 0xb129, 0x1544: 0xb129, 0x1545: 0xb141, + 0x1546: 0xb141, 0x1547: 0xb159, 0x1548: 0xb159, 0x1549: 0xb171, 0x154a: 0xb171, 0x154b: 0xb171, + 0x154c: 0xb171, 0x154d: 0xb189, 0x154e: 0xb189, 0x154f: 0xb1a1, 0x1550: 0xb1a1, 0x1551: 0xb1a1, + 0x1552: 0xb1a1, 0x1553: 0xb1b9, 0x1554: 0xb1b9, 0x1555: 0xb1d1, 0x1556: 0xb1d1, 0x1557: 0xb1d1, + 0x1558: 0xb1d1, 0x1559: 0xb1e9, 0x155a: 0xb1e9, 0x155b: 0xb1e9, 0x155c: 0xb1e9, 0x155d: 0xb201, + 0x155e: 0xb201, 0x155f: 0xb201, 0x1560: 0xb201, 0x1561: 0xb219, 0x1562: 0xb219, 0x1563: 0xb219, + 0x1564: 0xb219, 0x1565: 0xb231, 0x1566: 0xb231, 0x1567: 0xb231, 0x1568: 0xb231, 0x1569: 0xb249, + 0x156a: 0xb249, 0x156b: 0xb261, 0x156c: 0xb261, 0x156d: 0xb279, 0x156e: 0xb279, 0x156f: 0xb291, + 0x1570: 0xb291, 0x1571: 0xb2a9, 0x1572: 0xb2a9, 0x1573: 0xb2a9, 0x1574: 0xb2a9, 0x1575: 0xb2c1, + 0x1576: 0xb2c1, 0x1577: 0xb2c1, 0x1578: 0xb2c1, 0x1579: 0xb2d9, 0x157a: 0xb2d9, 0x157b: 0xb2d9, + 0x157c: 0xb2d9, 0x157d: 0xb2f1, 0x157e: 0xb2f1, 0x157f: 0xb2f1, + // Block 0x56, offset 0x1580 + 0x1580: 0xb2f1, 0x1581: 0xb309, 0x1582: 0xb309, 0x1583: 0xb309, 0x1584: 0xb309, 0x1585: 0xb321, + 0x1586: 0xb321, 0x1587: 0xb321, 0x1588: 0xb321, 0x1589: 0xb339, 0x158a: 0xb339, 0x158b: 0xb339, + 0x158c: 0xb339, 0x158d: 0xb351, 0x158e: 0xb351, 0x158f: 0xb351, 0x1590: 0xb351, 0x1591: 0xb369, + 0x1592: 0xb369, 0x1593: 0xb369, 0x1594: 0xb369, 0x1595: 0xb381, 0x1596: 0xb381, 0x1597: 0xb381, + 0x1598: 0xb381, 0x1599: 0xb399, 0x159a: 0xb399, 0x159b: 0xb399, 0x159c: 0xb399, 0x159d: 0xb3b1, + 0x159e: 0xb3b1, 0x159f: 0xb3b1, 0x15a0: 0xb3b1, 0x15a1: 0xb3c9, 0x15a2: 0xb3c9, 0x15a3: 0xb3c9, + 0x15a4: 0xb3c9, 0x15a5: 0xb3e1, 0x15a6: 0xb3e1, 0x15a7: 0xb3e1, 0x15a8: 0xb3e1, 0x15a9: 0xb3f9, + 0x15aa: 0xb3f9, 0x15ab: 0xb3f9, 0x15ac: 0xb3f9, 0x15ad: 0xb411, 0x15ae: 0xb411, 0x15af: 0x7ab1, + 0x15b0: 0x7ab1, 0x15b1: 0xb429, 0x15b2: 0xb429, 0x15b3: 0xb429, 0x15b4: 0xb429, 0x15b5: 0xb441, + 0x15b6: 0xb441, 0x15b7: 0xb469, 0x15b8: 0xb469, 0x15b9: 0xb491, 0x15ba: 0xb491, 0x15bb: 0xb4b9, + 0x15bc: 0xb4b9, 0x15bd: 0x0040, 0x15be: 0x0040, 0x15bf: 0x03c0, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0040, 0x15c1: 0xaefa, 0x15c2: 0xb4e2, 0x15c3: 0xaf6a, 0x15c4: 0xafda, 0x15c5: 0xafea, + 0x15c6: 0xaf7a, 0x15c7: 0xb4f2, 0x15c8: 0x1fd2, 0x15c9: 0x1fe2, 0x15ca: 0xaf8a, 0x15cb: 0x1fb2, + 0x15cc: 0xaeda, 0x15cd: 0xaf99, 0x15ce: 0x29d1, 0x15cf: 0xb502, 0x15d0: 0x1f41, 0x15d1: 0x00c9, + 0x15d2: 0x0069, 0x15d3: 0x0079, 0x15d4: 0x1f51, 0x15d5: 
0x1f61, 0x15d6: 0x1f71, 0x15d7: 0x1f81, + 0x15d8: 0x1f91, 0x15d9: 0x1fa1, 0x15da: 0xaeea, 0x15db: 0x03c2, 0x15dc: 0xafaa, 0x15dd: 0x1fc2, + 0x15de: 0xafba, 0x15df: 0xaf0a, 0x15e0: 0xaffa, 0x15e1: 0x0039, 0x15e2: 0x0ee9, 0x15e3: 0x1159, + 0x15e4: 0x0ef9, 0x15e5: 0x0f09, 0x15e6: 0x1199, 0x15e7: 0x0f31, 0x15e8: 0x0249, 0x15e9: 0x0f41, + 0x15ea: 0x0259, 0x15eb: 0x0f51, 0x15ec: 0x0359, 0x15ed: 0x0f61, 0x15ee: 0x0f71, 0x15ef: 0x00d9, + 0x15f0: 0x0f99, 0x15f1: 0x2039, 0x15f2: 0x0269, 0x15f3: 0x01d9, 0x15f4: 0x0fa9, 0x15f5: 0x0fb9, + 0x15f6: 0x1089, 0x15f7: 0x0279, 0x15f8: 0x0369, 0x15f9: 0x0289, 0x15fa: 0x13d1, 0x15fb: 0xaf4a, + 0x15fc: 0xafca, 0x15fd: 0xaf5a, 0x15fe: 0xb512, 0x15ff: 0xaf1a, + // Block 0x58, offset 0x1600 + 0x1600: 0x1caa, 0x1601: 0x0039, 0x1602: 0x0ee9, 0x1603: 0x1159, 0x1604: 0x0ef9, 0x1605: 0x0f09, + 0x1606: 0x1199, 0x1607: 0x0f31, 0x1608: 0x0249, 0x1609: 0x0f41, 0x160a: 0x0259, 0x160b: 0x0f51, + 0x160c: 0x0359, 0x160d: 0x0f61, 0x160e: 0x0f71, 0x160f: 0x00d9, 0x1610: 0x0f99, 0x1611: 0x2039, + 0x1612: 0x0269, 0x1613: 0x01d9, 0x1614: 0x0fa9, 0x1615: 0x0fb9, 0x1616: 0x1089, 0x1617: 0x0279, + 0x1618: 0x0369, 0x1619: 0x0289, 0x161a: 0x13d1, 0x161b: 0xaf2a, 0x161c: 0xb522, 0x161d: 0xaf3a, + 0x161e: 0xb532, 0x161f: 0x80d5, 0x1620: 0x80f5, 0x1621: 0x29d1, 0x1622: 0x8115, 0x1623: 0x8115, + 0x1624: 0x8135, 0x1625: 0x8155, 0x1626: 0x8175, 0x1627: 0x8195, 0x1628: 0x81b5, 0x1629: 0x81d5, + 0x162a: 0x81f5, 0x162b: 0x8215, 0x162c: 0x8235, 0x162d: 0x8255, 0x162e: 0x8275, 0x162f: 0x8295, + 0x1630: 0x82b5, 0x1631: 0x82d5, 0x1632: 0x82f5, 0x1633: 0x8315, 0x1634: 0x8335, 0x1635: 0x8355, + 0x1636: 0x8375, 0x1637: 0x8395, 0x1638: 0x83b5, 0x1639: 0x83d5, 0x163a: 0x83f5, 0x163b: 0x8415, + 0x163c: 0x81b5, 0x163d: 0x8435, 0x163e: 0x8455, 0x163f: 0x8215, + // Block 0x59, offset 0x1640 + 0x1640: 0x8475, 0x1641: 0x8495, 0x1642: 0x84b5, 0x1643: 0x84d5, 0x1644: 0x84f5, 0x1645: 0x8515, + 0x1646: 0x8535, 0x1647: 0x8555, 0x1648: 0x84d5, 0x1649: 0x8575, 0x164a: 0x84d5, 0x164b: 0x8595, + 0x164c: 0x8595, 0x164d: 0x85b5, 0x164e: 0x85b5, 0x164f: 0x85d5, 0x1650: 0x8515, 0x1651: 0x85f5, + 0x1652: 0x8615, 0x1653: 0x85f5, 0x1654: 0x8635, 0x1655: 0x8615, 0x1656: 0x8655, 0x1657: 0x8655, + 0x1658: 0x8675, 0x1659: 0x8675, 0x165a: 0x8695, 0x165b: 0x8695, 0x165c: 0x8615, 0x165d: 0x8115, + 0x165e: 0x86b5, 0x165f: 0x86d5, 0x1660: 0x0040, 0x1661: 0x86f5, 0x1662: 0x8715, 0x1663: 0x8735, + 0x1664: 0x8755, 0x1665: 0x8735, 0x1666: 0x8775, 0x1667: 0x8795, 0x1668: 0x87b5, 0x1669: 0x87b5, + 0x166a: 0x87d5, 0x166b: 0x87d5, 0x166c: 0x87f5, 0x166d: 0x87f5, 0x166e: 0x87d5, 0x166f: 0x87d5, + 0x1670: 0x8815, 0x1671: 0x8835, 0x1672: 0x8855, 0x1673: 0x8875, 0x1674: 0x8895, 0x1675: 0x88b5, + 0x1676: 0x88b5, 0x1677: 0x88b5, 0x1678: 0x88d5, 0x1679: 0x88d5, 0x167a: 0x88d5, 0x167b: 0x88d5, + 0x167c: 0x87b5, 0x167d: 0x87b5, 0x167e: 0x87b5, 0x167f: 0x0040, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0040, 0x1681: 0x0040, 0x1682: 0x8715, 0x1683: 0x86f5, 0x1684: 0x88f5, 0x1685: 0x86f5, + 0x1686: 0x8715, 0x1687: 0x86f5, 0x1688: 0x0040, 0x1689: 0x0040, 0x168a: 0x8915, 0x168b: 0x8715, + 0x168c: 0x8935, 0x168d: 0x88f5, 0x168e: 0x8935, 0x168f: 0x8715, 0x1690: 0x0040, 0x1691: 0x0040, + 0x1692: 0x8955, 0x1693: 0x8975, 0x1694: 0x8875, 0x1695: 0x8935, 0x1696: 0x88f5, 0x1697: 0x8935, + 0x1698: 0x0040, 0x1699: 0x0040, 0x169a: 0x8995, 0x169b: 0x89b5, 0x169c: 0x8995, 0x169d: 0x0040, + 0x169e: 0x0040, 0x169f: 0x0040, 0x16a0: 0xb541, 0x16a1: 0xb559, 0x16a2: 0xb571, 0x16a3: 0x89d6, + 0x16a4: 0xb589, 0x16a5: 0xb5a1, 0x16a6: 0x89f5, 0x16a7: 0x0040, 0x16a8: 0x8a15, 
0x16a9: 0x8a35, + 0x16aa: 0x8a55, 0x16ab: 0x8a35, 0x16ac: 0x8a75, 0x16ad: 0x8a95, 0x16ae: 0x8ab5, 0x16af: 0x0040, + 0x16b0: 0x0040, 0x16b1: 0x0040, 0x16b2: 0x0040, 0x16b3: 0x0040, 0x16b4: 0x0040, 0x16b5: 0x0040, + 0x16b6: 0x0040, 0x16b7: 0x0040, 0x16b8: 0x0040, 0x16b9: 0x0340, 0x16ba: 0x0340, 0x16bb: 0x0340, + 0x16bc: 0x0040, 0x16bd: 0x0040, 0x16be: 0x0040, 0x16bf: 0x0040, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x0208, 0x16c1: 0x0208, 0x16c2: 0x0208, 0x16c3: 0x0208, 0x16c4: 0x0208, 0x16c5: 0x0408, + 0x16c6: 0x0008, 0x16c7: 0x0408, 0x16c8: 0x0018, 0x16c9: 0x0408, 0x16ca: 0x0408, 0x16cb: 0x0008, + 0x16cc: 0x0008, 0x16cd: 0x0108, 0x16ce: 0x0408, 0x16cf: 0x0408, 0x16d0: 0x0408, 0x16d1: 0x0408, + 0x16d2: 0x0408, 0x16d3: 0x0208, 0x16d4: 0x0208, 0x16d5: 0x0208, 0x16d6: 0x0208, 0x16d7: 0x0108, + 0x16d8: 0x0208, 0x16d9: 0x0208, 0x16da: 0x0208, 0x16db: 0x0208, 0x16dc: 0x0208, 0x16dd: 0x0408, + 0x16de: 0x0208, 0x16df: 0x0208, 0x16e0: 0x0208, 0x16e1: 0x0408, 0x16e2: 0x0008, 0x16e3: 0x0008, + 0x16e4: 0x0408, 0x16e5: 0x1308, 0x16e6: 0x1308, 0x16e7: 0x0040, 0x16e8: 0x0040, 0x16e9: 0x0040, + 0x16ea: 0x0040, 0x16eb: 0x0218, 0x16ec: 0x0218, 0x16ed: 0x0218, 0x16ee: 0x0218, 0x16ef: 0x0418, + 0x16f0: 0x0018, 0x16f1: 0x0018, 0x16f2: 0x0018, 0x16f3: 0x0018, 0x16f4: 0x0018, 0x16f5: 0x0018, + 0x16f6: 0x0018, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0040, 0x16fa: 0x0040, 0x16fb: 0x0040, + 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0208, 0x1701: 0x0408, 0x1702: 0x0208, 0x1703: 0x0408, 0x1704: 0x0408, 0x1705: 0x0408, + 0x1706: 0x0208, 0x1707: 0x0208, 0x1708: 0x0208, 0x1709: 0x0408, 0x170a: 0x0208, 0x170b: 0x0208, + 0x170c: 0x0408, 0x170d: 0x0208, 0x170e: 0x0408, 0x170f: 0x0408, 0x1710: 0x0208, 0x1711: 0x0408, + 0x1712: 0x0040, 0x1713: 0x0040, 0x1714: 0x0040, 0x1715: 0x0040, 0x1716: 0x0040, 0x1717: 0x0040, + 0x1718: 0x0040, 0x1719: 0x0018, 0x171a: 0x0018, 0x171b: 0x0018, 0x171c: 0x0018, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x0040, 0x1721: 0x0040, 0x1722: 0x0040, 0x1723: 0x0040, + 0x1724: 0x0040, 0x1725: 0x0040, 0x1726: 0x0040, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0418, + 0x172a: 0x0418, 0x172b: 0x0418, 0x172c: 0x0418, 0x172d: 0x0218, 0x172e: 0x0218, 0x172f: 0x0018, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x1308, 0x1741: 0x1308, 0x1742: 0x1008, 0x1743: 0x1008, 0x1744: 0x0040, 0x1745: 0x0008, + 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, + 0x174c: 0x0008, 0x174d: 0x0040, 0x174e: 0x0040, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0040, + 0x1752: 0x0040, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, + 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, + 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, + 0x1764: 0x0008, 0x1765: 0x0008, 0x1766: 0x0008, 0x1767: 0x0008, 0x1768: 0x0008, 0x1769: 0x0040, + 0x176a: 0x0008, 0x176b: 0x0008, 0x176c: 0x0008, 0x176d: 0x0008, 0x176e: 0x0008, 0x176f: 0x0008, + 0x1770: 0x0008, 0x1771: 0x0040, 0x1772: 0x0008, 0x1773: 0x0008, 0x1774: 0x0040, 0x1775: 0x0008, + 0x1776: 0x0008, 0x1777: 0x0008, 0x1778: 0x0008, 0x1779: 0x0008, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x1308, 0x177d: 0x0008, 0x177e: 0x1008, 0x177f: 0x1008, + // Block 0x5e, offset 0x1780 + 0x1780: 0x1308, 0x1781: 0x1008, 0x1782: 0x1008, 0x1783: 0x1008, 0x1784: 0x1008, 0x1785: 0x0040, + 0x1786: 0x0040, 0x1787: 0x1008, 0x1788: 0x1008, 0x1789: 0x0040, 0x178a: 0x0040, 0x178b: 0x1008, + 0x178c: 0x1008, 0x178d: 0x1808, 0x178e: 0x0040, 0x178f: 0x0040, 0x1790: 0x0008, 0x1791: 0x0040, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x1008, + 0x1798: 0x0040, 0x1799: 0x0040, 0x179a: 0x0040, 0x179b: 0x0040, 0x179c: 0x0040, 0x179d: 0x0008, + 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x1008, 0x17a3: 0x1008, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x1308, 0x17a7: 0x1308, 0x17a8: 0x1308, 0x17a9: 0x1308, + 0x17aa: 0x1308, 0x17ab: 0x1308, 0x17ac: 0x1308, 0x17ad: 0x0040, 0x17ae: 0x0040, 0x17af: 0x0040, + 0x17b0: 0x1308, 0x17b1: 0x1308, 0x17b2: 0x1308, 0x17b3: 0x1308, 0x17b4: 0x1308, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x0039, 0x17c1: 0x0ee9, 0x17c2: 0x1159, 0x17c3: 0x0ef9, 0x17c4: 0x0f09, 0x17c5: 0x1199, + 0x17c6: 0x0f31, 0x17c7: 0x0249, 0x17c8: 0x0f41, 0x17c9: 0x0259, 0x17ca: 0x0f51, 0x17cb: 0x0359, + 0x17cc: 0x0f61, 0x17cd: 0x0f71, 0x17ce: 0x00d9, 0x17cf: 0x0f99, 0x17d0: 0x2039, 0x17d1: 0x0269, + 0x17d2: 0x01d9, 0x17d3: 0x0fa9, 0x17d4: 0x0fb9, 0x17d5: 0x1089, 0x17d6: 0x0279, 0x17d7: 0x0369, + 0x17d8: 0x0289, 0x17d9: 0x13d1, 0x17da: 0x0039, 0x17db: 0x0ee9, 0x17dc: 0x1159, 0x17dd: 0x0ef9, + 0x17de: 0x0f09, 0x17df: 0x1199, 0x17e0: 0x0f31, 0x17e1: 0x0249, 0x17e2: 0x0f41, 0x17e3: 0x0259, + 0x17e4: 0x0f51, 0x17e5: 0x0359, 0x17e6: 0x0f61, 0x17e7: 0x0f71, 0x17e8: 0x00d9, 0x17e9: 0x0f99, + 0x17ea: 0x2039, 0x17eb: 0x0269, 0x17ec: 0x01d9, 0x17ed: 0x0fa9, 0x17ee: 0x0fb9, 0x17ef: 0x1089, + 0x17f0: 0x0279, 0x17f1: 0x0369, 0x17f2: 0x0289, 0x17f3: 0x13d1, 0x17f4: 0x0039, 0x17f5: 0x0ee9, + 0x17f6: 0x1159, 0x17f7: 0x0ef9, 0x17f8: 0x0f09, 0x17f9: 0x1199, 0x17fa: 0x0f31, 0x17fb: 0x0249, + 0x17fc: 0x0f41, 0x17fd: 0x0259, 0x17fe: 0x0f51, 0x17ff: 0x0359, + // Block 0x60, offset 0x1800 + 0x1800: 0x0f61, 0x1801: 0x0f71, 0x1802: 0x00d9, 0x1803: 0x0f99, 0x1804: 0x2039, 0x1805: 0x0269, + 0x1806: 0x01d9, 0x1807: 0x0fa9, 0x1808: 0x0fb9, 0x1809: 0x1089, 0x180a: 0x0279, 0x180b: 0x0369, + 0x180c: 0x0289, 0x180d: 0x13d1, 0x180e: 0x0039, 0x180f: 0x0ee9, 0x1810: 0x1159, 0x1811: 0x0ef9, + 0x1812: 0x0f09, 0x1813: 0x1199, 0x1814: 0x0f31, 0x1815: 0x0040, 0x1816: 0x0f41, 0x1817: 0x0259, + 0x1818: 0x0f51, 0x1819: 0x0359, 0x181a: 0x0f61, 0x181b: 0x0f71, 0x181c: 0x00d9, 0x181d: 0x0f99, + 0x181e: 0x2039, 0x181f: 0x0269, 0x1820: 0x01d9, 0x1821: 0x0fa9, 0x1822: 0x0fb9, 0x1823: 0x1089, + 0x1824: 0x0279, 0x1825: 0x0369, 0x1826: 0x0289, 0x1827: 0x13d1, 0x1828: 0x0039, 0x1829: 0x0ee9, + 0x182a: 0x1159, 0x182b: 0x0ef9, 0x182c: 0x0f09, 0x182d: 0x1199, 0x182e: 0x0f31, 0x182f: 0x0249, + 0x1830: 0x0f41, 0x1831: 0x0259, 0x1832: 0x0f51, 0x1833: 0x0359, 0x1834: 0x0f61, 0x1835: 0x0f71, + 0x1836: 0x00d9, 0x1837: 0x0f99, 0x1838: 0x2039, 0x1839: 0x0269, 0x183a: 0x01d9, 0x183b: 0x0fa9, + 0x183c: 0x0fb9, 0x183d: 0x1089, 0x183e: 0x0279, 0x183f: 0x0369, + // Block 0x61, offset 0x1840 + 0x1840: 0x0289, 0x1841: 0x13d1, 0x1842: 0x0039, 0x1843: 0x0ee9, 0x1844: 0x1159, 0x1845: 0x0ef9, + 0x1846: 0x0f09, 0x1847: 0x1199, 0x1848: 0x0f31, 0x1849: 0x0249, 0x184a: 0x0f41, 0x184b: 0x0259, + 0x184c: 0x0f51, 0x184d: 0x0359, 
0x184e: 0x0f61, 0x184f: 0x0f71, 0x1850: 0x00d9, 0x1851: 0x0f99, + 0x1852: 0x2039, 0x1853: 0x0269, 0x1854: 0x01d9, 0x1855: 0x0fa9, 0x1856: 0x0fb9, 0x1857: 0x1089, + 0x1858: 0x0279, 0x1859: 0x0369, 0x185a: 0x0289, 0x185b: 0x13d1, 0x185c: 0x0039, 0x185d: 0x0040, + 0x185e: 0x1159, 0x185f: 0x0ef9, 0x1860: 0x0040, 0x1861: 0x0040, 0x1862: 0x0f31, 0x1863: 0x0040, + 0x1864: 0x0040, 0x1865: 0x0259, 0x1866: 0x0f51, 0x1867: 0x0040, 0x1868: 0x0040, 0x1869: 0x0f71, + 0x186a: 0x00d9, 0x186b: 0x0f99, 0x186c: 0x2039, 0x186d: 0x0040, 0x186e: 0x01d9, 0x186f: 0x0fa9, + 0x1870: 0x0fb9, 0x1871: 0x1089, 0x1872: 0x0279, 0x1873: 0x0369, 0x1874: 0x0289, 0x1875: 0x13d1, + 0x1876: 0x0039, 0x1877: 0x0ee9, 0x1878: 0x1159, 0x1879: 0x0ef9, 0x187a: 0x0040, 0x187b: 0x1199, + 0x187c: 0x0040, 0x187d: 0x0249, 0x187e: 0x0f41, 0x187f: 0x0259, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f51, 0x1881: 0x0359, 0x1882: 0x0f61, 0x1883: 0x0f71, 0x1884: 0x0040, 0x1885: 0x0f99, + 0x1886: 0x2039, 0x1887: 0x0269, 0x1888: 0x01d9, 0x1889: 0x0fa9, 0x188a: 0x0fb9, 0x188b: 0x1089, + 0x188c: 0x0279, 0x188d: 0x0369, 0x188e: 0x0289, 0x188f: 0x13d1, 0x1890: 0x0039, 0x1891: 0x0ee9, + 0x1892: 0x1159, 0x1893: 0x0ef9, 0x1894: 0x0f09, 0x1895: 0x1199, 0x1896: 0x0f31, 0x1897: 0x0249, + 0x1898: 0x0f41, 0x1899: 0x0259, 0x189a: 0x0f51, 0x189b: 0x0359, 0x189c: 0x0f61, 0x189d: 0x0f71, + 0x189e: 0x00d9, 0x189f: 0x0f99, 0x18a0: 0x2039, 0x18a1: 0x0269, 0x18a2: 0x01d9, 0x18a3: 0x0fa9, + 0x18a4: 0x0fb9, 0x18a5: 0x1089, 0x18a6: 0x0279, 0x18a7: 0x0369, 0x18a8: 0x0289, 0x18a9: 0x13d1, + 0x18aa: 0x0039, 0x18ab: 0x0ee9, 0x18ac: 0x1159, 0x18ad: 0x0ef9, 0x18ae: 0x0f09, 0x18af: 0x1199, + 0x18b0: 0x0f31, 0x18b1: 0x0249, 0x18b2: 0x0f41, 0x18b3: 0x0259, 0x18b4: 0x0f51, 0x18b5: 0x0359, + 0x18b6: 0x0f61, 0x18b7: 0x0f71, 0x18b8: 0x00d9, 0x18b9: 0x0f99, 0x18ba: 0x2039, 0x18bb: 0x0269, + 0x18bc: 0x01d9, 0x18bd: 0x0fa9, 0x18be: 0x0fb9, 0x18bf: 0x1089, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0279, 0x18c1: 0x0369, 0x18c2: 0x0289, 0x18c3: 0x13d1, 0x18c4: 0x0039, 0x18c5: 0x0ee9, + 0x18c6: 0x0040, 0x18c7: 0x0ef9, 0x18c8: 0x0f09, 0x18c9: 0x1199, 0x18ca: 0x0f31, 0x18cb: 0x0040, + 0x18cc: 0x0040, 0x18cd: 0x0259, 0x18ce: 0x0f51, 0x18cf: 0x0359, 0x18d0: 0x0f61, 0x18d1: 0x0f71, + 0x18d2: 0x00d9, 0x18d3: 0x0f99, 0x18d4: 0x2039, 0x18d5: 0x0040, 0x18d6: 0x01d9, 0x18d7: 0x0fa9, + 0x18d8: 0x0fb9, 0x18d9: 0x1089, 0x18da: 0x0279, 0x18db: 0x0369, 0x18dc: 0x0289, 0x18dd: 0x0040, + 0x18de: 0x0039, 0x18df: 0x0ee9, 0x18e0: 0x1159, 0x18e1: 0x0ef9, 0x18e2: 0x0f09, 0x18e3: 0x1199, + 0x18e4: 0x0f31, 0x18e5: 0x0249, 0x18e6: 0x0f41, 0x18e7: 0x0259, 0x18e8: 0x0f51, 0x18e9: 0x0359, + 0x18ea: 0x0f61, 0x18eb: 0x0f71, 0x18ec: 0x00d9, 0x18ed: 0x0f99, 0x18ee: 0x2039, 0x18ef: 0x0269, + 0x18f0: 0x01d9, 0x18f1: 0x0fa9, 0x18f2: 0x0fb9, 0x18f3: 0x1089, 0x18f4: 0x0279, 0x18f5: 0x0369, + 0x18f6: 0x0289, 0x18f7: 0x13d1, 0x18f8: 0x0039, 0x18f9: 0x0ee9, 0x18fa: 0x0040, 0x18fb: 0x0ef9, + 0x18fc: 0x0f09, 0x18fd: 0x1199, 0x18fe: 0x0f31, 0x18ff: 0x0040, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f41, 0x1901: 0x0259, 0x1902: 0x0f51, 0x1903: 0x0359, 0x1904: 0x0f61, 0x1905: 0x0040, + 0x1906: 0x00d9, 0x1907: 0x0040, 0x1908: 0x0040, 0x1909: 0x0040, 0x190a: 0x01d9, 0x190b: 0x0fa9, + 0x190c: 0x0fb9, 0x190d: 0x1089, 0x190e: 0x0279, 0x190f: 0x0369, 0x1910: 0x0289, 0x1911: 0x0040, + 0x1912: 0x0039, 0x1913: 0x0ee9, 0x1914: 0x1159, 0x1915: 0x0ef9, 0x1916: 0x0f09, 0x1917: 0x1199, + 0x1918: 0x0f31, 0x1919: 0x0249, 0x191a: 0x0f41, 0x191b: 0x0259, 0x191c: 0x0f51, 0x191d: 0x0359, + 0x191e: 0x0f61, 0x191f: 0x0f71, 0x1920: 0x00d9, 0x1921: 
0x0f99, 0x1922: 0x2039, 0x1923: 0x0269, + 0x1924: 0x01d9, 0x1925: 0x0fa9, 0x1926: 0x0fb9, 0x1927: 0x1089, 0x1928: 0x0279, 0x1929: 0x0369, + 0x192a: 0x0289, 0x192b: 0x13d1, 0x192c: 0x0039, 0x192d: 0x0ee9, 0x192e: 0x1159, 0x192f: 0x0ef9, + 0x1930: 0x0f09, 0x1931: 0x1199, 0x1932: 0x0f31, 0x1933: 0x0249, 0x1934: 0x0f41, 0x1935: 0x0259, + 0x1936: 0x0f51, 0x1937: 0x0359, 0x1938: 0x0f61, 0x1939: 0x0f71, 0x193a: 0x00d9, 0x193b: 0x0f99, + 0x193c: 0x2039, 0x193d: 0x0269, 0x193e: 0x01d9, 0x193f: 0x0fa9, + // Block 0x65, offset 0x1940 + 0x1940: 0x0fb9, 0x1941: 0x1089, 0x1942: 0x0279, 0x1943: 0x0369, 0x1944: 0x0289, 0x1945: 0x13d1, + 0x1946: 0x0039, 0x1947: 0x0ee9, 0x1948: 0x1159, 0x1949: 0x0ef9, 0x194a: 0x0f09, 0x194b: 0x1199, + 0x194c: 0x0f31, 0x194d: 0x0249, 0x194e: 0x0f41, 0x194f: 0x0259, 0x1950: 0x0f51, 0x1951: 0x0359, + 0x1952: 0x0f61, 0x1953: 0x0f71, 0x1954: 0x00d9, 0x1955: 0x0f99, 0x1956: 0x2039, 0x1957: 0x0269, + 0x1958: 0x01d9, 0x1959: 0x0fa9, 0x195a: 0x0fb9, 0x195b: 0x1089, 0x195c: 0x0279, 0x195d: 0x0369, + 0x195e: 0x0289, 0x195f: 0x13d1, 0x1960: 0x0039, 0x1961: 0x0ee9, 0x1962: 0x1159, 0x1963: 0x0ef9, + 0x1964: 0x0f09, 0x1965: 0x1199, 0x1966: 0x0f31, 0x1967: 0x0249, 0x1968: 0x0f41, 0x1969: 0x0259, + 0x196a: 0x0f51, 0x196b: 0x0359, 0x196c: 0x0f61, 0x196d: 0x0f71, 0x196e: 0x00d9, 0x196f: 0x0f99, + 0x1970: 0x2039, 0x1971: 0x0269, 0x1972: 0x01d9, 0x1973: 0x0fa9, 0x1974: 0x0fb9, 0x1975: 0x1089, + 0x1976: 0x0279, 0x1977: 0x0369, 0x1978: 0x0289, 0x1979: 0x13d1, 0x197a: 0x0039, 0x197b: 0x0ee9, + 0x197c: 0x1159, 0x197d: 0x0ef9, 0x197e: 0x0f09, 0x197f: 0x1199, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f31, 0x1981: 0x0249, 0x1982: 0x0f41, 0x1983: 0x0259, 0x1984: 0x0f51, 0x1985: 0x0359, + 0x1986: 0x0f61, 0x1987: 0x0f71, 0x1988: 0x00d9, 0x1989: 0x0f99, 0x198a: 0x2039, 0x198b: 0x0269, + 0x198c: 0x01d9, 0x198d: 0x0fa9, 0x198e: 0x0fb9, 0x198f: 0x1089, 0x1990: 0x0279, 0x1991: 0x0369, + 0x1992: 0x0289, 0x1993: 0x13d1, 0x1994: 0x0039, 0x1995: 0x0ee9, 0x1996: 0x1159, 0x1997: 0x0ef9, + 0x1998: 0x0f09, 0x1999: 0x1199, 0x199a: 0x0f31, 0x199b: 0x0249, 0x199c: 0x0f41, 0x199d: 0x0259, + 0x199e: 0x0f51, 0x199f: 0x0359, 0x19a0: 0x0f61, 0x19a1: 0x0f71, 0x19a2: 0x00d9, 0x19a3: 0x0f99, + 0x19a4: 0x2039, 0x19a5: 0x0269, 0x19a6: 0x01d9, 0x19a7: 0x0fa9, 0x19a8: 0x0fb9, 0x19a9: 0x1089, + 0x19aa: 0x0279, 0x19ab: 0x0369, 0x19ac: 0x0289, 0x19ad: 0x13d1, 0x19ae: 0x0039, 0x19af: 0x0ee9, + 0x19b0: 0x1159, 0x19b1: 0x0ef9, 0x19b2: 0x0f09, 0x19b3: 0x1199, 0x19b4: 0x0f31, 0x19b5: 0x0249, + 0x19b6: 0x0f41, 0x19b7: 0x0259, 0x19b8: 0x0f51, 0x19b9: 0x0359, 0x19ba: 0x0f61, 0x19bb: 0x0f71, + 0x19bc: 0x00d9, 0x19bd: 0x0f99, 0x19be: 0x2039, 0x19bf: 0x0269, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x01d9, 0x19c1: 0x0fa9, 0x19c2: 0x0fb9, 0x19c3: 0x1089, 0x19c4: 0x0279, 0x19c5: 0x0369, + 0x19c6: 0x0289, 0x19c7: 0x13d1, 0x19c8: 0x0039, 0x19c9: 0x0ee9, 0x19ca: 0x1159, 0x19cb: 0x0ef9, + 0x19cc: 0x0f09, 0x19cd: 0x1199, 0x19ce: 0x0f31, 0x19cf: 0x0249, 0x19d0: 0x0f41, 0x19d1: 0x0259, + 0x19d2: 0x0f51, 0x19d3: 0x0359, 0x19d4: 0x0f61, 0x19d5: 0x0f71, 0x19d6: 0x00d9, 0x19d7: 0x0f99, + 0x19d8: 0x2039, 0x19d9: 0x0269, 0x19da: 0x01d9, 0x19db: 0x0fa9, 0x19dc: 0x0fb9, 0x19dd: 0x1089, + 0x19de: 0x0279, 0x19df: 0x0369, 0x19e0: 0x0289, 0x19e1: 0x13d1, 0x19e2: 0x0039, 0x19e3: 0x0ee9, + 0x19e4: 0x1159, 0x19e5: 0x0ef9, 0x19e6: 0x0f09, 0x19e7: 0x1199, 0x19e8: 0x0f31, 0x19e9: 0x0249, + 0x19ea: 0x0f41, 0x19eb: 0x0259, 0x19ec: 0x0f51, 0x19ed: 0x0359, 0x19ee: 0x0f61, 0x19ef: 0x0f71, + 0x19f0: 0x00d9, 0x19f1: 0x0f99, 0x19f2: 0x2039, 0x19f3: 0x0269, 0x19f4: 0x01d9, 
0x19f5: 0x0fa9, + 0x19f6: 0x0fb9, 0x19f7: 0x1089, 0x19f8: 0x0279, 0x19f9: 0x0369, 0x19fa: 0x0289, 0x19fb: 0x13d1, + 0x19fc: 0x0039, 0x19fd: 0x0ee9, 0x19fe: 0x1159, 0x19ff: 0x0ef9, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f09, 0x1a01: 0x1199, 0x1a02: 0x0f31, 0x1a03: 0x0249, 0x1a04: 0x0f41, 0x1a05: 0x0259, + 0x1a06: 0x0f51, 0x1a07: 0x0359, 0x1a08: 0x0f61, 0x1a09: 0x0f71, 0x1a0a: 0x00d9, 0x1a0b: 0x0f99, + 0x1a0c: 0x2039, 0x1a0d: 0x0269, 0x1a0e: 0x01d9, 0x1a0f: 0x0fa9, 0x1a10: 0x0fb9, 0x1a11: 0x1089, + 0x1a12: 0x0279, 0x1a13: 0x0369, 0x1a14: 0x0289, 0x1a15: 0x13d1, 0x1a16: 0x0039, 0x1a17: 0x0ee9, + 0x1a18: 0x1159, 0x1a19: 0x0ef9, 0x1a1a: 0x0f09, 0x1a1b: 0x1199, 0x1a1c: 0x0f31, 0x1a1d: 0x0249, + 0x1a1e: 0x0f41, 0x1a1f: 0x0259, 0x1a20: 0x0f51, 0x1a21: 0x0359, 0x1a22: 0x0f61, 0x1a23: 0x0f71, + 0x1a24: 0x00d9, 0x1a25: 0x0f99, 0x1a26: 0x2039, 0x1a27: 0x0269, 0x1a28: 0x01d9, 0x1a29: 0x0fa9, + 0x1a2a: 0x0fb9, 0x1a2b: 0x1089, 0x1a2c: 0x0279, 0x1a2d: 0x0369, 0x1a2e: 0x0289, 0x1a2f: 0x13d1, + 0x1a30: 0x0039, 0x1a31: 0x0ee9, 0x1a32: 0x1159, 0x1a33: 0x0ef9, 0x1a34: 0x0f09, 0x1a35: 0x1199, + 0x1a36: 0x0f31, 0x1a37: 0x0249, 0x1a38: 0x0f41, 0x1a39: 0x0259, 0x1a3a: 0x0f51, 0x1a3b: 0x0359, + 0x1a3c: 0x0f61, 0x1a3d: 0x0f71, 0x1a3e: 0x00d9, 0x1a3f: 0x0f99, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x2039, 0x1a41: 0x0269, 0x1a42: 0x01d9, 0x1a43: 0x0fa9, 0x1a44: 0x0fb9, 0x1a45: 0x1089, + 0x1a46: 0x0279, 0x1a47: 0x0369, 0x1a48: 0x0289, 0x1a49: 0x13d1, 0x1a4a: 0x0039, 0x1a4b: 0x0ee9, + 0x1a4c: 0x1159, 0x1a4d: 0x0ef9, 0x1a4e: 0x0f09, 0x1a4f: 0x1199, 0x1a50: 0x0f31, 0x1a51: 0x0249, + 0x1a52: 0x0f41, 0x1a53: 0x0259, 0x1a54: 0x0f51, 0x1a55: 0x0359, 0x1a56: 0x0f61, 0x1a57: 0x0f71, + 0x1a58: 0x00d9, 0x1a59: 0x0f99, 0x1a5a: 0x2039, 0x1a5b: 0x0269, 0x1a5c: 0x01d9, 0x1a5d: 0x0fa9, + 0x1a5e: 0x0fb9, 0x1a5f: 0x1089, 0x1a60: 0x0279, 0x1a61: 0x0369, 0x1a62: 0x0289, 0x1a63: 0x13d1, + 0x1a64: 0xba81, 0x1a65: 0xba99, 0x1a66: 0x0040, 0x1a67: 0x0040, 0x1a68: 0xbab1, 0x1a69: 0x1099, + 0x1a6a: 0x10b1, 0x1a6b: 0x10c9, 0x1a6c: 0xbac9, 0x1a6d: 0xbae1, 0x1a6e: 0xbaf9, 0x1a6f: 0x1429, + 0x1a70: 0x1a31, 0x1a71: 0xbb11, 0x1a72: 0xbb29, 0x1a73: 0xbb41, 0x1a74: 0xbb59, 0x1a75: 0xbb71, + 0x1a76: 0xbb89, 0x1a77: 0x2109, 0x1a78: 0x1111, 0x1a79: 0x1429, 0x1a7a: 0xbba1, 0x1a7b: 0xbbb9, + 0x1a7c: 0xbbd1, 0x1a7d: 0x10e1, 0x1a7e: 0x10f9, 0x1a7f: 0xbbe9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x2079, 0x1a81: 0xbc01, 0x1a82: 0xbab1, 0x1a83: 0x1099, 0x1a84: 0x10b1, 0x1a85: 0x10c9, + 0x1a86: 0xbac9, 0x1a87: 0xbae1, 0x1a88: 0xbaf9, 0x1a89: 0x1429, 0x1a8a: 0x1a31, 0x1a8b: 0xbb11, + 0x1a8c: 0xbb29, 0x1a8d: 0xbb41, 0x1a8e: 0xbb59, 0x1a8f: 0xbb71, 0x1a90: 0xbb89, 0x1a91: 0x2109, + 0x1a92: 0x1111, 0x1a93: 0xbba1, 0x1a94: 0xbba1, 0x1a95: 0xbbb9, 0x1a96: 0xbbd1, 0x1a97: 0x10e1, + 0x1a98: 0x10f9, 0x1a99: 0xbbe9, 0x1a9a: 0x2079, 0x1a9b: 0xbc21, 0x1a9c: 0xbac9, 0x1a9d: 0x1429, + 0x1a9e: 0xbb11, 0x1a9f: 0x10e1, 0x1aa0: 0x1111, 0x1aa1: 0x2109, 0x1aa2: 0xbab1, 0x1aa3: 0x1099, + 0x1aa4: 0x10b1, 0x1aa5: 0x10c9, 0x1aa6: 0xbac9, 0x1aa7: 0xbae1, 0x1aa8: 0xbaf9, 0x1aa9: 0x1429, + 0x1aaa: 0x1a31, 0x1aab: 0xbb11, 0x1aac: 0xbb29, 0x1aad: 0xbb41, 0x1aae: 0xbb59, 0x1aaf: 0xbb71, + 0x1ab0: 0xbb89, 0x1ab1: 0x2109, 0x1ab2: 0x1111, 0x1ab3: 0x1429, 0x1ab4: 0xbba1, 0x1ab5: 0xbbb9, + 0x1ab6: 0xbbd1, 0x1ab7: 0x10e1, 0x1ab8: 0x10f9, 0x1ab9: 0xbbe9, 0x1aba: 0x2079, 0x1abb: 0xbc01, + 0x1abc: 0xbab1, 0x1abd: 0x1099, 0x1abe: 0x10b1, 0x1abf: 0x10c9, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0xbac9, 0x1ac1: 0xbae1, 0x1ac2: 0xbaf9, 0x1ac3: 0x1429, 0x1ac4: 0x1a31, 0x1ac5: 0xbb11, + 0x1ac6: 
0xbb29, 0x1ac7: 0xbb41, 0x1ac8: 0xbb59, 0x1ac9: 0xbb71, 0x1aca: 0xbb89, 0x1acb: 0x2109, + 0x1acc: 0x1111, 0x1acd: 0xbba1, 0x1ace: 0xbba1, 0x1acf: 0xbbb9, 0x1ad0: 0xbbd1, 0x1ad1: 0x10e1, + 0x1ad2: 0x10f9, 0x1ad3: 0xbbe9, 0x1ad4: 0x2079, 0x1ad5: 0xbc21, 0x1ad6: 0xbac9, 0x1ad7: 0x1429, + 0x1ad8: 0xbb11, 0x1ad9: 0x10e1, 0x1ada: 0x1111, 0x1adb: 0x2109, 0x1adc: 0xbab1, 0x1add: 0x1099, + 0x1ade: 0x10b1, 0x1adf: 0x10c9, 0x1ae0: 0xbac9, 0x1ae1: 0xbae1, 0x1ae2: 0xbaf9, 0x1ae3: 0x1429, + 0x1ae4: 0x1a31, 0x1ae5: 0xbb11, 0x1ae6: 0xbb29, 0x1ae7: 0xbb41, 0x1ae8: 0xbb59, 0x1ae9: 0xbb71, + 0x1aea: 0xbb89, 0x1aeb: 0x2109, 0x1aec: 0x1111, 0x1aed: 0x1429, 0x1aee: 0xbba1, 0x1aef: 0xbbb9, + 0x1af0: 0xbbd1, 0x1af1: 0x10e1, 0x1af2: 0x10f9, 0x1af3: 0xbbe9, 0x1af4: 0x2079, 0x1af5: 0xbc01, + 0x1af6: 0xbab1, 0x1af7: 0x1099, 0x1af8: 0x10b1, 0x1af9: 0x10c9, 0x1afa: 0xbac9, 0x1afb: 0xbae1, + 0x1afc: 0xbaf9, 0x1afd: 0x1429, 0x1afe: 0x1a31, 0x1aff: 0xbb11, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0xbb29, 0x1b01: 0xbb41, 0x1b02: 0xbb59, 0x1b03: 0xbb71, 0x1b04: 0xbb89, 0x1b05: 0x2109, + 0x1b06: 0x1111, 0x1b07: 0xbba1, 0x1b08: 0xbba1, 0x1b09: 0xbbb9, 0x1b0a: 0xbbd1, 0x1b0b: 0x10e1, + 0x1b0c: 0x10f9, 0x1b0d: 0xbbe9, 0x1b0e: 0x2079, 0x1b0f: 0xbc21, 0x1b10: 0xbac9, 0x1b11: 0x1429, + 0x1b12: 0xbb11, 0x1b13: 0x10e1, 0x1b14: 0x1111, 0x1b15: 0x2109, 0x1b16: 0xbab1, 0x1b17: 0x1099, + 0x1b18: 0x10b1, 0x1b19: 0x10c9, 0x1b1a: 0xbac9, 0x1b1b: 0xbae1, 0x1b1c: 0xbaf9, 0x1b1d: 0x1429, + 0x1b1e: 0x1a31, 0x1b1f: 0xbb11, 0x1b20: 0xbb29, 0x1b21: 0xbb41, 0x1b22: 0xbb59, 0x1b23: 0xbb71, + 0x1b24: 0xbb89, 0x1b25: 0x2109, 0x1b26: 0x1111, 0x1b27: 0x1429, 0x1b28: 0xbba1, 0x1b29: 0xbbb9, + 0x1b2a: 0xbbd1, 0x1b2b: 0x10e1, 0x1b2c: 0x10f9, 0x1b2d: 0xbbe9, 0x1b2e: 0x2079, 0x1b2f: 0xbc01, + 0x1b30: 0xbab1, 0x1b31: 0x1099, 0x1b32: 0x10b1, 0x1b33: 0x10c9, 0x1b34: 0xbac9, 0x1b35: 0xbae1, + 0x1b36: 0xbaf9, 0x1b37: 0x1429, 0x1b38: 0x1a31, 0x1b39: 0xbb11, 0x1b3a: 0xbb29, 0x1b3b: 0xbb41, + 0x1b3c: 0xbb59, 0x1b3d: 0xbb71, 0x1b3e: 0xbb89, 0x1b3f: 0x2109, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x1111, 0x1b41: 0xbba1, 0x1b42: 0xbba1, 0x1b43: 0xbbb9, 0x1b44: 0xbbd1, 0x1b45: 0x10e1, + 0x1b46: 0x10f9, 0x1b47: 0xbbe9, 0x1b48: 0x2079, 0x1b49: 0xbc21, 0x1b4a: 0xbac9, 0x1b4b: 0x1429, + 0x1b4c: 0xbb11, 0x1b4d: 0x10e1, 0x1b4e: 0x1111, 0x1b4f: 0x2109, 0x1b50: 0xbab1, 0x1b51: 0x1099, + 0x1b52: 0x10b1, 0x1b53: 0x10c9, 0x1b54: 0xbac9, 0x1b55: 0xbae1, 0x1b56: 0xbaf9, 0x1b57: 0x1429, + 0x1b58: 0x1a31, 0x1b59: 0xbb11, 0x1b5a: 0xbb29, 0x1b5b: 0xbb41, 0x1b5c: 0xbb59, 0x1b5d: 0xbb71, + 0x1b5e: 0xbb89, 0x1b5f: 0x2109, 0x1b60: 0x1111, 0x1b61: 0x1429, 0x1b62: 0xbba1, 0x1b63: 0xbbb9, + 0x1b64: 0xbbd1, 0x1b65: 0x10e1, 0x1b66: 0x10f9, 0x1b67: 0xbbe9, 0x1b68: 0x2079, 0x1b69: 0xbc01, + 0x1b6a: 0xbab1, 0x1b6b: 0x1099, 0x1b6c: 0x10b1, 0x1b6d: 0x10c9, 0x1b6e: 0xbac9, 0x1b6f: 0xbae1, + 0x1b70: 0xbaf9, 0x1b71: 0x1429, 0x1b72: 0x1a31, 0x1b73: 0xbb11, 0x1b74: 0xbb29, 0x1b75: 0xbb41, + 0x1b76: 0xbb59, 0x1b77: 0xbb71, 0x1b78: 0xbb89, 0x1b79: 0x2109, 0x1b7a: 0x1111, 0x1b7b: 0xbba1, + 0x1b7c: 0xbba1, 0x1b7d: 0xbbb9, 0x1b7e: 0xbbd1, 0x1b7f: 0x10e1, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x10f9, 0x1b81: 0xbbe9, 0x1b82: 0x2079, 0x1b83: 0xbc21, 0x1b84: 0xbac9, 0x1b85: 0x1429, + 0x1b86: 0xbb11, 0x1b87: 0x10e1, 0x1b88: 0x1111, 0x1b89: 0x2109, 0x1b8a: 0xbc41, 0x1b8b: 0xbc41, + 0x1b8c: 0x0040, 0x1b8d: 0x0040, 0x1b8e: 0x1f41, 0x1b8f: 0x00c9, 0x1b90: 0x0069, 0x1b91: 0x0079, + 0x1b92: 0x1f51, 0x1b93: 0x1f61, 0x1b94: 0x1f71, 0x1b95: 0x1f81, 0x1b96: 0x1f91, 0x1b97: 0x1fa1, + 0x1b98: 0x1f41, 0x1b99: 0x00c9, 
0x1b9a: 0x0069, 0x1b9b: 0x0079, 0x1b9c: 0x1f51, 0x1b9d: 0x1f61, + 0x1b9e: 0x1f71, 0x1b9f: 0x1f81, 0x1ba0: 0x1f91, 0x1ba1: 0x1fa1, 0x1ba2: 0x1f41, 0x1ba3: 0x00c9, + 0x1ba4: 0x0069, 0x1ba5: 0x0079, 0x1ba6: 0x1f51, 0x1ba7: 0x1f61, 0x1ba8: 0x1f71, 0x1ba9: 0x1f81, + 0x1baa: 0x1f91, 0x1bab: 0x1fa1, 0x1bac: 0x1f41, 0x1bad: 0x00c9, 0x1bae: 0x0069, 0x1baf: 0x0079, + 0x1bb0: 0x1f51, 0x1bb1: 0x1f61, 0x1bb2: 0x1f71, 0x1bb3: 0x1f81, 0x1bb4: 0x1f91, 0x1bb5: 0x1fa1, + 0x1bb6: 0x1f41, 0x1bb7: 0x00c9, 0x1bb8: 0x0069, 0x1bb9: 0x0079, 0x1bba: 0x1f51, 0x1bbb: 0x1f61, + 0x1bbc: 0x1f71, 0x1bbd: 0x1f81, 0x1bbe: 0x1f91, 0x1bbf: 0x1fa1, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0xe115, 0x1bc1: 0xe115, 0x1bc2: 0xe135, 0x1bc3: 0xe135, 0x1bc4: 0xe115, 0x1bc5: 0xe115, + 0x1bc6: 0xe175, 0x1bc7: 0xe175, 0x1bc8: 0xe115, 0x1bc9: 0xe115, 0x1bca: 0xe135, 0x1bcb: 0xe135, + 0x1bcc: 0xe115, 0x1bcd: 0xe115, 0x1bce: 0xe1f5, 0x1bcf: 0xe1f5, 0x1bd0: 0xe115, 0x1bd1: 0xe115, + 0x1bd2: 0xe135, 0x1bd3: 0xe135, 0x1bd4: 0xe115, 0x1bd5: 0xe115, 0x1bd6: 0xe175, 0x1bd7: 0xe175, + 0x1bd8: 0xe115, 0x1bd9: 0xe115, 0x1bda: 0xe135, 0x1bdb: 0xe135, 0x1bdc: 0xe115, 0x1bdd: 0xe115, + 0x1bde: 0x8b05, 0x1bdf: 0x8b05, 0x1be0: 0x04b5, 0x1be1: 0x04b5, 0x1be2: 0x0208, 0x1be3: 0x0208, + 0x1be4: 0x0208, 0x1be5: 0x0208, 0x1be6: 0x0208, 0x1be7: 0x0208, 0x1be8: 0x0208, 0x1be9: 0x0208, + 0x1bea: 0x0208, 0x1beb: 0x0208, 0x1bec: 0x0208, 0x1bed: 0x0208, 0x1bee: 0x0208, 0x1bef: 0x0208, + 0x1bf0: 0x0208, 0x1bf1: 0x0208, 0x1bf2: 0x0208, 0x1bf3: 0x0208, 0x1bf4: 0x0208, 0x1bf5: 0x0208, + 0x1bf6: 0x0208, 0x1bf7: 0x0208, 0x1bf8: 0x0208, 0x1bf9: 0x0208, 0x1bfa: 0x0208, 0x1bfb: 0x0208, + 0x1bfc: 0x0208, 0x1bfd: 0x0208, 0x1bfe: 0x0208, 0x1bff: 0x0208, + // Block 0x70, offset 0x1c00 + 0x1c00: 0xb189, 0x1c01: 0xb1a1, 0x1c02: 0xb201, 0x1c03: 0xb249, 0x1c04: 0x0040, 0x1c05: 0xb411, + 0x1c06: 0xb291, 0x1c07: 0xb219, 0x1c08: 0xb309, 0x1c09: 0xb429, 0x1c0a: 0xb399, 0x1c0b: 0xb3b1, + 0x1c0c: 0xb3c9, 0x1c0d: 0xb3e1, 0x1c0e: 0xb2a9, 0x1c0f: 0xb339, 0x1c10: 0xb369, 0x1c11: 0xb2d9, + 0x1c12: 0xb381, 0x1c13: 0xb279, 0x1c14: 0xb2c1, 0x1c15: 0xb1d1, 0x1c16: 0xb1e9, 0x1c17: 0xb231, + 0x1c18: 0xb261, 0x1c19: 0xb2f1, 0x1c1a: 0xb321, 0x1c1b: 0xb351, 0x1c1c: 0xbc59, 0x1c1d: 0x7949, + 0x1c1e: 0xbc71, 0x1c1f: 0xbc89, 0x1c20: 0x0040, 0x1c21: 0xb1a1, 0x1c22: 0xb201, 0x1c23: 0x0040, + 0x1c24: 0xb3f9, 0x1c25: 0x0040, 0x1c26: 0x0040, 0x1c27: 0xb219, 0x1c28: 0x0040, 0x1c29: 0xb429, + 0x1c2a: 0xb399, 0x1c2b: 0xb3b1, 0x1c2c: 0xb3c9, 0x1c2d: 0xb3e1, 0x1c2e: 0xb2a9, 0x1c2f: 0xb339, + 0x1c30: 0xb369, 0x1c31: 0xb2d9, 0x1c32: 0xb381, 0x1c33: 0x0040, 0x1c34: 0xb2c1, 0x1c35: 0xb1d1, + 0x1c36: 0xb1e9, 0x1c37: 0xb231, 0x1c38: 0x0040, 0x1c39: 0xb2f1, 0x1c3a: 0x0040, 0x1c3b: 0xb351, + 0x1c3c: 0x0040, 0x1c3d: 0x0040, 0x1c3e: 0x0040, 0x1c3f: 0x0040, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x0040, 0x1c41: 0x0040, 0x1c42: 0xb201, 0x1c43: 0x0040, 0x1c44: 0x0040, 0x1c45: 0x0040, + 0x1c46: 0x0040, 0x1c47: 0xb219, 0x1c48: 0x0040, 0x1c49: 0xb429, 0x1c4a: 0x0040, 0x1c4b: 0xb3b1, + 0x1c4c: 0x0040, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0x0040, 0x1c51: 0xb2d9, + 0x1c52: 0xb381, 0x1c53: 0x0040, 0x1c54: 0xb2c1, 0x1c55: 0x0040, 0x1c56: 0x0040, 0x1c57: 0xb231, + 0x1c58: 0x0040, 0x1c59: 0xb2f1, 0x1c5a: 0x0040, 0x1c5b: 0xb351, 0x1c5c: 0x0040, 0x1c5d: 0x7949, + 0x1c5e: 0x0040, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, + 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0xb309, 0x1c69: 0xb429, + 0x1c6a: 0xb399, 0x1c6b: 0x0040, 0x1c6c: 0xb3c9, 0x1c6d: 
0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, + 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, + 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0xb321, 0x1c7b: 0xb351, + 0x1c7c: 0xbc59, 0x1c7d: 0x0040, 0x1c7e: 0xbc71, 0x1c7f: 0x0040, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0xb3f9, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x0040, + 0x1c9e: 0x0040, 0x1c9f: 0x0040, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0xb249, + 0x1ca4: 0x0040, 0x1ca5: 0xb411, 0x1ca6: 0xb291, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, + 0x1caa: 0x0040, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0xb279, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0xb261, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0xbca2, 0x1cc2: 0xbcba, 0x1cc3: 0xbcd2, 0x1cc4: 0xbcea, 0x1cc5: 0xbd02, + 0x1cc6: 0xbd1a, 0x1cc7: 0xbd32, 0x1cc8: 0xbd4a, 0x1cc9: 0xbd62, 0x1cca: 0xbd7a, 0x1ccb: 0x0018, + 0x1ccc: 0x0018, 0x1ccd: 0x0040, 0x1cce: 0x0040, 0x1ccf: 0x0040, 0x1cd0: 0xbd92, 0x1cd1: 0xbdb2, + 0x1cd2: 0xbdd2, 0x1cd3: 0xbdf2, 0x1cd4: 0xbe12, 0x1cd5: 0xbe32, 0x1cd6: 0xbe52, 0x1cd7: 0xbe72, + 0x1cd8: 0xbe92, 0x1cd9: 0xbeb2, 0x1cda: 0xbed2, 0x1cdb: 0xbef2, 0x1cdc: 0xbf12, 0x1cdd: 0xbf32, + 0x1cde: 0xbf52, 0x1cdf: 0xbf72, 0x1ce0: 0xbf92, 0x1ce1: 0xbfb2, 0x1ce2: 0xbfd2, 0x1ce3: 0xbff2, + 0x1ce4: 0xc012, 0x1ce5: 0xc032, 0x1ce6: 0xc052, 0x1ce7: 0xc072, 0x1ce8: 0xc092, 0x1ce9: 0xc0b2, + 0x1cea: 0xc0d1, 0x1ceb: 0x1159, 0x1cec: 0x0269, 0x1ced: 0x6671, 0x1cee: 0xc111, 0x1cef: 0x0040, + 0x1cf0: 0x0039, 0x1cf1: 0x0ee9, 0x1cf2: 0x1159, 0x1cf3: 0x0ef9, 0x1cf4: 0x0f09, 0x1cf5: 0x1199, + 0x1cf6: 0x0f31, 0x1cf7: 0x0249, 0x1cf8: 0x0f41, 0x1cf9: 0x0259, 0x1cfa: 0x0f51, 0x1cfb: 0x0359, + 0x1cfc: 0x0f61, 0x1cfd: 0x0f71, 0x1cfe: 0x00d9, 0x1cff: 0x0f99, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x2039, 0x1d01: 0x0269, 0x1d02: 0x01d9, 0x1d03: 0x0fa9, 0x1d04: 0x0fb9, 0x1d05: 0x1089, + 0x1d06: 0x0279, 0x1d07: 0x0369, 0x1d08: 0x0289, 0x1d09: 0x13d1, 0x1d0a: 0xc129, 0x1d0b: 0x65b1, + 0x1d0c: 0xc141, 0x1d0d: 0x1441, 0x1d0e: 0xc159, 0x1d0f: 0xc179, 0x1d10: 0x0018, 0x1d11: 0x0018, + 0x1d12: 0x0018, 0x1d13: 0x0018, 0x1d14: 0x0018, 0x1d15: 0x0018, 0x1d16: 0x0018, 0x1d17: 0x0018, + 0x1d18: 0x0018, 0x1d19: 0x0018, 0x1d1a: 0x0018, 0x1d1b: 0x0018, 0x1d1c: 0x0018, 0x1d1d: 0x0018, + 0x1d1e: 0x0018, 0x1d1f: 0x0018, 0x1d20: 0x0018, 0x1d21: 0x0018, 0x1d22: 0x0018, 0x1d23: 0x0018, + 0x1d24: 0x0018, 0x1d25: 0x0018, 0x1d26: 0x0018, 0x1d27: 0x0018, 0x1d28: 0x0018, 0x1d29: 0x0018, + 0x1d2a: 0xc191, 0x1d2b: 0xc1a9, 0x1d2c: 0x0040, 0x1d2d: 0x0040, 0x1d2e: 0x0040, 0x1d2f: 0x0040, + 0x1d30: 0x0018, 0x1d31: 0x0018, 0x1d32: 0x0018, 0x1d33: 0x0018, 0x1d34: 0x0018, 0x1d35: 0x0018, + 0x1d36: 0x0018, 0x1d37: 0x0018, 0x1d38: 0x0018, 0x1d39: 0x0018, 0x1d3a: 0x0018, 0x1d3b: 0x0018, + 0x1d3c: 0x0018, 0x1d3d: 0x0018, 0x1d3e: 0x0018, 0x1d3f: 0x0018, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0xc1d9, 0x1d41: 0xc211, 0x1d42: 0xc249, 0x1d43: 0x0040, 0x1d44: 0x0040, 0x1d45: 0x0040, + 0x1d46: 0x0040, 0x1d47: 0x0040, 0x1d48: 0x0040, 0x1d49: 0x0040, 0x1d4a: 0x0040, 0x1d4b: 0x0040, + 0x1d4c: 0x0040, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xc269, 0x1d51: 0xc289, + 0x1d52: 0xc2a9, 0x1d53: 0xc2c9, 0x1d54: 0xc2e9, 0x1d55: 0xc309, 0x1d56: 0xc329, 0x1d57: 0xc349, + 0x1d58: 0xc369, 0x1d59: 0xc389, 0x1d5a: 0xc3a9, 0x1d5b: 0xc3c9, 0x1d5c: 0xc3e9, 0x1d5d: 0xc409, + 0x1d5e: 0xc429, 0x1d5f: 0xc449, 0x1d60: 0xc469, 0x1d61: 0xc489, 0x1d62: 0xc4a9, 0x1d63: 0xc4c9, + 0x1d64: 0xc4e9, 0x1d65: 0xc509, 0x1d66: 0xc529, 0x1d67: 0xc549, 0x1d68: 0xc569, 0x1d69: 0xc589, + 0x1d6a: 0xc5a9, 0x1d6b: 0xc5c9, 0x1d6c: 0xc5e9, 0x1d6d: 0xc609, 0x1d6e: 0xc629, 0x1d6f: 0xc649, + 0x1d70: 0xc669, 0x1d71: 0xc689, 0x1d72: 0xc6a9, 0x1d73: 0xc6c9, 0x1d74: 0xc6e9, 0x1d75: 0xc709, + 0x1d76: 0xc729, 0x1d77: 0xc749, 0x1d78: 0xc769, 0x1d79: 0xc789, 0x1d7a: 0xc7a9, 0x1d7b: 0xc7c9, + 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, + // Block 0x76, offset 0x1d80 + 0x1d80: 0xcaf9, 0x1d81: 0xcb19, 0x1d82: 0xcb39, 0x1d83: 0x8b1d, 0x1d84: 0xcb59, 0x1d85: 0xcb79, + 0x1d86: 0xcb99, 0x1d87: 0xcbb9, 0x1d88: 0xcbd9, 0x1d89: 0xcbf9, 0x1d8a: 0xcc19, 0x1d8b: 0xcc39, + 0x1d8c: 0xcc59, 0x1d8d: 0x8b3d, 0x1d8e: 0xcc79, 0x1d8f: 0xcc99, 0x1d90: 0xccb9, 0x1d91: 0xccd9, + 0x1d92: 0x8b5d, 0x1d93: 0xccf9, 0x1d94: 0xcd19, 0x1d95: 0xc429, 0x1d96: 0x8b7d, 0x1d97: 0xcd39, + 0x1d98: 0xcd59, 0x1d99: 0xcd79, 0x1d9a: 0xcd99, 0x1d9b: 0xcdb9, 0x1d9c: 0x8b9d, 0x1d9d: 0xcdd9, + 0x1d9e: 0xcdf9, 0x1d9f: 0xce19, 0x1da0: 0xce39, 0x1da1: 0xce59, 0x1da2: 0xc789, 0x1da3: 0xce79, + 0x1da4: 0xce99, 0x1da5: 0xceb9, 0x1da6: 0xced9, 0x1da7: 0xcef9, 0x1da8: 0xcf19, 0x1da9: 0xcf39, + 0x1daa: 0xcf59, 0x1dab: 0xcf79, 0x1dac: 0xcf99, 0x1dad: 0xcfb9, 0x1dae: 0xcfd9, 0x1daf: 0xcff9, + 0x1db0: 0xd019, 0x1db1: 0xd039, 0x1db2: 0xd039, 0x1db3: 0xd039, 0x1db4: 0x8bbd, 0x1db5: 0xd059, + 0x1db6: 0xd079, 0x1db7: 0xd099, 0x1db8: 0x8bdd, 0x1db9: 0xd0b9, 0x1dba: 0xd0d9, 0x1dbb: 0xd0f9, + 0x1dbc: 0xd119, 0x1dbd: 0xd139, 0x1dbe: 0xd159, 0x1dbf: 0xd179, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xd199, 0x1dc1: 0xd1b9, 0x1dc2: 0xd1d9, 0x1dc3: 0xd1f9, 0x1dc4: 0xd219, 0x1dc5: 0xd239, + 0x1dc6: 0xd239, 0x1dc7: 0xd259, 0x1dc8: 0xd279, 0x1dc9: 0xd299, 0x1dca: 0xd2b9, 0x1dcb: 0xd2d9, + 0x1dcc: 0xd2f9, 0x1dcd: 0xd319, 0x1dce: 0xd339, 0x1dcf: 0xd359, 0x1dd0: 0xd379, 0x1dd1: 0xd399, + 0x1dd2: 0xd3b9, 0x1dd3: 0xd3d9, 0x1dd4: 0xd3f9, 0x1dd5: 0xd419, 0x1dd6: 0xd439, 0x1dd7: 0xd459, + 0x1dd8: 0xd479, 0x1dd9: 0x8bfd, 0x1dda: 0xd499, 0x1ddb: 0xd4b9, 0x1ddc: 0xd4d9, 0x1ddd: 0xc309, + 0x1dde: 0xd4f9, 0x1ddf: 0xd519, 0x1de0: 0x8c1d, 0x1de1: 0x8c3d, 0x1de2: 0xd539, 0x1de3: 0xd559, + 0x1de4: 0xd579, 0x1de5: 0xd599, 0x1de6: 0xd5b9, 0x1de7: 0xd5d9, 0x1de8: 0x0040, 0x1de9: 0xd5f9, + 0x1dea: 0xd619, 0x1deb: 0xd619, 0x1dec: 0x8c5d, 0x1ded: 0xd639, 0x1dee: 0xd659, 0x1def: 0xd679, + 0x1df0: 0xd699, 0x1df1: 0x8c7d, 0x1df2: 0xd6b9, 0x1df3: 0xd6d9, 0x1df4: 0x0040, 0x1df5: 0xd6f9, + 0x1df6: 0xd719, 0x1df7: 0xd739, 0x1df8: 0xd759, 0x1df9: 0xd779, 0x1dfa: 0xd799, 0x1dfb: 0x8c9d, + 0x1dfc: 0xd7b9, 0x1dfd: 0x8cbd, 0x1dfe: 0xd7d9, 0x1dff: 0xd7f9, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xd819, 0x1e01: 0xd839, 0x1e02: 0xd859, 0x1e03: 0xd879, 0x1e04: 0xd899, 0x1e05: 0xd8b9, + 0x1e06: 0xd8d9, 0x1e07: 0xd8f9, 0x1e08: 0xd919, 0x1e09: 0x8cdd, 0x1e0a: 0xd939, 0x1e0b: 0xd959, + 0x1e0c: 0xd979, 0x1e0d: 0xd999, 0x1e0e: 0xd9b9, 0x1e0f: 0x8cfd, 0x1e10: 0xd9d9, 0x1e11: 0x8d1d, + 0x1e12: 
0x8d3d, 0x1e13: 0xd9f9, 0x1e14: 0xda19, 0x1e15: 0xda19, 0x1e16: 0xda39, 0x1e17: 0x8d5d, + 0x1e18: 0x8d7d, 0x1e19: 0xda59, 0x1e1a: 0xda79, 0x1e1b: 0xda99, 0x1e1c: 0xdab9, 0x1e1d: 0xdad9, + 0x1e1e: 0xdaf9, 0x1e1f: 0xdb19, 0x1e20: 0xdb39, 0x1e21: 0xdb59, 0x1e22: 0xdb79, 0x1e23: 0xdb99, + 0x1e24: 0x8d9d, 0x1e25: 0xdbb9, 0x1e26: 0xdbd9, 0x1e27: 0xdbf9, 0x1e28: 0xdc19, 0x1e29: 0xdbf9, + 0x1e2a: 0xdc39, 0x1e2b: 0xdc59, 0x1e2c: 0xdc79, 0x1e2d: 0xdc99, 0x1e2e: 0xdcb9, 0x1e2f: 0xdcd9, + 0x1e30: 0xdcf9, 0x1e31: 0xdd19, 0x1e32: 0xdd39, 0x1e33: 0xdd59, 0x1e34: 0xdd79, 0x1e35: 0xdd99, + 0x1e36: 0xddb9, 0x1e37: 0xddd9, 0x1e38: 0x8dbd, 0x1e39: 0xddf9, 0x1e3a: 0xde19, 0x1e3b: 0xde39, + 0x1e3c: 0xde59, 0x1e3d: 0xde79, 0x1e3e: 0x8ddd, 0x1e3f: 0xde99, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xe599, 0x1e41: 0xe5b9, 0x1e42: 0xe5d9, 0x1e43: 0xe5f9, 0x1e44: 0xe619, 0x1e45: 0xe639, + 0x1e46: 0x8efd, 0x1e47: 0xe659, 0x1e48: 0xe679, 0x1e49: 0xe699, 0x1e4a: 0xe6b9, 0x1e4b: 0xe6d9, + 0x1e4c: 0xe6f9, 0x1e4d: 0x8f1d, 0x1e4e: 0xe719, 0x1e4f: 0xe739, 0x1e50: 0x8f3d, 0x1e51: 0x8f5d, + 0x1e52: 0xe759, 0x1e53: 0xe779, 0x1e54: 0xe799, 0x1e55: 0xe7b9, 0x1e56: 0xe7d9, 0x1e57: 0xe7f9, + 0x1e58: 0xe819, 0x1e59: 0xe839, 0x1e5a: 0xe859, 0x1e5b: 0x8f7d, 0x1e5c: 0xe879, 0x1e5d: 0x8f9d, + 0x1e5e: 0xe899, 0x1e5f: 0x0040, 0x1e60: 0xe8b9, 0x1e61: 0xe8d9, 0x1e62: 0xe8f9, 0x1e63: 0x8fbd, + 0x1e64: 0xe919, 0x1e65: 0xe939, 0x1e66: 0x8fdd, 0x1e67: 0x8ffd, 0x1e68: 0xe959, 0x1e69: 0xe979, + 0x1e6a: 0xe999, 0x1e6b: 0xe9b9, 0x1e6c: 0xe9d9, 0x1e6d: 0xe9d9, 0x1e6e: 0xe9f9, 0x1e6f: 0xea19, + 0x1e70: 0xea39, 0x1e71: 0xea59, 0x1e72: 0xea79, 0x1e73: 0xea99, 0x1e74: 0xeab9, 0x1e75: 0x901d, + 0x1e76: 0xead9, 0x1e77: 0x903d, 0x1e78: 0xeaf9, 0x1e79: 0x905d, 0x1e7a: 0xeb19, 0x1e7b: 0x907d, + 0x1e7c: 0x909d, 0x1e7d: 0x90bd, 0x1e7e: 0xeb39, 0x1e7f: 0xeb59, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xeb79, 0x1e81: 0x90dd, 0x1e82: 0x90fd, 0x1e83: 0x911d, 0x1e84: 0x913d, 0x1e85: 0xeb99, + 0x1e86: 0xebb9, 0x1e87: 0xebb9, 0x1e88: 0xebd9, 0x1e89: 0xebf9, 0x1e8a: 0xec19, 0x1e8b: 0xec39, + 0x1e8c: 0xec59, 0x1e8d: 0x915d, 0x1e8e: 0xec79, 0x1e8f: 0xec99, 0x1e90: 0xecb9, 0x1e91: 0xecd9, + 0x1e92: 0x917d, 0x1e93: 0xecf9, 0x1e94: 0x919d, 0x1e95: 0x91bd, 0x1e96: 0xed19, 0x1e97: 0xed39, + 0x1e98: 0xed59, 0x1e99: 0xed79, 0x1e9a: 0xed99, 0x1e9b: 0xedb9, 0x1e9c: 0x91dd, 0x1e9d: 0x91fd, + 0x1e9e: 0x921d, 0x1e9f: 0x0040, 0x1ea0: 0xedd9, 0x1ea1: 0x923d, 0x1ea2: 0xedf9, 0x1ea3: 0xee19, + 0x1ea4: 0xee39, 0x1ea5: 0x925d, 0x1ea6: 0xee59, 0x1ea7: 0xee79, 0x1ea8: 0xee99, 0x1ea9: 0xeeb9, + 0x1eaa: 0xeed9, 0x1eab: 0x927d, 0x1eac: 0xeef9, 0x1ead: 0xef19, 0x1eae: 0xef39, 0x1eaf: 0xef59, + 0x1eb0: 0xef79, 0x1eb1: 0xef99, 0x1eb2: 0x929d, 0x1eb3: 0x92bd, 0x1eb4: 0xefb9, 0x1eb5: 0x92dd, + 0x1eb6: 0xefd9, 0x1eb7: 0x92fd, 0x1eb8: 0xeff9, 0x1eb9: 0xf019, 0x1eba: 0xf039, 0x1ebb: 0x931d, + 0x1ebc: 0x933d, 0x1ebd: 0xf059, 0x1ebe: 0x935d, 0x1ebf: 0xf079, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xf6b9, 0x1ec1: 0xf6d9, 0x1ec2: 0xf6f9, 0x1ec3: 0xf719, 0x1ec4: 0xf739, 0x1ec5: 0x951d, + 0x1ec6: 0xf759, 0x1ec7: 0xf779, 0x1ec8: 0xf799, 0x1ec9: 0xf7b9, 0x1eca: 0xf7d9, 0x1ecb: 0x953d, + 0x1ecc: 0x955d, 0x1ecd: 0xf7f9, 0x1ece: 0xf819, 0x1ecf: 0xf839, 0x1ed0: 0xf859, 0x1ed1: 0xf879, + 0x1ed2: 0xf899, 0x1ed3: 0x957d, 0x1ed4: 0xf8b9, 0x1ed5: 0xf8d9, 0x1ed6: 0xf8f9, 0x1ed7: 0xf919, + 0x1ed8: 0x959d, 0x1ed9: 0x95bd, 0x1eda: 0xf939, 0x1edb: 0xf959, 0x1edc: 0xf979, 0x1edd: 0x95dd, + 0x1ede: 0xf999, 0x1edf: 0xf9b9, 0x1ee0: 0x6815, 0x1ee1: 0x95fd, 0x1ee2: 0xf9d9, 0x1ee3: 0xf9f9, + 0x1ee4: 0xfa19, 0x1ee5: 0x961d, 
0x1ee6: 0xfa39, 0x1ee7: 0xfa59, 0x1ee8: 0xfa79, 0x1ee9: 0xfa99, + 0x1eea: 0xfab9, 0x1eeb: 0xfad9, 0x1eec: 0xfaf9, 0x1eed: 0x963d, 0x1eee: 0xfb19, 0x1eef: 0xfb39, + 0x1ef0: 0xfb59, 0x1ef1: 0x965d, 0x1ef2: 0xfb79, 0x1ef3: 0xfb99, 0x1ef4: 0xfbb9, 0x1ef5: 0xfbd9, + 0x1ef6: 0x7b35, 0x1ef7: 0x967d, 0x1ef8: 0xfbf9, 0x1ef9: 0xfc19, 0x1efa: 0xfc39, 0x1efb: 0x969d, + 0x1efc: 0xfc59, 0x1efd: 0x96bd, 0x1efe: 0xfc79, 0x1eff: 0xfc79, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xfc99, 0x1f01: 0x96dd, 0x1f02: 0xfcb9, 0x1f03: 0xfcd9, 0x1f04: 0xfcf9, 0x1f05: 0xfd19, + 0x1f06: 0xfd39, 0x1f07: 0xfd59, 0x1f08: 0xfd79, 0x1f09: 0x96fd, 0x1f0a: 0xfd99, 0x1f0b: 0xfdb9, + 0x1f0c: 0xfdd9, 0x1f0d: 0xfdf9, 0x1f0e: 0xfe19, 0x1f0f: 0xfe39, 0x1f10: 0x971d, 0x1f11: 0xfe59, + 0x1f12: 0x973d, 0x1f13: 0x975d, 0x1f14: 0x977d, 0x1f15: 0xfe79, 0x1f16: 0xfe99, 0x1f17: 0xfeb9, + 0x1f18: 0xfed9, 0x1f19: 0xfef9, 0x1f1a: 0xff19, 0x1f1b: 0xff39, 0x1f1c: 0xff59, 0x1f1d: 0x979d, + 0x1f1e: 0x0040, 0x1f1f: 0x0040, 0x1f20: 0x0040, 0x1f21: 0x0040, 0x1f22: 0x0040, 0x1f23: 0x0040, + 0x1f24: 0x0040, 0x1f25: 0x0040, 0x1f26: 0x0040, 0x1f27: 0x0040, 0x1f28: 0x0040, 0x1f29: 0x0040, + 0x1f2a: 0x0040, 0x1f2b: 0x0040, 0x1f2c: 0x0040, 0x1f2d: 0x0040, 0x1f2e: 0x0040, 0x1f2f: 0x0040, + 0x1f30: 0x0040, 0x1f31: 0x0040, 0x1f32: 0x0040, 0x1f33: 0x0040, 0x1f34: 0x0040, 0x1f35: 0x0040, + 0x1f36: 0x0040, 0x1f37: 0x0040, 0x1f38: 0x0040, 0x1f39: 0x0040, 0x1f3a: 0x0040, 0x1f3b: 0x0040, + 0x1f3c: 0x0040, 0x1f3d: 0x0040, 0x1f3e: 0x0040, 0x1f3f: 0x0040, +} + +// idnaIndex: 35 blocks, 2240 entries, 4480 bytes +// Block 0 is the zero block. +var idnaIndex = [2240]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7b, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7c, 0xca: 0x7d, 0xcb: 0x07, 0xcc: 0x7e, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x7f, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x80, 0xd6: 0x81, 0xd7: 0x82, + 0xd8: 0x0f, 0xd9: 0x83, 0xda: 0x84, 0xdb: 0x10, 0xdc: 0x11, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, + // Block 0x4, offset 0x100 + 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x12, 0x126: 0x13, 0x127: 0x14, + 0x128: 0x15, 0x129: 0x16, 0x12a: 0x17, 0x12b: 0x18, 0x12c: 0x19, 0x12d: 0x1a, 0x12e: 0x1b, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1c, 0x132: 0x1d, 0x133: 0x1e, 0x134: 0x8f, 0x135: 0x1f, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x20, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x21, 0x13e: 0x22, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9b, 0x147: 0x9b, + 0x148: 0x9d, 0x149: 0x9e, 0x14a: 0x9f, 0x14b: 0xa0, 0x14c: 0xa1, 0x14d: 0xa2, 0x14e: 0xa3, 0x14f: 0xa4, + 0x150: 0xa5, 0x151: 0x9d, 0x152: 0x9d, 0x153: 0x9d, 0x154: 0x9d, 0x155: 0x9d, 0x156: 0x9d, 0x157: 0x9d, + 0x158: 0x9d, 0x159: 0xa6, 0x15a: 0xa7, 0x15b: 0xa8, 0x15c: 0xa9, 0x15d: 0xaa, 0x15e: 0xab, 0x15f: 0xac, + 0x160: 0xad, 0x161: 0xae, 0x162: 0xaf, 0x163: 0xb0, 0x164: 0xb1, 0x165: 0xb2, 0x166: 0xb3, 0x167: 0xb4, + 0x168: 0xb5, 0x169: 0xb6, 0x16a: 0xb7, 0x16b: 0xb8, 0x16c: 0xb9, 0x16d: 0xba, 0x16e: 0xbb, 0x16f: 0xbc, + 0x170: 0xbd, 0x171: 0xbe, 0x172: 0xbf, 0x173: 0xc0, 0x174: 0x23, 0x175: 0x24, 
0x176: 0x25, 0x177: 0xc1, + 0x178: 0x26, 0x179: 0x26, 0x17a: 0x27, 0x17b: 0x26, 0x17c: 0xc2, 0x17d: 0x28, 0x17e: 0x29, 0x17f: 0x2a, + // Block 0x6, offset 0x180 + 0x180: 0x2b, 0x181: 0x2c, 0x182: 0x2d, 0x183: 0xc3, 0x184: 0x2e, 0x185: 0x2f, 0x186: 0xc4, 0x187: 0x9b, + 0x188: 0xc5, 0x189: 0xc6, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc7, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xc8, + 0x190: 0xc9, 0x191: 0x30, 0x192: 0x31, 0x193: 0x32, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xca, 0x1a9: 0xcb, 0x1aa: 0x9b, 0x1ab: 0xcc, 0x1ac: 0x9b, 0x1ad: 0xcd, 0x1ae: 0xce, 0x1af: 0xcf, + 0x1b0: 0xd0, 0x1b1: 0x33, 0x1b2: 0x26, 0x1b3: 0x34, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4, + 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x35, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x36, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4: 0xdf, 0x1c5: 0x37, 0x1c6: 0x38, 0x1c7: 0xe0, + 0x1c8: 0xe1, 0x1c9: 0x39, 0x1ca: 0x3a, 0x1cb: 0x3b, 0x1cc: 0x3c, 0x1cd: 0x3d, 0x1ce: 0x3e, 0x1cf: 0x3f, + 0x1d0: 0x9d, 0x1d1: 0x9d, 0x1d2: 0x9d, 0x1d3: 0x9d, 0x1d4: 0x9d, 0x1d5: 0x9d, 0x1d6: 0x9d, 0x1d7: 0x9d, + 0x1d8: 0x9d, 0x1d9: 0x9d, 0x1da: 0x9d, 0x1db: 0x9d, 0x1dc: 0x9d, 0x1dd: 0x9d, 0x1de: 0x9d, 0x1df: 0x9d, + 0x1e0: 0x9d, 0x1e1: 0x9d, 0x1e2: 0x9d, 0x1e3: 0x9d, 0x1e4: 0x9d, 0x1e5: 0x9d, 0x1e6: 0x9d, 0x1e7: 0x9d, + 0x1e8: 0x9d, 0x1e9: 0x9d, 0x1ea: 0x9d, 0x1eb: 0x9d, 0x1ec: 0x9d, 0x1ed: 0x9d, 0x1ee: 0x9d, 0x1ef: 0x9d, + 0x1f0: 0x9d, 0x1f1: 0x9d, 0x1f2: 0x9d, 0x1f3: 0x9d, 0x1f4: 0x9d, 0x1f5: 0x9d, 0x1f6: 0x9d, 0x1f7: 0x9d, + 0x1f8: 0x9d, 0x1f9: 0x9d, 0x1fa: 0x9d, 0x1fb: 0x9d, 0x1fc: 0x9d, 0x1fd: 0x9d, 0x1fe: 0x9d, 0x1ff: 0x9d, + // Block 0x8, offset 0x200 + 0x200: 0x9d, 0x201: 0x9d, 0x202: 0x9d, 0x203: 0x9d, 0x204: 0x9d, 0x205: 0x9d, 0x206: 0x9d, 0x207: 0x9d, + 0x208: 0x9d, 0x209: 0x9d, 0x20a: 0x9d, 0x20b: 0x9d, 0x20c: 0x9d, 0x20d: 0x9d, 0x20e: 0x9d, 0x20f: 0x9d, + 0x210: 0x9d, 0x211: 0x9d, 0x212: 0x9d, 0x213: 0x9d, 0x214: 0x9d, 0x215: 0x9d, 0x216: 0x9d, 0x217: 0x9d, + 0x218: 0x9d, 0x219: 0x9d, 0x21a: 0x9d, 0x21b: 0x9d, 0x21c: 0x9d, 0x21d: 0x9d, 0x21e: 0x9d, 0x21f: 0x9d, + 0x220: 0x9d, 0x221: 0x9d, 0x222: 0x9d, 0x223: 0x9d, 0x224: 0x9d, 0x225: 0x9d, 0x226: 0x9d, 0x227: 0x9d, + 0x228: 0x9d, 0x229: 0x9d, 0x22a: 0x9d, 0x22b: 0x9d, 0x22c: 0x9d, 0x22d: 0x9d, 0x22e: 0x9d, 0x22f: 0x9d, + 0x230: 0x9d, 0x231: 0x9d, 0x232: 0x9d, 0x233: 0x9d, 0x234: 0x9d, 0x235: 0x9d, 0x236: 0xb0, 0x237: 0x9b, + 0x238: 0x9d, 0x239: 0x9d, 0x23a: 0x9d, 0x23b: 0x9d, 0x23c: 0x9d, 0x23d: 0x9d, 0x23e: 0x9d, 0x23f: 0x9d, + // Block 0x9, offset 0x240 + 0x240: 0x9d, 0x241: 0x9d, 0x242: 0x9d, 0x243: 0x9d, 0x244: 0x9d, 0x245: 0x9d, 0x246: 0x9d, 0x247: 0x9d, + 0x248: 0x9d, 0x249: 0x9d, 0x24a: 0x9d, 0x24b: 0x9d, 0x24c: 0x9d, 0x24d: 0x9d, 0x24e: 0x9d, 0x24f: 0x9d, + 0x250: 0x9d, 0x251: 0x9d, 0x252: 0x9d, 0x253: 0x9d, 0x254: 0x9d, 0x255: 0x9d, 0x256: 0x9d, 0x257: 0x9d, + 0x258: 0x9d, 0x259: 0x9d, 0x25a: 0x9d, 0x25b: 0x9d, 0x25c: 0x9d, 0x25d: 0x9d, 0x25e: 0x9d, 0x25f: 0x9d, + 0x260: 0x9d, 0x261: 0x9d, 0x262: 0x9d, 0x263: 0x9d, 0x264: 0x9d, 0x265: 0x9d, 0x266: 0x9d, 0x267: 0x9d, + 0x268: 0x9d, 0x269: 0x9d, 0x26a: 0x9d, 0x26b: 0x9d, 0x26c: 0x9d, 0x26d: 0x9d, 0x26e: 0x9d, 0x26f: 0x9d, + 0x270: 0x9d, 0x271: 0x9d, 0x272: 0x9d, 0x273: 0x9d, 0x274: 0x9d, 0x275: 0x9d, 0x276: 0x9d, 0x277: 0x9d, + 0x278: 0x9d, 
0x279: 0x9d, 0x27a: 0x9d, 0x27b: 0x9d, 0x27c: 0x9d, 0x27d: 0x9d, 0x27e: 0x9d, 0x27f: 0x9d, + // Block 0xa, offset 0x280 + 0x280: 0x9d, 0x281: 0x9d, 0x282: 0x9d, 0x283: 0x9d, 0x284: 0x9d, 0x285: 0x9d, 0x286: 0x9d, 0x287: 0x9d, + 0x288: 0x9d, 0x289: 0x9d, 0x28a: 0x9d, 0x28b: 0x9d, 0x28c: 0x9d, 0x28d: 0x9d, 0x28e: 0x9d, 0x28f: 0x9d, + 0x290: 0x9d, 0x291: 0x9d, 0x292: 0x9d, 0x293: 0x9d, 0x294: 0x9d, 0x295: 0x9d, 0x296: 0x9d, 0x297: 0x9d, + 0x298: 0x9d, 0x299: 0x9d, 0x29a: 0x9d, 0x29b: 0x9d, 0x29c: 0x9d, 0x29d: 0x9d, 0x29e: 0x9d, 0x29f: 0x9d, + 0x2a0: 0x9d, 0x2a1: 0x9d, 0x2a2: 0x9d, 0x2a3: 0x9d, 0x2a4: 0x9d, 0x2a5: 0x9d, 0x2a6: 0x9d, 0x2a7: 0x9d, + 0x2a8: 0x9d, 0x2a9: 0x9d, 0x2aa: 0x9d, 0x2ab: 0x9d, 0x2ac: 0x9d, 0x2ad: 0x9d, 0x2ae: 0x9d, 0x2af: 0x9d, + 0x2b0: 0x9d, 0x2b1: 0x9d, 0x2b2: 0x9d, 0x2b3: 0x9d, 0x2b4: 0x9d, 0x2b5: 0x9d, 0x2b6: 0x9d, 0x2b7: 0x9d, + 0x2b8: 0x9d, 0x2b9: 0x9d, 0x2ba: 0x9d, 0x2bb: 0x9d, 0x2bc: 0x9d, 0x2bd: 0x9d, 0x2be: 0x9d, 0x2bf: 0xe2, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9d, 0x2c1: 0x9d, 0x2c2: 0x9d, 0x2c3: 0x9d, 0x2c4: 0x9d, 0x2c5: 0x9d, 0x2c6: 0x9d, 0x2c7: 0x9d, + 0x2c8: 0x9d, 0x2c9: 0x9d, 0x2ca: 0x9d, 0x2cb: 0x9d, 0x2cc: 0x9d, 0x2cd: 0x9d, 0x2ce: 0x9d, 0x2cf: 0x9d, + 0x2d0: 0x9d, 0x2d1: 0x9d, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9d, 0x2d5: 0x9d, 0x2d6: 0x9d, 0x2d7: 0x9d, + 0x2d8: 0xe5, 0x2d9: 0x40, 0x2da: 0x41, 0x2db: 0xe6, 0x2dc: 0x42, 0x2dd: 0x43, 0x2de: 0x44, 0x2df: 0xe7, + 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 0xef, + 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7, + 0x2f0: 0x9d, 0x2f1: 0x9d, 0x2f2: 0x9d, 0x2f3: 0x9d, 0x2f4: 0x9d, 0x2f5: 0x9d, 0x2f6: 0x9d, 0x2f7: 0x9d, + 0x2f8: 0x9d, 0x2f9: 0x9d, 0x2fa: 0x9d, 0x2fb: 0x9d, 0x2fc: 0x9d, 0x2fd: 0x9d, 0x2fe: 0x9d, 0x2ff: 0x9d, + // Block 0xc, offset 0x300 + 0x300: 0x9d, 0x301: 0x9d, 0x302: 0x9d, 0x303: 0x9d, 0x304: 0x9d, 0x305: 0x9d, 0x306: 0x9d, 0x307: 0x9d, + 0x308: 0x9d, 0x309: 0x9d, 0x30a: 0x9d, 0x30b: 0x9d, 0x30c: 0x9d, 0x30d: 0x9d, 0x30e: 0x9d, 0x30f: 0x9d, + 0x310: 0x9d, 0x311: 0x9d, 0x312: 0x9d, 0x313: 0x9d, 0x314: 0x9d, 0x315: 0x9d, 0x316: 0x9d, 0x317: 0x9d, + 0x318: 0x9d, 0x319: 0x9d, 0x31a: 0x9d, 0x31b: 0x9d, 0x31c: 0x9d, 0x31d: 0x9d, 0x31e: 0xf8, 0x31f: 0xf9, + // Block 0xd, offset 0x340 + 0x340: 0xb8, 0x341: 0xb8, 0x342: 0xb8, 0x343: 0xb8, 0x344: 0xb8, 0x345: 0xb8, 0x346: 0xb8, 0x347: 0xb8, + 0x348: 0xb8, 0x349: 0xb8, 0x34a: 0xb8, 0x34b: 0xb8, 0x34c: 0xb8, 0x34d: 0xb8, 0x34e: 0xb8, 0x34f: 0xb8, + 0x350: 0xb8, 0x351: 0xb8, 0x352: 0xb8, 0x353: 0xb8, 0x354: 0xb8, 0x355: 0xb8, 0x356: 0xb8, 0x357: 0xb8, + 0x358: 0xb8, 0x359: 0xb8, 0x35a: 0xb8, 0x35b: 0xb8, 0x35c: 0xb8, 0x35d: 0xb8, 0x35e: 0xb8, 0x35f: 0xb8, + 0x360: 0xb8, 0x361: 0xb8, 0x362: 0xb8, 0x363: 0xb8, 0x364: 0xb8, 0x365: 0xb8, 0x366: 0xb8, 0x367: 0xb8, + 0x368: 0xb8, 0x369: 0xb8, 0x36a: 0xb8, 0x36b: 0xb8, 0x36c: 0xb8, 0x36d: 0xb8, 0x36e: 0xb8, 0x36f: 0xb8, + 0x370: 0xb8, 0x371: 0xb8, 0x372: 0xb8, 0x373: 0xb8, 0x374: 0xb8, 0x375: 0xb8, 0x376: 0xb8, 0x377: 0xb8, + 0x378: 0xb8, 0x379: 0xb8, 0x37a: 0xb8, 0x37b: 0xb8, 0x37c: 0xb8, 0x37d: 0xb8, 0x37e: 0xb8, 0x37f: 0xb8, + // Block 0xe, offset 0x380 + 0x380: 0xb8, 0x381: 0xb8, 0x382: 0xb8, 0x383: 0xb8, 0x384: 0xb8, 0x385: 0xb8, 0x386: 0xb8, 0x387: 0xb8, + 0x388: 0xb8, 0x389: 0xb8, 0x38a: 0xb8, 0x38b: 0xb8, 0x38c: 0xb8, 0x38d: 0xb8, 0x38e: 0xb8, 0x38f: 0xb8, + 0x390: 0xb8, 0x391: 0xb8, 0x392: 0xb8, 0x393: 0xb8, 0x394: 0xb8, 0x395: 0xb8, 0x396: 0xb8, 0x397: 0xb8, + 0x398: 0xb8, 0x399: 0xb8, 
0x39a: 0xb8, 0x39b: 0xb8, 0x39c: 0xb8, 0x39d: 0xb8, 0x39e: 0xb8, 0x39f: 0xb8, + 0x3a0: 0xb8, 0x3a1: 0xb8, 0x3a2: 0xb8, 0x3a3: 0xb8, 0x3a4: 0xfa, 0x3a5: 0xfb, 0x3a6: 0xfc, 0x3a7: 0xfd, + 0x3a8: 0x45, 0x3a9: 0xfe, 0x3aa: 0xff, 0x3ab: 0x46, 0x3ac: 0x47, 0x3ad: 0x48, 0x3ae: 0x49, 0x3af: 0x4a, + 0x3b0: 0x100, 0x3b1: 0x4b, 0x3b2: 0x4c, 0x3b3: 0x4d, 0x3b4: 0x4e, 0x3b5: 0x4f, 0x3b6: 0x101, 0x3b7: 0x50, + 0x3b8: 0x51, 0x3b9: 0x52, 0x3ba: 0x53, 0x3bb: 0x54, 0x3bc: 0x55, 0x3bd: 0x56, 0x3be: 0x57, 0x3bf: 0x58, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x102, 0x3c1: 0x103, 0x3c2: 0x9d, 0x3c3: 0x104, 0x3c4: 0x105, 0x3c5: 0x9b, 0x3c6: 0x106, 0x3c7: 0x107, + 0x3c8: 0xb8, 0x3c9: 0xb8, 0x3ca: 0x108, 0x3cb: 0x109, 0x3cc: 0x10a, 0x3cd: 0x10b, 0x3ce: 0x10c, 0x3cf: 0x10d, + 0x3d0: 0x10e, 0x3d1: 0x9d, 0x3d2: 0x10f, 0x3d3: 0x110, 0x3d4: 0x111, 0x3d5: 0x112, 0x3d6: 0xb8, 0x3d7: 0xb8, + 0x3d8: 0x9d, 0x3d9: 0x9d, 0x3da: 0x9d, 0x3db: 0x9d, 0x3dc: 0x113, 0x3dd: 0x114, 0x3de: 0xb8, 0x3df: 0xb8, + 0x3e0: 0x115, 0x3e1: 0x116, 0x3e2: 0x117, 0x3e3: 0x118, 0x3e4: 0x119, 0x3e5: 0xb8, 0x3e6: 0x11a, 0x3e7: 0x11b, + 0x3e8: 0x11c, 0x3e9: 0x11d, 0x3ea: 0x11e, 0x3eb: 0x59, 0x3ec: 0x11f, 0x3ed: 0x120, 0x3ee: 0x5a, 0x3ef: 0xb8, + 0x3f0: 0x9d, 0x3f1: 0x121, 0x3f2: 0x122, 0x3f3: 0x123, 0x3f4: 0xb8, 0x3f5: 0xb8, 0x3f6: 0xb8, 0x3f7: 0xb8, + 0x3f8: 0xb8, 0x3f9: 0x124, 0x3fa: 0xb8, 0x3fb: 0xb8, 0x3fc: 0xb8, 0x3fd: 0xb8, 0x3fe: 0xb8, 0x3ff: 0xb8, + // Block 0x10, offset 0x400 + 0x400: 0x125, 0x401: 0x126, 0x402: 0x127, 0x403: 0x128, 0x404: 0x129, 0x405: 0x12a, 0x406: 0x12b, 0x407: 0x12c, + 0x408: 0x12d, 0x409: 0xb8, 0x40a: 0x12e, 0x40b: 0x12f, 0x40c: 0x5b, 0x40d: 0x5c, 0x40e: 0xb8, 0x40f: 0xb8, + 0x410: 0x130, 0x411: 0x131, 0x412: 0x132, 0x413: 0x133, 0x414: 0xb8, 0x415: 0xb8, 0x416: 0x134, 0x417: 0x135, + 0x418: 0x136, 0x419: 0x137, 0x41a: 0x138, 0x41b: 0x139, 0x41c: 0x13a, 0x41d: 0xb8, 0x41e: 0xb8, 0x41f: 0xb8, + 0x420: 0xb8, 0x421: 0xb8, 0x422: 0x13b, 0x423: 0x13c, 0x424: 0xb8, 0x425: 0xb8, 0x426: 0xb8, 0x427: 0xb8, + 0x428: 0xb8, 0x429: 0xb8, 0x42a: 0xb8, 0x42b: 0x13d, 0x42c: 0xb8, 0x42d: 0xb8, 0x42e: 0xb8, 0x42f: 0xb8, + 0x430: 0x13e, 0x431: 0x13f, 0x432: 0x140, 0x433: 0xb8, 0x434: 0xb8, 0x435: 0xb8, 0x436: 0xb8, 0x437: 0xb8, + 0x438: 0xb8, 0x439: 0xb8, 0x43a: 0xb8, 0x43b: 0xb8, 0x43c: 0xb8, 0x43d: 0xb8, 0x43e: 0xb8, 0x43f: 0xb8, + // Block 0x11, offset 0x440 + 0x440: 0x9d, 0x441: 0x9d, 0x442: 0x9d, 0x443: 0x9d, 0x444: 0x9d, 0x445: 0x9d, 0x446: 0x9d, 0x447: 0x9d, + 0x448: 0x9d, 0x449: 0x9d, 0x44a: 0x9d, 0x44b: 0x9d, 0x44c: 0x9d, 0x44d: 0x9d, 0x44e: 0x141, 0x44f: 0xb8, + 0x450: 0x9b, 0x451: 0x142, 0x452: 0x9d, 0x453: 0x9d, 0x454: 0x9d, 0x455: 0x143, 0x456: 0xb8, 0x457: 0xb8, + 0x458: 0xb8, 0x459: 0xb8, 0x45a: 0xb8, 0x45b: 0xb8, 0x45c: 0xb8, 0x45d: 0xb8, 0x45e: 0xb8, 0x45f: 0xb8, + 0x460: 0xb8, 0x461: 0xb8, 0x462: 0xb8, 0x463: 0xb8, 0x464: 0xb8, 0x465: 0xb8, 0x466: 0xb8, 0x467: 0xb8, + 0x468: 0xb8, 0x469: 0xb8, 0x46a: 0xb8, 0x46b: 0xb8, 0x46c: 0xb8, 0x46d: 0xb8, 0x46e: 0xb8, 0x46f: 0xb8, + 0x470: 0xb8, 0x471: 0xb8, 0x472: 0xb8, 0x473: 0xb8, 0x474: 0xb8, 0x475: 0xb8, 0x476: 0xb8, 0x477: 0xb8, + 0x478: 0xb8, 0x479: 0xb8, 0x47a: 0xb8, 0x47b: 0xb8, 0x47c: 0xb8, 0x47d: 0xb8, 0x47e: 0xb8, 0x47f: 0xb8, + // Block 0x12, offset 0x480 + 0x480: 0x9d, 0x481: 0x9d, 0x482: 0x9d, 0x483: 0x9d, 0x484: 0x9d, 0x485: 0x9d, 0x486: 0x9d, 0x487: 0x9d, + 0x488: 0x9d, 0x489: 0x9d, 0x48a: 0x9d, 0x48b: 0x9d, 0x48c: 0x9d, 0x48d: 0x9d, 0x48e: 0x9d, 0x48f: 0x9d, + 0x490: 0x144, 0x491: 0xb8, 0x492: 0xb8, 0x493: 0xb8, 0x494: 0xb8, 0x495: 0xb8, 0x496: 0xb8, 0x497: 0xb8, + 
0x498: 0xb8, 0x499: 0xb8, 0x49a: 0xb8, 0x49b: 0xb8, 0x49c: 0xb8, 0x49d: 0xb8, 0x49e: 0xb8, 0x49f: 0xb8, + 0x4a0: 0xb8, 0x4a1: 0xb8, 0x4a2: 0xb8, 0x4a3: 0xb8, 0x4a4: 0xb8, 0x4a5: 0xb8, 0x4a6: 0xb8, 0x4a7: 0xb8, + 0x4a8: 0xb8, 0x4a9: 0xb8, 0x4aa: 0xb8, 0x4ab: 0xb8, 0x4ac: 0xb8, 0x4ad: 0xb8, 0x4ae: 0xb8, 0x4af: 0xb8, + 0x4b0: 0xb8, 0x4b1: 0xb8, 0x4b2: 0xb8, 0x4b3: 0xb8, 0x4b4: 0xb8, 0x4b5: 0xb8, 0x4b6: 0xb8, 0x4b7: 0xb8, + 0x4b8: 0xb8, 0x4b9: 0xb8, 0x4ba: 0xb8, 0x4bb: 0xb8, 0x4bc: 0xb8, 0x4bd: 0xb8, 0x4be: 0xb8, 0x4bf: 0xb8, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xb8, 0x4c1: 0xb8, 0x4c2: 0xb8, 0x4c3: 0xb8, 0x4c4: 0xb8, 0x4c5: 0xb8, 0x4c6: 0xb8, 0x4c7: 0xb8, + 0x4c8: 0xb8, 0x4c9: 0xb8, 0x4ca: 0xb8, 0x4cb: 0xb8, 0x4cc: 0xb8, 0x4cd: 0xb8, 0x4ce: 0xb8, 0x4cf: 0xb8, + 0x4d0: 0x9d, 0x4d1: 0x9d, 0x4d2: 0x9d, 0x4d3: 0x9d, 0x4d4: 0x9d, 0x4d5: 0x9d, 0x4d6: 0x9d, 0x4d7: 0x9d, + 0x4d8: 0x9d, 0x4d9: 0x145, 0x4da: 0xb8, 0x4db: 0xb8, 0x4dc: 0xb8, 0x4dd: 0xb8, 0x4de: 0xb8, 0x4df: 0xb8, + 0x4e0: 0xb8, 0x4e1: 0xb8, 0x4e2: 0xb8, 0x4e3: 0xb8, 0x4e4: 0xb8, 0x4e5: 0xb8, 0x4e6: 0xb8, 0x4e7: 0xb8, + 0x4e8: 0xb8, 0x4e9: 0xb8, 0x4ea: 0xb8, 0x4eb: 0xb8, 0x4ec: 0xb8, 0x4ed: 0xb8, 0x4ee: 0xb8, 0x4ef: 0xb8, + 0x4f0: 0xb8, 0x4f1: 0xb8, 0x4f2: 0xb8, 0x4f3: 0xb8, 0x4f4: 0xb8, 0x4f5: 0xb8, 0x4f6: 0xb8, 0x4f7: 0xb8, + 0x4f8: 0xb8, 0x4f9: 0xb8, 0x4fa: 0xb8, 0x4fb: 0xb8, 0x4fc: 0xb8, 0x4fd: 0xb8, 0x4fe: 0xb8, 0x4ff: 0xb8, + // Block 0x14, offset 0x500 + 0x500: 0xb8, 0x501: 0xb8, 0x502: 0xb8, 0x503: 0xb8, 0x504: 0xb8, 0x505: 0xb8, 0x506: 0xb8, 0x507: 0xb8, + 0x508: 0xb8, 0x509: 0xb8, 0x50a: 0xb8, 0x50b: 0xb8, 0x50c: 0xb8, 0x50d: 0xb8, 0x50e: 0xb8, 0x50f: 0xb8, + 0x510: 0xb8, 0x511: 0xb8, 0x512: 0xb8, 0x513: 0xb8, 0x514: 0xb8, 0x515: 0xb8, 0x516: 0xb8, 0x517: 0xb8, + 0x518: 0xb8, 0x519: 0xb8, 0x51a: 0xb8, 0x51b: 0xb8, 0x51c: 0xb8, 0x51d: 0xb8, 0x51e: 0xb8, 0x51f: 0xb8, + 0x520: 0x9d, 0x521: 0x9d, 0x522: 0x9d, 0x523: 0x9d, 0x524: 0x9d, 0x525: 0x9d, 0x526: 0x9d, 0x527: 0x9d, + 0x528: 0x13d, 0x529: 0x146, 0x52a: 0xb8, 0x52b: 0x147, 0x52c: 0x148, 0x52d: 0x149, 0x52e: 0x14a, 0x52f: 0xb8, + 0x530: 0xb8, 0x531: 0xb8, 0x532: 0xb8, 0x533: 0xb8, 0x534: 0xb8, 0x535: 0xb8, 0x536: 0xb8, 0x537: 0xb8, + 0x538: 0xb8, 0x539: 0xb8, 0x53a: 0xb8, 0x53b: 0xb8, 0x53c: 0x9d, 0x53d: 0x14b, 0x53e: 0x14c, 0x53f: 0x14d, + // Block 0x15, offset 0x540 + 0x540: 0x9d, 0x541: 0x9d, 0x542: 0x9d, 0x543: 0x9d, 0x544: 0x9d, 0x545: 0x9d, 0x546: 0x9d, 0x547: 0x9d, + 0x548: 0x9d, 0x549: 0x9d, 0x54a: 0x9d, 0x54b: 0x9d, 0x54c: 0x9d, 0x54d: 0x9d, 0x54e: 0x9d, 0x54f: 0x9d, + 0x550: 0x9d, 0x551: 0x9d, 0x552: 0x9d, 0x553: 0x9d, 0x554: 0x9d, 0x555: 0x9d, 0x556: 0x9d, 0x557: 0x9d, + 0x558: 0x9d, 0x559: 0x9d, 0x55a: 0x9d, 0x55b: 0x9d, 0x55c: 0x9d, 0x55d: 0x9d, 0x55e: 0x9d, 0x55f: 0x14e, + 0x560: 0x9d, 0x561: 0x9d, 0x562: 0x9d, 0x563: 0x9d, 0x564: 0x9d, 0x565: 0x9d, 0x566: 0x9d, 0x567: 0x9d, + 0x568: 0x9d, 0x569: 0x9d, 0x56a: 0x9d, 0x56b: 0x14f, 0x56c: 0xb8, 0x56d: 0xb8, 0x56e: 0xb8, 0x56f: 0xb8, + 0x570: 0xb8, 0x571: 0xb8, 0x572: 0xb8, 0x573: 0xb8, 0x574: 0xb8, 0x575: 0xb8, 0x576: 0xb8, 0x577: 0xb8, + 0x578: 0xb8, 0x579: 0xb8, 0x57a: 0xb8, 0x57b: 0xb8, 0x57c: 0xb8, 0x57d: 0xb8, 0x57e: 0xb8, 0x57f: 0xb8, + // Block 0x16, offset 0x580 + 0x580: 0x150, 0x581: 0xb8, 0x582: 0xb8, 0x583: 0xb8, 0x584: 0xb8, 0x585: 0xb8, 0x586: 0xb8, 0x587: 0xb8, + 0x588: 0xb8, 0x589: 0xb8, 0x58a: 0xb8, 0x58b: 0xb8, 0x58c: 0xb8, 0x58d: 0xb8, 0x58e: 0xb8, 0x58f: 0xb8, + 0x590: 0xb8, 0x591: 0xb8, 0x592: 0xb8, 0x593: 0xb8, 0x594: 0xb8, 0x595: 0xb8, 0x596: 0xb8, 0x597: 0xb8, + 0x598: 0xb8, 0x599: 0xb8, 
0x59a: 0xb8, 0x59b: 0xb8, 0x59c: 0xb8, 0x59d: 0xb8, 0x59e: 0xb8, 0x59f: 0xb8, + 0x5a0: 0xb8, 0x5a1: 0xb8, 0x5a2: 0xb8, 0x5a3: 0xb8, 0x5a4: 0xb8, 0x5a5: 0xb8, 0x5a6: 0xb8, 0x5a7: 0xb8, + 0x5a8: 0xb8, 0x5a9: 0xb8, 0x5aa: 0xb8, 0x5ab: 0xb8, 0x5ac: 0xb8, 0x5ad: 0xb8, 0x5ae: 0xb8, 0x5af: 0xb8, + 0x5b0: 0x9d, 0x5b1: 0x151, 0x5b2: 0x152, 0x5b3: 0xb8, 0x5b4: 0xb8, 0x5b5: 0xb8, 0x5b6: 0xb8, 0x5b7: 0xb8, + 0x5b8: 0xb8, 0x5b9: 0xb8, 0x5ba: 0xb8, 0x5bb: 0xb8, 0x5bc: 0xb8, 0x5bd: 0xb8, 0x5be: 0xb8, 0x5bf: 0xb8, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x153, 0x5c4: 0x154, 0x5c5: 0x155, 0x5c6: 0x156, 0x5c7: 0x157, + 0x5c8: 0x9b, 0x5c9: 0x158, 0x5ca: 0xb8, 0x5cb: 0xb8, 0x5cc: 0x9b, 0x5cd: 0x159, 0x5ce: 0xb8, 0x5cf: 0xb8, + 0x5d0: 0x5d, 0x5d1: 0x5e, 0x5d2: 0x5f, 0x5d3: 0x60, 0x5d4: 0x61, 0x5d5: 0x62, 0x5d6: 0x63, 0x5d7: 0x64, + 0x5d8: 0x65, 0x5d9: 0x66, 0x5da: 0x67, 0x5db: 0x68, 0x5dc: 0x69, 0x5dd: 0x6a, 0x5de: 0x6b, 0x5df: 0x6c, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x15a, 0x5e9: 0x15b, 0x5ea: 0x15c, 0x5eb: 0xb8, 0x5ec: 0xb8, 0x5ed: 0xb8, 0x5ee: 0xb8, 0x5ef: 0xb8, + 0x5f0: 0xb8, 0x5f1: 0xb8, 0x5f2: 0xb8, 0x5f3: 0xb8, 0x5f4: 0xb8, 0x5f5: 0xb8, 0x5f6: 0xb8, 0x5f7: 0xb8, + 0x5f8: 0xb8, 0x5f9: 0xb8, 0x5fa: 0xb8, 0x5fb: 0xb8, 0x5fc: 0xb8, 0x5fd: 0xb8, 0x5fe: 0xb8, 0x5ff: 0xb8, + // Block 0x18, offset 0x600 + 0x600: 0x15d, 0x601: 0xb8, 0x602: 0xb8, 0x603: 0xb8, 0x604: 0xb8, 0x605: 0xb8, 0x606: 0xb8, 0x607: 0xb8, + 0x608: 0xb8, 0x609: 0xb8, 0x60a: 0xb8, 0x60b: 0xb8, 0x60c: 0xb8, 0x60d: 0xb8, 0x60e: 0xb8, 0x60f: 0xb8, + 0x610: 0xb8, 0x611: 0xb8, 0x612: 0xb8, 0x613: 0xb8, 0x614: 0xb8, 0x615: 0xb8, 0x616: 0xb8, 0x617: 0xb8, + 0x618: 0xb8, 0x619: 0xb8, 0x61a: 0xb8, 0x61b: 0xb8, 0x61c: 0xb8, 0x61d: 0xb8, 0x61e: 0xb8, 0x61f: 0xb8, + 0x620: 0x9d, 0x621: 0x9d, 0x622: 0x9d, 0x623: 0x15e, 0x624: 0x6d, 0x625: 0x15f, 0x626: 0xb8, 0x627: 0xb8, + 0x628: 0xb8, 0x629: 0xb8, 0x62a: 0xb8, 0x62b: 0xb8, 0x62c: 0xb8, 0x62d: 0xb8, 0x62e: 0xb8, 0x62f: 0xb8, + 0x630: 0xb8, 0x631: 0xb8, 0x632: 0xb8, 0x633: 0xb8, 0x634: 0xb8, 0x635: 0xb8, 0x636: 0xb8, 0x637: 0xb8, + 0x638: 0x6e, 0x639: 0x6f, 0x63a: 0x70, 0x63b: 0x160, 0x63c: 0xb8, 0x63d: 0xb8, 0x63e: 0xb8, 0x63f: 0xb8, + // Block 0x19, offset 0x640 + 0x640: 0x161, 0x641: 0x9b, 0x642: 0x162, 0x643: 0x163, 0x644: 0x71, 0x645: 0x72, 0x646: 0x164, 0x647: 0x165, + 0x648: 0x73, 0x649: 0x166, 0x64a: 0xb8, 0x64b: 0xb8, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x167, 0x65c: 0x9b, 0x65d: 0x168, 0x65e: 0x9b, 0x65f: 0x169, + 0x660: 0x16a, 0x661: 0x16b, 0x662: 0x16c, 0x663: 0xb8, 0x664: 0x16d, 0x665: 0x16e, 0x666: 0x16f, 0x667: 0x170, + 0x668: 0xb8, 0x669: 0xb8, 0x66a: 0xb8, 0x66b: 0xb8, 0x66c: 0xb8, 0x66d: 0xb8, 0x66e: 0xb8, 0x66f: 0xb8, + 0x670: 0xb8, 0x671: 0xb8, 0x672: 0xb8, 0x673: 0xb8, 0x674: 0xb8, 0x675: 0xb8, 0x676: 0xb8, 0x677: 0xb8, + 0x678: 0xb8, 0x679: 0xb8, 0x67a: 0xb8, 0x67b: 0xb8, 0x67c: 0xb8, 0x67d: 0xb8, 0x67e: 0xb8, 0x67f: 0xb8, + // Block 0x1a, offset 0x680 + 0x680: 0x9d, 0x681: 0x9d, 0x682: 0x9d, 0x683: 0x9d, 0x684: 0x9d, 0x685: 0x9d, 0x686: 0x9d, 0x687: 0x9d, + 0x688: 0x9d, 0x689: 0x9d, 0x68a: 0x9d, 0x68b: 0x9d, 0x68c: 0x9d, 0x68d: 0x9d, 0x68e: 0x9d, 0x68f: 0x9d, + 0x690: 0x9d, 0x691: 0x9d, 0x692: 0x9d, 0x693: 0x9d, 0x694: 0x9d, 0x695: 0x9d, 0x696: 0x9d, 0x697: 0x9d, + 0x698: 0x9d, 0x699: 0x9d, 0x69a: 
0x9d, 0x69b: 0x171, 0x69c: 0x9d, 0x69d: 0x9d, 0x69e: 0x9d, 0x69f: 0x9d, + 0x6a0: 0x9d, 0x6a1: 0x9d, 0x6a2: 0x9d, 0x6a3: 0x9d, 0x6a4: 0x9d, 0x6a5: 0x9d, 0x6a6: 0x9d, 0x6a7: 0x9d, + 0x6a8: 0x9d, 0x6a9: 0x9d, 0x6aa: 0x9d, 0x6ab: 0x9d, 0x6ac: 0x9d, 0x6ad: 0x9d, 0x6ae: 0x9d, 0x6af: 0x9d, + 0x6b0: 0x9d, 0x6b1: 0x9d, 0x6b2: 0x9d, 0x6b3: 0x9d, 0x6b4: 0x9d, 0x6b5: 0x9d, 0x6b6: 0x9d, 0x6b7: 0x9d, + 0x6b8: 0x9d, 0x6b9: 0x9d, 0x6ba: 0x9d, 0x6bb: 0x9d, 0x6bc: 0x9d, 0x6bd: 0x9d, 0x6be: 0x9d, 0x6bf: 0x9d, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9d, 0x6c1: 0x9d, 0x6c2: 0x9d, 0x6c3: 0x9d, 0x6c4: 0x9d, 0x6c5: 0x9d, 0x6c6: 0x9d, 0x6c7: 0x9d, + 0x6c8: 0x9d, 0x6c9: 0x9d, 0x6ca: 0x9d, 0x6cb: 0x9d, 0x6cc: 0x9d, 0x6cd: 0x9d, 0x6ce: 0x9d, 0x6cf: 0x9d, + 0x6d0: 0x9d, 0x6d1: 0x9d, 0x6d2: 0x9d, 0x6d3: 0x9d, 0x6d4: 0x9d, 0x6d5: 0x9d, 0x6d6: 0x9d, 0x6d7: 0x9d, + 0x6d8: 0x9d, 0x6d9: 0x9d, 0x6da: 0x9d, 0x6db: 0x9d, 0x6dc: 0x172, 0x6dd: 0x9d, 0x6de: 0x9d, 0x6df: 0x9d, + 0x6e0: 0x173, 0x6e1: 0x9d, 0x6e2: 0x9d, 0x6e3: 0x9d, 0x6e4: 0x9d, 0x6e5: 0x9d, 0x6e6: 0x9d, 0x6e7: 0x9d, + 0x6e8: 0x9d, 0x6e9: 0x9d, 0x6ea: 0x9d, 0x6eb: 0x9d, 0x6ec: 0x9d, 0x6ed: 0x9d, 0x6ee: 0x9d, 0x6ef: 0x9d, + 0x6f0: 0x9d, 0x6f1: 0x9d, 0x6f2: 0x9d, 0x6f3: 0x9d, 0x6f4: 0x9d, 0x6f5: 0x9d, 0x6f6: 0x9d, 0x6f7: 0x9d, + 0x6f8: 0x9d, 0x6f9: 0x9d, 0x6fa: 0x9d, 0x6fb: 0x9d, 0x6fc: 0x9d, 0x6fd: 0x9d, 0x6fe: 0x9d, 0x6ff: 0x9d, + // Block 0x1c, offset 0x700 + 0x700: 0x9d, 0x701: 0x9d, 0x702: 0x9d, 0x703: 0x9d, 0x704: 0x9d, 0x705: 0x9d, 0x706: 0x9d, 0x707: 0x9d, + 0x708: 0x9d, 0x709: 0x9d, 0x70a: 0x9d, 0x70b: 0x9d, 0x70c: 0x9d, 0x70d: 0x9d, 0x70e: 0x9d, 0x70f: 0x9d, + 0x710: 0x9d, 0x711: 0x9d, 0x712: 0x9d, 0x713: 0x9d, 0x714: 0x9d, 0x715: 0x9d, 0x716: 0x9d, 0x717: 0x9d, + 0x718: 0x9d, 0x719: 0x9d, 0x71a: 0x9d, 0x71b: 0x9d, 0x71c: 0x9d, 0x71d: 0x9d, 0x71e: 0x9d, 0x71f: 0x9d, + 0x720: 0x9d, 0x721: 0x9d, 0x722: 0x9d, 0x723: 0x9d, 0x724: 0x9d, 0x725: 0x9d, 0x726: 0x9d, 0x727: 0x9d, + 0x728: 0x9d, 0x729: 0x9d, 0x72a: 0x9d, 0x72b: 0x9d, 0x72c: 0x9d, 0x72d: 0x9d, 0x72e: 0x9d, 0x72f: 0x9d, + 0x730: 0x9d, 0x731: 0x9d, 0x732: 0x9d, 0x733: 0x9d, 0x734: 0x9d, 0x735: 0x9d, 0x736: 0x9d, 0x737: 0x9d, + 0x738: 0x9d, 0x739: 0x9d, 0x73a: 0x174, 0x73b: 0xb8, 0x73c: 0xb8, 0x73d: 0xb8, 0x73e: 0xb8, 0x73f: 0xb8, + // Block 0x1d, offset 0x740 + 0x740: 0xb8, 0x741: 0xb8, 0x742: 0xb8, 0x743: 0xb8, 0x744: 0xb8, 0x745: 0xb8, 0x746: 0xb8, 0x747: 0xb8, + 0x748: 0xb8, 0x749: 0xb8, 0x74a: 0xb8, 0x74b: 0xb8, 0x74c: 0xb8, 0x74d: 0xb8, 0x74e: 0xb8, 0x74f: 0xb8, + 0x750: 0xb8, 0x751: 0xb8, 0x752: 0xb8, 0x753: 0xb8, 0x754: 0xb8, 0x755: 0xb8, 0x756: 0xb8, 0x757: 0xb8, + 0x758: 0xb8, 0x759: 0xb8, 0x75a: 0xb8, 0x75b: 0xb8, 0x75c: 0xb8, 0x75d: 0xb8, 0x75e: 0xb8, 0x75f: 0xb8, + 0x760: 0x74, 0x761: 0x75, 0x762: 0x76, 0x763: 0x175, 0x764: 0x77, 0x765: 0x78, 0x766: 0x176, 0x767: 0x79, + 0x768: 0x7a, 0x769: 0xb8, 0x76a: 0xb8, 0x76b: 0xb8, 0x76c: 0xb8, 0x76d: 0xb8, 0x76e: 0xb8, 0x76f: 0xb8, + 0x770: 0xb8, 0x771: 0xb8, 0x772: 0xb8, 0x773: 0xb8, 0x774: 0xb8, 0x775: 0xb8, 0x776: 0xb8, 0x777: 0xb8, + 0x778: 0xb8, 0x779: 0xb8, 0x77a: 0xb8, 0x77b: 0xb8, 0x77c: 0xb8, 0x77d: 0xb8, 0x77e: 0xb8, 0x77f: 0xb8, + // Block 0x1e, offset 0x780 + 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07, + 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17, + 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07, + 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 
0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b, + 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b, + 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b, + 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b, + 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b, + 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b, + 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b, + 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x177, 0x801: 0x178, 0x802: 0xb8, 0x803: 0xb8, 0x804: 0x179, 0x805: 0x179, 0x806: 0x179, 0x807: 0x17a, + 0x808: 0xb8, 0x809: 0xb8, 0x80a: 0xb8, 0x80b: 0xb8, 0x80c: 0xb8, 0x80d: 0xb8, 0x80e: 0xb8, 0x80f: 0xb8, + 0x810: 0xb8, 0x811: 0xb8, 0x812: 0xb8, 0x813: 0xb8, 0x814: 0xb8, 0x815: 0xb8, 0x816: 0xb8, 0x817: 0xb8, + 0x818: 0xb8, 0x819: 0xb8, 0x81a: 0xb8, 0x81b: 0xb8, 0x81c: 0xb8, 0x81d: 0xb8, 0x81e: 0xb8, 0x81f: 0xb8, + 0x820: 0xb8, 0x821: 0xb8, 0x822: 0xb8, 0x823: 0xb8, 0x824: 0xb8, 0x825: 0xb8, 0x826: 0xb8, 0x827: 0xb8, + 0x828: 0xb8, 0x829: 0xb8, 0x82a: 0xb8, 0x82b: 0xb8, 0x82c: 0xb8, 0x82d: 0xb8, 0x82e: 0xb8, 0x82f: 0xb8, + 0x830: 0xb8, 0x831: 0xb8, 0x832: 0xb8, 0x833: 0xb8, 0x834: 0xb8, 0x835: 0xb8, 0x836: 0xb8, 0x837: 0xb8, + 0x838: 0xb8, 0x839: 0xb8, 0x83a: 0xb8, 0x83b: 0xb8, 0x83c: 0xb8, 0x83d: 0xb8, 0x83e: 0xb8, 0x83f: 0xb8, + // Block 0x21, offset 0x840 + 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, + 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, + 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, + 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, + 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, + 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, + 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, + 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, +} + +// idnaSparseOffset: 256 entries, 512 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x5c, 0x60, 0x6f, 0x74, 0x7b, 0x87, 0x95, 0xa3, 0xa8, 0xb1, 0xc1, 0xcf, 0xdc, 0xe8, 0xf9, 0x103, 0x10a, 0x117, 0x128, 0x12f, 0x13a, 0x149, 0x157, 0x161, 0x163, 0x167, 0x169, 0x175, 0x180, 0x188, 0x18e, 0x194, 0x199, 0x19e, 0x1a1, 0x1a5, 0x1ab, 0x1b0, 0x1bc, 0x1c6, 0x1cc, 0x1dd, 0x1e7, 
0x1ea, 0x1f2, 0x1f5, 0x202, 0x20a, 0x20e, 0x215, 0x21d, 0x22d, 0x239, 0x23b, 0x245, 0x251, 0x25d, 0x269, 0x271, 0x276, 0x280, 0x291, 0x295, 0x2a0, 0x2a4, 0x2ad, 0x2b5, 0x2bb, 0x2c0, 0x2c3, 0x2c6, 0x2ca, 0x2d0, 0x2d4, 0x2d8, 0x2de, 0x2e5, 0x2eb, 0x2f3, 0x2fa, 0x305, 0x30f, 0x313, 0x316, 0x31c, 0x320, 0x322, 0x325, 0x327, 0x32a, 0x334, 0x337, 0x346, 0x34a, 0x34f, 0x352, 0x356, 0x35b, 0x360, 0x366, 0x36c, 0x37b, 0x381, 0x385, 0x394, 0x399, 0x3a1, 0x3ab, 0x3b6, 0x3be, 0x3cf, 0x3d8, 0x3e8, 0x3f5, 0x3ff, 0x404, 0x411, 0x415, 0x41a, 0x41c, 0x420, 0x422, 0x426, 0x42f, 0x435, 0x439, 0x449, 0x453, 0x458, 0x45b, 0x461, 0x468, 0x46d, 0x471, 0x477, 0x47c, 0x485, 0x48a, 0x490, 0x497, 0x49e, 0x4a5, 0x4a9, 0x4ae, 0x4b1, 0x4b6, 0x4c2, 0x4c8, 0x4cd, 0x4d4, 0x4dc, 0x4e1, 0x4e5, 0x4f5, 0x4fc, 0x500, 0x504, 0x50b, 0x50e, 0x511, 0x515, 0x519, 0x51f, 0x528, 0x534, 0x53b, 0x544, 0x54c, 0x553, 0x561, 0x56e, 0x57b, 0x584, 0x588, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5e5, 0x5ea, 0x5ed, 0x5f7, 0x600, 0x60c, 0x60f, 0x614, 0x617, 0x61a, 0x61d, 0x624, 0x62b, 0x62f, 0x63a, 0x63d, 0x643, 0x648, 0x64c, 0x64f, 0x652, 0x655, 0x65a, 0x664, 0x667, 0x66b, 0x67a, 0x686, 0x68a, 0x68f, 0x694, 0x698, 0x69d, 0x6a6, 0x6b1, 0x6b7, 0x6bf, 0x6c3, 0x6c7, 0x6cd, 0x6d3, 0x6d8, 0x6db, 0x6e9, 0x6f0, 0x6f3, 0x6f6, 0x6fa, 0x700, 0x705, 0x70f, 0x714, 0x717, 0x71a, 0x71d, 0x720, 0x724, 0x727, 0x737, 0x748, 0x74d, 0x74f, 0x751} + +// idnaSparseValues: 1876 entries, 7504 bytes +var idnaSparseValues = [1876]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x1308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, 
lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x1308, lo: 0x91, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbe}, + {value: 0x1308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x1308, lo: 0x81, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x83}, + {value: 0x1308, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x1308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0208, lo: 0x81, hi: 0x87}, + {value: 0x0408, lo: 0x88, hi: 0x88}, + {value: 0x0208, lo: 0x89, hi: 0x8a}, + {value: 0x1308, lo: 0x8b, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xad}, + {value: 0x0208, lo: 0xae, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb0}, + {value: 0x0408, lo: 0xb1, hi: 0xb3}, + {value: 0x0008, lo: 0xb4, hi: 0xb4}, + {value: 0x0429, lo: 0xb5, hi: 0xb5}, + {value: 0x0451, lo: 0xb6, hi: 0xb6}, + {value: 0x0479, lo: 0xb7, hi: 0xb7}, + {value: 0x04a1, lo: 0xb8, hi: 0xb8}, + {value: 0x0208, lo: 0xb9, hi: 0xbf}, + // Block 0x9, offset 0x5c + {value: 0x0000, lo: 0x03}, + {value: 0x0208, lo: 0x80, hi: 0x87}, + {value: 0x0408, lo: 0x88, hi: 0x99}, + {value: 0x0208, lo: 0x9a, hi: 0xbf}, + // Block 0xa, offset 0x60 + {value: 0x0000, lo: 0x0e}, + {value: 0x1308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0408, lo: 0x8d, hi: 0x8d}, + {value: 0x0208, lo: 0x8e, hi: 0x98}, + {value: 0x0408, lo: 0x99, hi: 0x9b}, + {value: 0x0208, lo: 0x9c, hi: 0xaa}, + {value: 0x0408, lo: 0xab, hi: 0xac}, + {value: 0x0208, lo: 0xad, hi: 0xb0}, + {value: 0x0408, lo: 0xb1, hi: 0xb1}, + {value: 0x0208, lo: 0xb2, hi: 0xb2}, + {value: 0x0408, lo: 0xb3, hi: 0xb4}, + {value: 0x0208, lo: 0xb5, hi: 0xb7}, + {value: 0x0408, lo: 0xb8, hi: 0xb9}, + {value: 0x0208, lo: 0xba, hi: 0xbf}, + // Block 0xb, offset 0x6f + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x1308, lo: 0xa6, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xc, offset 0x74 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0208, lo: 0x8a, hi: 0xaa}, + {value: 0x1308, lo: 0xab, hi: 0xb3}, + {value: 0x0008, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xd, offset 0x7b + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x1308, lo: 0x96, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9a}, + {value: 0x1308, lo: 0x9b, hi: 0xa3}, + {value: 0x0008, lo: 0xa4, hi: 0xa4}, + {value: 0x1308, lo: 0xa5, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xa8}, + {value: 0x1308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xe, offset 0x87 + {value: 0x0000, lo: 0x0d}, + {value: 0x0408, 
lo: 0x80, hi: 0x80}, + {value: 0x0208, lo: 0x81, hi: 0x85}, + {value: 0x0408, lo: 0x86, hi: 0x87}, + {value: 0x0208, lo: 0x88, hi: 0x88}, + {value: 0x0408, lo: 0x89, hi: 0x89}, + {value: 0x0208, lo: 0x8a, hi: 0x93}, + {value: 0x0408, lo: 0x94, hi: 0x94}, + {value: 0x0208, lo: 0x95, hi: 0x95}, + {value: 0x0008, lo: 0x96, hi: 0x98}, + {value: 0x1308, lo: 0x99, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xf, offset 0x95 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xa9}, + {value: 0x0408, lo: 0xaa, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0408, lo: 0xae, hi: 0xae}, + {value: 0x0208, lo: 0xaf, hi: 0xb0}, + {value: 0x0408, lo: 0xb1, hi: 0xb2}, + {value: 0x0208, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0208, lo: 0xb6, hi: 0xb8}, + {value: 0x0408, lo: 0xb9, hi: 0xb9}, + {value: 0x0208, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0xa3 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x1308, lo: 0x94, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x1308, lo: 0xa3, hi: 0xbf}, + // Block 0x11, offset 0xa8 + {value: 0x0000, lo: 0x08}, + {value: 0x1308, lo: 0x80, hi: 0x82}, + {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x1308, lo: 0xba, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbb}, + {value: 0x1308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0f}, + {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x1008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x1008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x1008, lo: 0x8a, hi: 0x8c}, + {value: 0x1b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x1008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x13, offset 0xc1 + {value: 0x0000, lo: 0x0d}, + {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x1008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x1308, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xcf + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x1308, lo: 0x81, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbf}, + // Block 0x15, offset 0xdc + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + 
{value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x16, offset 0xe8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x1b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x1008, lo: 0x8f, hi: 0x91}, + {value: 0x1308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x1308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x1008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x1008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x17, offset 0xf9 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x1308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x1308, lo: 0xb4, hi: 0xb9}, + {value: 0x1b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x18, offset 0x103 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x1308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x19, offset 0x10a + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x1308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1a, offset 0x117 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x1308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x1308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x1308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x1308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbf}, + // Block 0x1b, offset 0x128 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1c, offset 0x12f + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x1008, lo: 0xab, hi: 0xac}, + {value: 0x1308, lo: 0xad, hi: 0xb0}, + {value: 0x1008, lo: 0xb1, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb7}, + {value: 0x1008, lo: 0xb8, hi: 0xb8}, + {value: 0x1b08, lo: 0xb9, hi: 0xba}, + {value: 0x1008, lo: 
0xbb, hi: 0xbc}, + {value: 0x1308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1d, offset 0x13a + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x1008, lo: 0x96, hi: 0x97}, + {value: 0x1308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x1308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x1008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x1008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x1308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1e, offset 0x149 + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x1308, lo: 0x82, hi: 0x82}, + {value: 0x1008, lo: 0x83, hi: 0x84}, + {value: 0x1308, lo: 0x85, hi: 0x86}, + {value: 0x1008, lo: 0x87, hi: 0x8c}, + {value: 0x1308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x1008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x1008, lo: 0x9a, hi: 0x9c}, + {value: 0x1308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1f, offset 0x157 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x20, offset 0x161 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x21, offset 0x163 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbf}, + // Block 0x22, offset 0x167 + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x169 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x175 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x180 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x188 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 
0x98, hi: 0xbf}, + // Block 0x27, offset 0x18e + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x1308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x194 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x199 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x19e + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x1a1 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x1a5 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x1ab + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x1b0 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x1308, lo: 0x92, hi: 0x93}, + {value: 0x1b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb3}, + {value: 0x1b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1bc + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x1308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1c6 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x1340, lo: 0xb4, hi: 0xb5}, + {value: 0x1008, lo: 0xb6, hi: 0xb6}, + {value: 0x1308, lo: 0xb7, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1cc + {value: 0x0000, lo: 0x10}, + {value: 0x1008, lo: 0x80, hi: 0x85}, + {value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x1008, lo: 0x87, hi: 0x88}, + {value: 0x1308, lo: 0x89, hi: 0x91}, + {value: 0x1b08, lo: 0x92, hi: 0x92}, + {value: 0x1308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x1308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1dd + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x13c0, lo: 
0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1e7 + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x1308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x1308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1f5 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x1308, lo: 0xa0, hi: 0xa2}, + {value: 0x1008, lo: 0xa3, hi: 0xa6}, + {value: 0x1308, lo: 0xa7, hi: 0xa8}, + {value: 0x1008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x1008, lo: 0xb0, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb2}, + {value: 0x1008, lo: 0xb3, hi: 0xb8}, + {value: 0x1308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x202 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x20a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x20e + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x215 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x1308, lo: 0x97, hi: 0x98}, + {value: 0x1008, lo: 0x99, hi: 0x9a}, + {value: 0x1308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x21d + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x1008, lo: 0x95, hi: 0x95}, + {value: 0x1308, lo: 0x96, hi: 0x96}, + {value: 0x1008, lo: 0x97, hi: 0x97}, + {value: 0x1308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x1b08, lo: 0xa0, hi: 0xa0}, + {value: 0x1008, lo: 0xa1, hi: 0xa1}, + {value: 0x1308, lo: 0xa2, hi: 0xa2}, + {value: 0x1008, lo: 0xa3, hi: 0xa4}, + {value: 0x1308, lo: 0xa5, hi: 0xac}, + {value: 0x1008, lo: 0xad, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x1308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xbd}, + {value: 0x1318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 
0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x239 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x23b + {value: 0x0000, lo: 0x09}, + {value: 0x1308, lo: 0x80, hi: 0x83}, + {value: 0x1008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x1308, lo: 0xb4, hi: 0xb4}, + {value: 0x1008, lo: 0xb5, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbb}, + {value: 0x1308, lo: 0xbc, hi: 0xbc}, + {value: 0x1008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x1008, lo: 0x80, hi: 0x81}, + {value: 0x1308, lo: 0x82, hi: 0x82}, + {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x1808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x1308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x251 + {value: 0x0000, lo: 0x0b}, + {value: 0x1308, lo: 0x80, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x1008, lo: 0xa1, hi: 0xa1}, + {value: 0x1308, lo: 0xa2, hi: 0xa5}, + {value: 0x1008, lo: 0xa6, hi: 0xa7}, + {value: 0x1308, lo: 0xa8, hi: 0xa9}, + {value: 0x1808, lo: 0xaa, hi: 0xaa}, + {value: 0x1b08, lo: 0xab, hi: 0xab}, + {value: 0x1308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x25d + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x1308, lo: 0xa6, hi: 0xa6}, + {value: 0x1008, lo: 0xa7, hi: 0xa7}, + {value: 0x1308, lo: 0xa8, hi: 0xa9}, + {value: 0x1008, lo: 0xaa, hi: 0xac}, + {value: 0x1308, lo: 0xad, hi: 0xad}, + {value: 0x1008, lo: 0xae, hi: 0xae}, + {value: 0x1308, lo: 0xaf, hi: 0xb1}, + {value: 0x1808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x269 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x1008, lo: 0xa4, hi: 0xab}, + {value: 0x1308, lo: 0xac, hi: 0xb3}, + {value: 0x1008, lo: 0xb4, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x271 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x276 + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x280 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x1308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x1308, lo: 0x94, hi: 0xa0}, + {value: 0x1008, lo: 0xa1, hi: 0xa1}, + {value: 0x1308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x1308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x1008, lo: 0xb2, hi: 0xb3}, + {value: 0x1308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + 
{value: 0x1308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x291 + {value: 0x0000, lo: 0x03}, + {value: 0x1308, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x1308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x295 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x2a0 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x1318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x2a4 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x2ad + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x2b5 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2bb + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2c0 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x4e, offset 0x2c3 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4f, offset 0x2c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x50, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x51, offset 0x2d0 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x52, offset 0x2d4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x53, offset 0x2d8 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x54, offset 0x2de + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x91}, + {value: 0x0040, 
lo: 0x92, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2e5 + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2eb + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x57, offset 0x2f3 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2fa + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x59, offset 0x305 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x1308, lo: 0xa0, hi: 0xbf}, + // Block 0x5a, offset 0x30f + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5b, offset 0x313 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0xbf}, + // Block 0x5c, offset 0x316 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5d, offset 0x31c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5e, offset 0x320 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5f, offset 0x322 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x60, offset 0x325 + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x61, offset 0x327 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x62, offset 0x32a + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x1308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x63, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 
0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x64, offset 0x337 + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x65, offset 0x346 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x66, offset 0x34a + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x67, offset 0x34f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x68, offset 0x352 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x69, offset 0x356 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x6a, offset 0x35b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6b, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6c, offset 0x366 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x36c + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x1308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x1b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x1308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x1008, lo: 0xa3, hi: 0xa4}, + {value: 0x1308, lo: 0xa5, hi: 0xa6}, + {value: 0x1008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6e, offset 0x37b + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6f, offset 0x381 + {value: 0x0000, lo: 0x03}, + {value: 0x1008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x1008, lo: 0xb4, hi: 0xbf}, + // Block 0x70, offset 0x385 + {value: 0x0000, lo: 0x0e}, + {value: 0x1008, lo: 0x80, hi: 0x83}, + {value: 0x1b08, lo: 0x84, hi: 0x84}, + {value: 0x1308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 
0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x1308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x71, offset 0x394 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x1308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x72, offset 0x399 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x1308, lo: 0x87, hi: 0x91}, + {value: 0x1008, lo: 0x92, hi: 0x92}, + {value: 0x1808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x3a1 + {value: 0x0000, lo: 0x09}, + {value: 0x1308, lo: 0x80, hi: 0x82}, + {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xb3}, + {value: 0x1008, lo: 0xb4, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xb9}, + {value: 0x1008, lo: 0xba, hi: 0xbb}, + {value: 0x1308, lo: 0xbc, hi: 0xbc}, + {value: 0x1008, lo: 0xbd, hi: 0xbf}, + // Block 0x74, offset 0x3ab + {value: 0x0000, lo: 0x0a}, + {value: 0x1808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x1308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x75, offset 0x3b6 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x1308, lo: 0xa9, hi: 0xae}, + {value: 0x1008, lo: 0xaf, hi: 0xb0}, + {value: 0x1308, lo: 0xb1, hi: 0xb2}, + {value: 0x1008, lo: 0xb3, hi: 0xb4}, + {value: 0x1308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x76, offset 0x3be + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x1308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x1308, lo: 0x8c, hi: 0x8c}, + {value: 0x1008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbb}, + {value: 0x1308, lo: 0xbc, hi: 0xbc}, + {value: 0x1008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3cf + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x1308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x1308, lo: 0xbe, hi: 0xbf}, + // Block 0x78, offset 0x3d8 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x1308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x1008, lo: 0xab, hi: 0xab}, + {value: 
0x1308, lo: 0xac, hi: 0xad}, + {value: 0x1008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x1008, lo: 0xb5, hi: 0xb5}, + {value: 0x1b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x79, offset 0x3e8 + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3f5 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3ff + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7c, offset 0x404 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x1008, lo: 0xa3, hi: 0xa4}, + {value: 0x1308, lo: 0xa5, hi: 0xa5}, + {value: 0x1008, lo: 0xa6, hi: 0xa7}, + {value: 0x1308, lo: 0xa8, hi: 0xa8}, + {value: 0x1008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x1008, lo: 0xac, hi: 0xac}, + {value: 0x1b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7d, offset 0x411 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7e, offset 0x415 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7f, offset 0x41a + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x80, offset 0x41c + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x81, offset 0x420 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x82, offset 0x422 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x83, offset 0x426 + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x84, offset 0x42f + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x85, offset 0x435 + {value: 0x0028, 
lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x86, offset 0x439 + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x87, offset 0x449 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x88, offset 0x453 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x89, offset 0x458 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x8a, offset 0x45b + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8b, offset 0x461 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8c, offset 0x468 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x1308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8d, offset 0x46d + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8e, offset 0x471 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x1308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8f, offset 0x477 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x90, offset 0x47c + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x91, offset 0x485 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x92, offset 
0x48a + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x93, offset 0x490 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x94, offset 0x497 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x95, offset 0x49e + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x96, offset 0x4a5 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 0x4a9 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x98, offset 0x4ae + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x99, offset 0x4b1 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x9a, offset 0x4b6 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x4c2 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x9c, offset 0x4c8 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0018, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9d, offset 0x4cd + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0008, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x9e, offset 0x4d4 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0018, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x9f, offset 0x4dc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 
0xb8, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0xa0, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xbf}, + // Block 0xa1, offset 0x4e5 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x1308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x1308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x1308, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0008, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x1308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa2, offset 0x4f5 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0xa3, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa4, offset 0x500 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa5, offset 0x504 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbf}, + // Block 0xa6, offset 0x50b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x50e + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x511 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x515 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x519 + {value: 0x0000, lo: 0x05}, + {value: 0x1008, lo: 0x80, hi: 0x80}, + {value: 0x1308, lo: 0x81, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x1308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x51f + {value: 0x0000, lo: 0x08}, + {value: 0x1308, lo: 0x80, hi: 0x85}, + {value: 0x1b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x528 + {value: 0x0000, lo: 0x0b}, + {value: 0x1308, lo: 0x80, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x1008, lo: 0xb0, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xb6}, + {value: 0x1008, lo: 0xb7, hi: 0xb8}, + {value: 0x1b08, lo: 0xb9, hi: 0xb9}, + {value: 0x1308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + 
{value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x534 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x53b + {value: 0x0000, lo: 0x08}, + {value: 0x1308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x1308, lo: 0xa7, hi: 0xab}, + {value: 0x1008, lo: 0xac, hi: 0xac}, + {value: 0x1308, lo: 0xad, hi: 0xb2}, + {value: 0x1b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x544 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x54c + {value: 0x0000, lo: 0x06}, + {value: 0x1308, lo: 0x80, hi: 0x81}, + {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x1008, lo: 0xb3, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xbe}, + {value: 0x1008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x553 + {value: 0x0000, lo: 0x0d}, + {value: 0x1808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x1308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x561 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x1008, lo: 0xac, hi: 0xae}, + {value: 0x1308, lo: 0xaf, hi: 0xb1}, + {value: 0x1008, lo: 0xb2, hi: 0xb3}, + {value: 0x1308, lo: 0xb4, hi: 0xb4}, + {value: 0x1808, lo: 0xb5, hi: 0xb5}, + {value: 0x1308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x1308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x56e + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x57b + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x1308, lo: 0x9f, hi: 0x9f}, + {value: 0x1008, lo: 0xa0, hi: 0xa2}, + {value: 0x1308, lo: 0xa3, hi: 0xa9}, + {value: 0x1b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x584 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x1008, lo: 0xb5, hi: 0xb7}, + {value: 0x1308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x588 + {value: 0x0000, lo: 0x0d}, + 
{value: 0x1008, lo: 0x80, hi: 0x81}, + {value: 0x1b08, lo: 0x82, hi: 0x82}, + {value: 0x1308, lo: 0x83, hi: 0x84}, + {value: 0x1008, lo: 0x85, hi: 0x85}, + {value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x596 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x1008, lo: 0xb0, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xb8}, + {value: 0x1008, lo: 0xb9, hi: 0xb9}, + {value: 0x1308, lo: 0xba, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbe}, + {value: 0x1308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x59e + {value: 0x0000, lo: 0x0a}, + {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x1008, lo: 0x81, hi: 0x81}, + {value: 0x1b08, lo: 0x82, hi: 0x82}, + {value: 0x1308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x5a9 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x1008, lo: 0xaf, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x1008, lo: 0xb8, hi: 0xbb}, + {value: 0x1308, lo: 0xbc, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x5b2 + {value: 0x0000, lo: 0x05}, + {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x1308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x5b8 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x1008, lo: 0xb0, hi: 0xb2}, + {value: 0x1308, lo: 0xb3, hi: 0xba}, + {value: 0x1008, lo: 0xbb, hi: 0xbc}, + {value: 0x1308, lo: 0xbd, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5c0 + {value: 0x0000, lo: 0x08}, + {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbd, offset 0x5c9 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x1308, lo: 0xab, hi: 0xab}, + {value: 0x1008, lo: 0xac, hi: 0xac}, + {value: 0x1308, lo: 0xad, hi: 0xad}, + {value: 0x1008, lo: 0xae, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb5}, + {value: 0x1808, lo: 0xb6, hi: 0xb6}, + {value: 0x1308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5d3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5d6 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x1308, lo: 0x9d, hi: 0x9f}, + {value: 0x1008, lo: 0xa0, hi: 0xa1}, + {value: 0x1308, lo: 0xa2, hi: 0xa5}, + {value: 0x1008, lo: 0xa6, hi: 0xa6}, + {value: 0x1308, lo: 0xa7, hi: 0xaa}, + {value: 0x1b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 
0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5e2 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5e5 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5ea + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc3, offset 0x5ed + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x1008, lo: 0xaf, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x1308, lo: 0xb8, hi: 0xbd}, + {value: 0x1008, lo: 0xbe, hi: 0xbe}, + {value: 0x1b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc4, offset 0x5f7 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc5, offset 0x600 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x1308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x1008, lo: 0xa9, hi: 0xa9}, + {value: 0x1308, lo: 0xaa, hi: 0xb0}, + {value: 0x1008, lo: 0xb1, hi: 0xb1}, + {value: 0x1308, lo: 0xb2, hi: 0xb3}, + {value: 0x1008, lo: 0xb4, hi: 0xb4}, + {value: 0x1308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc6, offset 0x60c + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xc7, offset 0x60f + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xc8, offset 0x614 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xc9, offset 0x617 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xca, offset 0x61a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xcb, offset 0x61d + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xcc, offset 0x624 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xcd, offset 0x62b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x1308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xce, offset 0x62f + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, 
+ {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xcf, offset 0x63a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd0, offset 0x63d + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x1008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd1, offset 0x643 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x1308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd2, offset 0x648 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0xd3, offset 0x64c + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd4, offset 0x64f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xd5, offset 0x652 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0xbf}, + // Block 0xd6, offset 0x655 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xd7, offset 0x65a + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x1308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xd8, offset 0x664 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd9, offset 0x667 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xda, offset 0x66b + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x1018, lo: 0xa5, hi: 0xa6}, + {value: 0x1318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x1018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x1318, lo: 0xbb, hi: 0xbf}, + // Block 0xdb, offset 0x67a + {value: 0x0000, lo: 0x0b}, + {value: 0x1318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x1318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x1318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xdc, offset 0x686 + {value: 0x0000, lo: 
0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xdd, offset 0x68a + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x1318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xde, offset 0x68f + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xdf, offset 0x694 + {value: 0x0000, lo: 0x03}, + {value: 0x1308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x1308, lo: 0xbb, hi: 0xbf}, + // Block 0xe0, offset 0x698 + {value: 0x0000, lo: 0x04}, + {value: 0x1308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x1308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe1, offset 0x69d + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x1308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x1308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x1308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe2, offset 0x6a6 + {value: 0x0000, lo: 0x0a}, + {value: 0x1308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x1308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x1308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x1308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x1308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xe3, offset 0x6b1 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8f}, + {value: 0x1308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xe4, offset 0x6b7 + {value: 0x0000, lo: 0x07}, + {value: 0x0208, lo: 0x80, hi: 0x83}, + {value: 0x1308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xe5, offset 0x6bf + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6c3 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xe7, offset 0x6c7 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xe8, offset 0x6cd + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe9, offset 0x6d3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xea, offset 0x6d8 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, 
hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x0d}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xec, offset 0x6e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xed, offset 0x6f0 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xee, offset 0x6f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xef, offset 0x6f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf0, offset 0x6fa + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf1, offset 0x700 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf2, offset 0x705 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xf4, offset 0x714 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xf5, offset 0x717 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0xbf}, + // Block 0xf6, offset 0x71a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xf7, offset 0x71d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xf8, offset 0x720 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xf9, offset 0x724 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xfa, offset 0x727 + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 
0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0xfb, offset 0x737 + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfc, offset 0x748 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0xfd, offset 0x74d + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0xfe, offset 0x74f + {value: 0x0000, lo: 0x01}, + {value: 0x13c0, lo: 0x80, hi: 0xbf}, + // Block 0xff, offset 0x751 + {value: 0x0000, lo: 0x02}, + {value: 0x13c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 41559 bytes (40KiB); checksum: F4A1FA4E diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go new file mode 100644 index 000000000..c4ef847e7 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trie.go @@ -0,0 +1,72 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. +func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. 
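To make that range formula concrete before the lookup implementation that follows, here is a small standalone sketch of the same binary search. The struct shape mirrors valueRange above; the one-range block and its stride are invented sample data, not values from the real idnaSparse tables.

package main

import "fmt"

type valueRange struct {
	value  uint16 // header entry: the stride; range entry: the value at r.lo
	lo, hi byte   // header entry: lo is the number of ranges that follow
}

// lookupSparse mirrors sparseBlocks.lookup: entry 'offset' is the header,
// the next header.lo entries are sorted ranges searched by binary search.
func lookupSparse(values []valueRange, offset uint16, b byte) uint16 {
	header := values[offset]
	lo, hi := offset+1, offset+1+uint16(header.lo)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := values[m]
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value // r.value + (b - r.lo) * stride
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0
}

func main() {
	values := []valueRange{
		{value: 2, lo: 1},                   // header: stride 2, one range follows
		{value: 0x0100, lo: 0x90, hi: 0x99}, // bytes 0x90..0x99 map to 0x0100, 0x0102, ...
	}
	fmt.Println(lookupSparse(values, 0, 0x93)) // 0x0100 + 3*2 = 262
}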
+func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 000000000..63cb03b59 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,114 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. +// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..13 unused +// 12 modifier (including virama) +// 11 virama modifier +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. + + joinShift = 8 + joinMask = 0x07 + + viramaModifier = 0x0800 + modifier = 0x1000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not defined currently in unicode. 
+ mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(viramaModifier|catSmallMask) == viramaModifier +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 000000000..685f0e7ea --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. 
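As a concrete reading of that remark (the struct below stores the span as the size duration), with assumed numbers taken from this file, 64 buckets at the 10-second resolution, a level's start is simply its end minus numBuckets*size:

package main

import (
	"fmt"
	"time"
)

func main() {
	const numBuckets = 64    // timeSeriesNumBuckets
	size := 10 * time.Second // one of timeSeriesResolutions

	end := time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)
	start := end.Add(-size * numBuckets) // the level covers [start, end)

	fmt.Println(start.Format("15:04:05"), "->", end.Format("15:04:05"))
	// 23:49:20 -> 00:00:00 (a 10m40s window)
}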
+type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. 
+ ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. 
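Since timeSeries itself is unexported, callers normally go through the TimeSeries and MinuteHourSeries wrappers defined further down in this file. A minimal usage sketch follows; note that this is an internal package, so it only compiles from inside golang.org/x/net (net/trace below is its real consumer), and the counts here are made up.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries"
)

func main() {
	// One Float observation per request; Float is the simplest Observable.
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

	for i := 0; i < 5; i++ {
		f := timeseries.Float(1)
		ts.Add(&f)
	}

	fmt.Println("last minute:", ts.Recent(time.Minute)) // ~5 right after adding
	fmt.Println("total:", ts.Total())                   // 5
}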
+func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. 
+ overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go new file mode 100644 index 000000000..20f2b8940 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex.go @@ -0,0 +1,351 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httplex contains rules around lexical matters of various +// HTTP-related specifications. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. 
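The exported helpers in this package are small and side-effect free, so a usage sketch is short. The inputs below are arbitrary examples; expected results are noted in comments (the Punycode output follows the usual bücher -> xn--bcher-kva mapping).

package main

import (
	"fmt"

	"golang.org/x/net/lex/httplex"
)

func main() {
	vals := []string{"gzip, chunked", "Keep-Alive"}
	fmt.Println(httplex.HeaderValuesContainsToken(vals, "CHUNKED")) // true (ASCII case-insensitive)

	fmt.Println(httplex.ValidHeaderFieldName("X-Request-Id")) // true
	fmt.Println(httplex.ValidHeaderFieldName("Bad Name"))     // false: space is not a tchar

	fmt.Println(httplex.ValidHeaderFieldValue("ok value"))    // true
	fmt.Println(httplex.ValidHeaderFieldValue("bad\x00byte")) // false: NUL is a CTL

	host, err := httplex.PunycodeHostPort("bücher.example:8080")
	fmt.Println(host, err) // expected: xn--bcher-kva.example:8080 <nil>
}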
+package httplex + +import ( + "net" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func IsTokenRune(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) +} + +// HeaderValuesContainsToken reports whether any string in values +// contains the provided token, ASCII case-insensitively. +func HeaderValuesContainsToken(values []string, token string) bool { + for _, v := range values { + if headerValueContainsToken(v, token) { + return true + } + } + return false +} + +// isOWS reports whether b is an optional whitespace byte, as defined +// by RFC 7230 section 3.2.3. +func isOWS(b byte) bool { return b == ' ' || b == '\t' } + +// trimOWS returns x with all optional whitespace removes from the +// beginning and end. +func trimOWS(x string) string { + // TODO: consider using strings.Trim(x, " \t") instead, + // if and when it's fast enough. See issue 10292. + // But this ASCII-only code will probably always beat UTF-8 + // aware code. + for len(x) > 0 && isOWS(x[0]) { + x = x[1:] + } + for len(x) > 0 && isOWS(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// headerValueContainsToken reports whether v (assumed to be a +// 0#element, in the ABNF extension described in RFC 7230 section 7) +// contains token amongst its comma-separated tokens, ASCII +// case-insensitively. +func headerValueContainsToken(v string, token string) bool { + v = trimOWS(v) + if comma := strings.IndexByte(v, ','); comma != -1 { + return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + } + return tokenEqual(v, token) +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. +func tokenEqual(t1, t2 string) bool { + if len(t1) != len(t2) { + return false + } + for i, b := range t1 { + if b >= utf8.RuneSelf { + // No UTF-8 or non-ASCII allowed in tokens. 
+ return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// CTL = +func isCTL(b byte) bool { + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !IsTokenRune(r) { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. +var validHostByte = [256]bool{ + '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, + '8': true, '9': true, + + 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, + 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, + 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, + 'y': true, 'z': true, + + 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, + 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, + 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, + 'Y': true, 'Z': true, + + '!': true, // sub-delims + '$': true, // sub-delims + '%': true, // pct-encoded (and used in IPv6 zones) + '&': true, // sub-delims + '(': true, // sub-delims + ')': true, // sub-delims + '*': true, // sub-delims + '+': true, // sub-delims + ',': true, // sub-delims + '-': true, // unreserved + '.': true, // unreserved + ':': true, // IPv6address + Host expression's optional port + ';': true, // sub-delims + '=': true, // sub-delims + '[': true, + '\'': true, // sub-delims + ']': true, + '_': true, // unreserved + '~': true, // unreserved +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// 
field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. +func PunycodeHostPort(v string) (string, error) { + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. + return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 000000000..d8daec1a7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking; see AuthRequest for the default auth check +// used by the handler registered on http.DefaultServeMux. +// req may be nil. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. 
+ Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. 
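Stepping back to RenderEvents above: it performs no authorization of its own, so when it is mounted somewhere other than http.DefaultServeMux the caller has to supply a check. A sketch under assumed choices (the port, the path, and reusing the package's AuthRequest policy are all arbitrary here):

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/trace"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
		// Reuse the package's default policy (localhost-only unless replaced).
		allowed, sensitive := trace.AuthRequest(req)
		if !allowed {
			http.Error(w, "not allowed", http.StatusUnauthorized)
			return
		}
		trace.RenderEvents(w, req, sensitive)
	})
	log.Fatal(http.ListenAndServe("localhost:6060", mux))
}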
+func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
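newEventLog above and freeEventLog below form a non-blocking freelist over a buffered channel: take from the channel if something is available, otherwise allocate; put back if there is room, otherwise let the garbage collector have it. The same idiom reduced to a standalone sketch, with a hypothetical buf type standing in for eventLog:

package main

import "fmt"

type buf struct{ data []byte }

var freeList = make(chan *buf, 100)

func newBuf() *buf {
	select {
	case b := <-freeList:
		return b // reuse a recycled buffer
	default:
		return &buf{data: make([]byte, 0, 1024)} // freelist empty: allocate
	}
}

func freeBuf(b *buf) {
	b.data = b.data[:0] // reset before reuse, like eventLog.reset
	select {
	case freeList <- b:
	default: // freelist full: drop it and let the GC collect it
	}
}

func main() {
	b := newBuf()
	freeBuf(b)
	fmt.Println(newBuf() == b) // true: the same buffer came back
}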
+func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

[eventsHTML template body: the page's HTML and CSS were stripped during extraction. The surviving Go template directives show the /debug/events page layout: a header table iterating {{range $i, $fam := .Families}} against {{range $j, $bucket := $.Buckets}} to print linked "[{{$n}} {{$bucket.String}}]" counts per family and bucket; then, when a bucket is selected ({{if $.EventLogs}}), a "Family: {{$.Family}}" heading with [Summary]/[Expanded] toggles and a table with "When" and "Elapsed" columns listing {{$el.When}}, {{$el.ElapsedTime}} and {{$el.Title}} for each event log, {{$el.Stack|trimSpace}} in expanded view, and one row per event showing {{.WhenString}}, {{elapsed .Elapsed}}, an E/. error marker from {{if .IsErr}}, and {{.What}}.]
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 000000000..9bf4286c7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
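Returning to the power-of-two bucketing above: log2 and getBucket place 0-1 in bucket 0, 2-3 in bucket 1, 4-7 in bucket 2, and so on, capped at bucketCount-1. A standalone check with the two helpers reproduced from this file (bucketCount is 38 here) and a few sample values:

package main

import "fmt"

const bucketCount = 38

func log2(i int64) int {
	n := 0
	for ; i >= 0x100; i >>= 8 {
		n += 8
	}
	for ; i > 0; i >>= 1 {
		n += 1
	}
	return n
}

func getBucket(i int64) (index int) {
	index = log2(i) - 1
	if index < 0 {
		index = 0
	}
	if index >= bucketCount {
		index = bucketCount - 1
	}
	return
}

func main() {
	for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1000} {
		fmt.Printf("value %4d -> bucket %d\n", v, getBucket(v))
	}
	// value    0 -> bucket 0
	// value    1 -> bucket 0
	// value    2 -> bucket 1
	// value    3 -> bucket 1
	// value    4 -> bucket 2
	// value    7 -> bucket 2
	// value    8 -> bucket 3
	// value 1000 -> bucket 9 (since 512 <= 1000 < 1024)
}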
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}    Mean: {{printf "%.0f" .Mean}}    StdDev: {{printf "%.0f" .StandardDeviation}}    Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}}, {{.Upper}})    {{.N}}    {{printf "%#.3f" .Pct}}%    {{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 000000000..3d9b64611 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1066 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) + }) + http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) + }) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking; see AuthRequest for the default auth check +// used by the handler registered on http.DefaultServeMux. +// req may be nil. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. 
+ data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) 
+} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + tr.Elapsed = time.Now().Sub(tr.Start) + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. 
+ // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. 
+type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Timing information. + Start time.Time + Elapsed time.Duration // zero while active + + // Trace information if non-zero. + traceID uint64 + spanID uint64 + + // Whether this trace resulted in an error. + IsError bool + + // Append-only sequence of events (modulo discards). + mu sync.RWMutex + events []event + maxEvents int + + refs int32 // how many buckets this is in + recycler func(interface{}) + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.refs = 0 + tr.recycler = nil + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. 
+// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { tr.IsError = true } + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.recycler = f +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.traceID, tr.spanID = traceID, spanID +} + +func (tr *trace) SetMaxEvents(m int) { + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + t := tr.Elapsed + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. 
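// Usage sketch: how an HTTP server might instrument a handler with this
// package, following the pattern shown in the package comment above. The
// route, port, and doWork helper are illustrative assumptions, not part of
// the package.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/trace"
)

func doWork() error { return nil } // stand-in for real request handling

func main() {
	// AuthRequest may be replaced; by default only localhost can view the
	// debug pages. Here any viewer is allowed but sensitive events are hidden
	// (assumption: the listener is reachable only from a trusted network).
	trace.AuthRequest = func(req *http.Request) (bool, bool) {
		return true, false
	}

	http.HandleFunc("/objects", func(w http.ResponseWriter, req *http.Request) {
		tr := trace.New("server.Objects", req.URL.Path)
		defer tr.Finish()

		tr.LazyPrintf("serving %q", req.URL.Path)
		if err := doWork(); err != nil {
			tr.LazyPrintf("doWork failed: %v", err)
			tr.SetError()
		}
	})

	// /debug/requests and /debug/events are registered by this package's init.
	log.Fatal(http.ListenAndServe(":8080", nil))
}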
+func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
When    Elapsed (s)
{{$tr.When}}    {{$tr.ElapsedTime}}    {{$tr.Title}}
{{.WhenString}}    {{elapsed .Elapsed}}    {{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 000000000..d60819118 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 000000000..df6e1fba7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 000000000..69a4ac7ee --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,106 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. +func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. +func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. 
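// Usage sketch for Dial: open a client connection, write a text frame, and
// read the reply. The URL and origin below are illustrative placeholders.

package main

import (
	"log"

	"golang.org/x/net/websocket"
)

func main() {
	ws, err := websocket.Dial("ws://127.0.0.1:9000/ws", "", "http://127.0.0.1/")
	if err != nil {
		log.Fatal(err)
	}
	defer ws.Close()

	// Conn is an io.ReadWriteCloser; each Write sends one frame of
	// ws.PayloadType (TextFrame by default).
	if _, err := ws.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}

	reply := make([]byte, 512)
	n, err := ws.Read(reply)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("received %q", reply[:n])
}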
+func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. +func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + dialer := config.Dialer + if dialer == nil { + dialer = &net.Dialer{} + } + client, err = dialWithDialer(dialer, config) + if err != nil { + goto Error + } + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go new file mode 100644 index 000000000..2dab943a4 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "net" +) + +func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { + switch config.Location.Scheme { + case "ws": + conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + return +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 000000000..8cffdd16c --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,583 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. 
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. +type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. +type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. 
+ hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) + frame.writer.Write(header) + data := make([]byte, length) + for i := range data { + data[i] = msg[i] ^ frame.header.MaskingKey[i%4] + } + frame.writer.Write(data) + err = frame.writer.Flush() + return length, err + } + frame.writer.Write(header) + frame.writer.Write(msg) + err = frame.writer.Flush() + return length, err +} + +func (frame *hybiFrameWriter) Close() error { return nil } + +type hybiFrameWriterFactory struct { + *bufio.Writer + needMaskingKey bool +} + +func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} + if buf.needMaskingKey { + frameHeader.MaskingKey, err = generateMaskingKey() + if err != nil { + return nil, err + } + } + return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil +} + +type hybiFrameHandler struct { + conn *Conn + payloadType byte +} + +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { + if handler.conn.IsServerConn() { + // The client MUST mask all frames sent to the server. + if frame.(*hybiFrameReader).header.MaskingKey == nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } else { + // The server MUST NOT mask all frames. 
+ if frame.(*hybiFrameReader).header.MaskingKey != nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } + if header := frame.HeaderReader(); header != nil { + io.Copy(ioutil.Discard, header) + } + switch frame.PayloadType() { + case ContinuationFrame: + frame.(*hybiFrameReader).header.OpCode = handler.payloadType + case TextFrame, BinaryFrame: + handler.payloadType = frame.PayloadType() + case CloseFrame: + return nil, io.EOF + case PingFrame, PongFrame: + b := make([]byte, maxControlFramePayloadLength) + n, err := io.ReadFull(frame, b) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + io.Copy(ioutil.Discard, frame) + if frame.PayloadType() == PingFrame { + if _, err := handler.WritePong(b[:n]); err != nil { + return nil, err + } + } + return nil, nil + } + return frame, nil +} + +func (handler *hybiFrameHandler) WriteClose(status int) (err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame) + if err != nil { + return err + } + msg := make([]byte, 2) + binary.BigEndian.PutUint16(msg, uint16(status)) + _, err = w.Write(msg) + w.Close() + return err +} + +func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// newHybiConn creates a new WebSocket connection speaking hybi draft protocol. +func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + if buf == nil { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + buf = bufio.NewReadWriter(br, bw) + } + ws := &Conn{config: config, request: request, buf: buf, rwc: rwc, + frameReaderFactory: hybiFrameReaderFactory{buf.Reader}, + frameWriterFactory: hybiFrameWriterFactory{ + buf.Writer, request == nil}, + PayloadType: TextFrame, + defaultCloseStatus: closeStatusNormal} + ws.frameHandler = &hybiFrameHandler{conn: ws} + return ws +} + +// generateMaskingKey generates a masking key for a frame. +func generateMaskingKey() (maskingKey []byte, err error) { + maskingKey = make([]byte, 4) + if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil { + return + } + return +} + +// generateNonce generates a nonce consisting of a randomly selected 16-byte +// value that has been base64-encoded. +func generateNonce() (nonce []byte) { + key := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + panic(err) + } + nonce = make([]byte, 24) + base64.StdEncoding.Encode(nonce, key) + return +} + +// removeZone removes IPv6 zone identifer from host. +// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" +func removeZone(host string) string { + if !strings.HasPrefix(host, "[") { + return host + } + i := strings.LastIndex(host, "]") + if i < 0 { + return host + } + j := strings.LastIndex(host[:i], "%") + if j < 0 { + return host + } + return host[:j] + host[i:] +} + +// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of +// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string. 
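// Worked example of the accept computation performed by getNonceAccept below,
// using the sample Sec-WebSocket-Key from RFC 6455, section 1.3.

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	const (
		sampleKey = "dGhlIHNhbXBsZSBub25jZQ==" // client Sec-WebSocket-Key from RFC 6455, section 1.3
		guid      = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
	)
	h := sha1.New()
	h.Write([]byte(sampleKey + guid))
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
	// Output: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
}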
+func getNonceAccept(nonce []byte) (expected []byte, err error) { + h := sha1.New() + if _, err = h.Write(nonce); err != nil { + return + } + if _, err = h.Write([]byte(websocketGUID)); err != nil { + return + } + expected = make([]byte, 28) + base64.StdEncoding.Encode(expected, h.Sum(nil)) + return +} + +// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17 +func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { + bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") + bw.WriteString("Upgrade: websocket\r\n") + bw.WriteString("Connection: Upgrade\r\n") + nonce := generateNonce() + if config.handshakeData != nil { + nonce = []byte(config.handshakeData["key"]) + } + bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n") + bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n") + + if config.Version != ProtocolVersionHybi13 { + return ErrBadProtocolVersion + } + + bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n") + if len(config.Protocol) > 0 { + bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + err = config.Header.WriteSubset(bw, handshakeHeader) + if err != nil { + return err + } + + bw.WriteString("\r\n") + if err = bw.Flush(); err != nil { + return err + } + + resp, err := http.ReadResponse(br, &http.Request{Method: "GET"}) + if err != nil { + return err + } + if resp.StatusCode != 101 { + return ErrBadStatus + } + if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" || + strings.ToLower(resp.Header.Get("Connection")) != "upgrade" { + return ErrBadUpgrade + } + expectedAccept, err := getNonceAccept(nonce) + if err != nil { + return err + } + if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) { + return ErrChallengeResponse + } + if resp.Header.Get("Sec-WebSocket-Extensions") != "" { + return ErrUnsupportedExtensions + } + offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol") + if offeredProtocol != "" { + protocolMatched := false + for i := 0; i < len(config.Protocol); i++ { + if config.Protocol[i] == offeredProtocol { + protocolMatched = true + break + } + } + if !protocolMatched { + return ErrBadWebSocketProtocol + } + config.Protocol = []string{offeredProtocol} + } + + return nil +} + +// newHybiClientConn creates a client WebSocket connection after handshake. +func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn { + return newHybiConn(config, buf, rwc, nil) +} + +// A HybiServerHandshaker performs a server handshake using hybi draft protocol. +type hybiServerHandshaker struct { + *Config + accept []byte +} + +func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) { + c.Version = ProtocolVersionHybi13 + if req.Method != "GET" { + return http.StatusMethodNotAllowed, ErrBadRequestMethod + } + // HTTP version can be safely ignored. 
+ + if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" || + !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") { + return http.StatusBadRequest, ErrNotWebSocket + } + + key := req.Header.Get("Sec-Websocket-Key") + if key == "" { + return http.StatusBadRequest, ErrChallengeResponse + } + version := req.Header.Get("Sec-Websocket-Version") + switch version { + case "13": + c.Version = ProtocolVersionHybi13 + default: + return http.StatusBadRequest, ErrBadWebSocketVersion + } + var scheme string + if req.TLS != nil { + scheme = "wss" + } else { + scheme = "ws" + } + c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI()) + if err != nil { + return http.StatusBadRequest, err + } + protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol")) + if protocol != "" { + protocols := strings.Split(protocol, ",") + for i := 0; i < len(protocols); i++ { + c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i])) + } + } + c.accept, err = getNonceAccept([]byte(key)) + if err != nil { + return http.StatusInternalServerError, err + } + return http.StatusSwitchingProtocols, nil +} + +// Origin parses the Origin header in req. +// If the Origin header is not set, it returns nil and nil. +func Origin(config *Config, req *http.Request) (*url.URL, error) { + var origin string + switch config.Version { + case ProtocolVersionHybi13: + origin = req.Header.Get("Origin") + } + if origin == "" { + return nil, nil + } + return url.ParseRequestURI(origin) +} + +func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) { + if len(c.Protocol) > 0 { + if len(c.Protocol) != 1 { + // You need choose a Protocol in Handshake func in Server. + return ErrBadWebSocketProtocol + } + } + buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") + buf.WriteString("Upgrade: websocket\r\n") + buf.WriteString("Connection: Upgrade\r\n") + buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") + if len(c.Protocol) > 0 { + buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + if c.Header != nil { + err := c.Header.WriteSubset(buf, handshakeHeader) + if err != nil { + return err + } + } + buf.WriteString("\r\n") + return buf.Flush() +} + +func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiServerConn(c.Config, buf, rwc, request) +} + +// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. +func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiConn(config, buf, rwc, request) +} diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go new file mode 100644 index 000000000..0895dea19 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/server.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "fmt" + "io" + "net/http" +) + +func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) { + var hs serverHandshaker = &hybiServerHandshaker{Config: config} + code, err := hs.ReadHandshake(buf.Reader, req) + if err == ErrBadWebSocketVersion { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if err != nil { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if handshake != nil { + err = handshake(config, req) + if err != nil { + code = http.StatusForbidden + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + } + err = hs.AcceptHandshake(buf.Writer) + if err != nil { + code = http.StatusBadRequest + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + conn = hs.NewServerConn(buf, rwc, req) + return +} + +// Server represents a server of a WebSocket. +type Server struct { + // Config is a WebSocket configuration for new WebSocket connection. + Config + + // Handshake is an optional function in WebSocket handshake. + // For example, you can check, or don't check Origin header. + // Another example, you can select config.Protocol. + Handshake func(*Config, *http.Request) error + + // Handler handles a WebSocket connection. + Handler +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.serveWebSocket(w, req) +} + +func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) { + rwc, buf, err := w.(http.Hijacker).Hijack() + if err != nil { + panic("Hijack failed: " + err.Error()) + } + // The server should abort the WebSocket connection if it finds + // the client did not send a handshake that matches with protocol + // specification. + defer rwc.Close() + conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake) + if err != nil { + return + } + if conn == nil { + panic("unexpected nil conn") + } + s.Handler(conn) +} + +// Handler is a simple interface to a WebSocket browser client. +// It checks if Origin header is valid URL by default. +// You might want to verify websocket.Conn.Config().Origin in the func. +// If you use Server instead of Handler, you could call websocket.Origin and +// check the origin in your Handshake func. So, if you want to accept +// non-browser clients, which do not send an Origin header, set a +// Server.Handshake that does not check the origin. 
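// Usage sketch for the pattern described above: install a Server.Handshake
// that skips the origin check so non-browser clients (which send no Origin
// header) can connect. The route and listen address are illustrative.

package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/websocket"
)

func main() {
	echo := websocket.Server{
		// Skip the default origin check so non-browser clients may connect.
		Handshake: func(config *websocket.Config, req *http.Request) error {
			return nil
		},
		Handler: func(ws *websocket.Conn) {
			io.Copy(ws, ws) // echo each frame back to the client
		},
	}
	http.Handle("/echo", echo)
	log.Fatal(http.ListenAndServe(":9000", nil))
}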
+type Handler func(*Conn) + +func checkOrigin(config *Config, req *http.Request) (err error) { + config.Origin, err = Origin(config, req) + if err == nil && config.Origin == nil { + return fmt.Errorf("null origin") + } + return err +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s := Server{Handler: h, Handshake: checkOrigin} + s.serveWebSocket(w, req) +} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go new file mode 100644 index 000000000..e242c89a7 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -0,0 +1,448 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements a client and server for the WebSocket protocol +// as specified in RFC 6455. +// +// This package currently lacks some features found in an alternative +// and more actively maintained WebSocket package: +// +// https://godoc.org/github.com/gorilla/websocket +// +package websocket // import "golang.org/x/net/websocket" + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +const ( + ProtocolVersionHybi13 = 13 + ProtocolVersionHybi = ProtocolVersionHybi13 + SupportedProtocolVersion = "13" + + ContinuationFrame = 0 + TextFrame = 1 + BinaryFrame = 2 + CloseFrame = 8 + PingFrame = 9 + PongFrame = 10 + UnknownFrame = 255 + + DefaultMaxPayloadBytes = 32 << 20 // 32MB +) + +// ProtocolError represents WebSocket protocol errors. +type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrBadProtocolVersion = &ProtocolError{"bad protocol version"} + ErrBadScheme = &ProtocolError{"bad scheme"} + ErrBadStatus = &ProtocolError{"bad status"} + ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"} + ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"} + ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"} + ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"} + ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"} + ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"} + ErrBadFrame = &ProtocolError{"bad frame"} + ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"} + ErrNotWebSocket = &ProtocolError{"not websocket protocol"} + ErrBadRequestMethod = &ProtocolError{"bad method"} + ErrNotSupported = &ProtocolError{"not supported"} +) + +// ErrFrameTooLarge is returned by Codec's Receive method if payload size +// exceeds limit set by Conn.MaxPayloadBytes +var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit") + +// Addr is an implementation of net.Addr for WebSocket. +type Addr struct { + *url.URL +} + +// Network returns the network type for a WebSocket, "websocket". +func (addr *Addr) Network() string { return "websocket" } + +// Config is a WebSocket configuration +type Config struct { + // A WebSocket server address. + Location *url.URL + + // A Websocket client origin. + Origin *url.URL + + // WebSocket subprotocols. + Protocol []string + + // WebSocket protocol version. + Version int + + // TLS config for secure WebSocket (wss). 
+ TlsConfig *tls.Config + + // Additional header fields to be sent in WebSocket opening handshake. + Header http.Header + + // Dialer used when opening websocket connections. + Dialer *net.Dialer + + handshakeData map[string]string +} + +// serverHandshaker is an interface to handle WebSocket server side handshake. +type serverHandshaker interface { + // ReadHandshake reads handshake request message from client. + // Returns http response code and error if any. + ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) + + // AcceptHandshake accepts the client handshake request and sends + // handshake response back to client. + AcceptHandshake(buf *bufio.Writer) (err error) + + // NewServerConn creates a new WebSocket connection. + NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn) +} + +// frameReader is an interface to read a WebSocket frame. +type frameReader interface { + // Reader is to read payload of the frame. + io.Reader + + // PayloadType returns payload type. + PayloadType() byte + + // HeaderReader returns a reader to read header of the frame. + HeaderReader() io.Reader + + // TrailerReader returns a reader to read trailer of the frame. + // If it returns nil, there is no trailer in the frame. + TrailerReader() io.Reader + + // Len returns total length of the frame, including header and trailer. + Len() int +} + +// frameReaderFactory is an interface to creates new frame reader. +type frameReaderFactory interface { + NewFrameReader() (r frameReader, err error) +} + +// frameWriter is an interface to write a WebSocket frame. +type frameWriter interface { + // Writer is to write payload of the frame. + io.WriteCloser +} + +// frameWriterFactory is an interface to create new frame writer. +type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int + + // MaxPayloadBytes limits the size of frame payload received over Conn + // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used. + MaxPayloadBytes int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. 
+func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +func (ws *Conn) IsClientConn() bool { return ws.request == nil } +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. +func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. +func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. +func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. +type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. 
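On the client side, the deadline setters above pair naturally with the `Codec` Send/Receive methods defined next. A sketch assuming the package's `Dial` helper from its client half (vendored elsewhere in this diff, not shown in this excerpt) and an echo endpoint like the one sketched earlier; the addresses are placeholders.

```
package main

import (
	"log"
	"time"

	"golang.org/x/net/websocket"
)

func main() {
	// Dial the echo endpoint; an origin is required by the handshake.
	ws, err := websocket.Dial("ws://localhost:8080/echo", "", "http://localhost/")
	if err != nil {
		log.Fatal(err)
	}
	defer ws.Close()

	// Fail the exchange below if the server does not answer within 5s.
	if err := ws.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
		log.Fatal(err)
	}

	// Message is the string/[]byte codec documented further down.
	if err := websocket.Message.Send(ws, "hello"); err != nil {
		log.Fatal(err)
	}
	var reply string
	if err := websocket.Message.Receive(ws, &reply); err != nil {
		log.Fatal(err)
	}
	log.Println("server said:", reply)
}
```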
+func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores +// in v. The whole frame payload is read to an in-memory buffer; max size of +// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds +// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire +// completely. The next call to Receive would read and discard leftover data of +// previous oversized frame before processing next frame. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + maxPayloadBytes := ws.MaxPayloadBytes + if maxPayloadBytes == 0 { + maxPayloadBytes = DefaultMaxPayloadBytes + } + if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) { + // payload size exceeds limit, no need to call Unmarshal + // + // set frameReader to current oversized frame so that + // the next call to this function can drain leftover + // data before processing the next frame + ws.frameReader = frame + return ErrFrameTooLarge + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. + +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. 
+ +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 000000000..46aa2b12d --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 000000000..d02f24fd5 --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The oauth2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md new file mode 100644 index 000000000..b0ddf3c10 --- /dev/null +++ b/vendor/golang.org/x/oauth2/README.md @@ -0,0 +1,74 @@ +# OAuth2 for Go + +[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) +[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) + +oauth2 package contains a client implementation for OAuth 2.0 spec. + +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +See godoc for further documentation and examples. + +* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) + + +## App Engine + +In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor +of the [`context.Context`](https://golang.org/x/net/context#Context) type from +the `golang.org/x/net/context` package + +This means its no longer possible to use the "Classic App Engine" +`appengine.Context` type with the `oauth2` package. (You're using +Classic App Engine if you import the package `"appengine"`.) + +To work around this, you may use the new `"google.golang.org/appengine"` +package. This package has almost the same API as the `"appengine"` package, +but it can be fetched with `go get` and used on "Managed VMs" and well as +Classic App Engine. + +See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) +for information on updating your app. + +If you don't want to update your entire app to use the new App Engine packages, +you may use both sets of packages in parallel, using only the new packages +with the `oauth2` package. + + import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" + ) + + func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, + } + client.Get("...") + } + +## Contributing + +We appreciate your help! + +To contribute, please read the contribution guidelines: + https://golang.org/doc/contribute.html + +Note that the Go project does not use GitHub pull requests but +uses Gerrit for code reviews. See the contribution guide for details. diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go new file mode 100644 index 000000000..8962c49d1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/client_appengine.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// App Engine hooks. + +package oauth2 + +import ( + "net/http" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" + "google.golang.org/appengine/urlfetch" +) + +func init() { + internal.RegisterContextClientFunc(contextClientAppEngine) +} + +func contextClientAppEngine(ctx context.Context) (*http.Client, error) { + return urlfetch.Client(ctx), nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 000000000..50d918b87 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex. +var appengineFlex bool + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineAppIDFunc func(c context.Context) string + +// AppEngineTokenSource returns a token source that fetches tokens +// issued to the current App Engine application's service account. +// If you are implementing a 3-legged OAuth 2.0 flow on App Engine +// that involves user accounts, see oauth2.Config instead. +// +// The provided context must have come from appengine.NewContext. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &appEngineTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. +var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 000000000..56669eaa9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID +} diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go new file mode 100644 index 000000000..5d0231af2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm + +package google + +func init() { + appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server. +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 000000000..004ed4eab --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// DefaultCredentials holds "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type DefaultCredentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource +} + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) { + // First, try the environment variable. 
+ const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, scope) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + if creds, err := readCredentialsFile(ctx, filename, scope); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil && !appengineFlex { + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scope...), + }, nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource(""), + }, nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var f credentialsFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 000000000..66a8b0e18 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,202 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. +// It supports the Web server flow, client-side credentials, service accounts, +// Google Compute Engine service accounts, and Google App Engine service +// accounts. +// +// For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +package google // import "golang.org/x/oauth2/google" + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. 
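The Application Default Credentials helpers in `default.go` above resolve credentials from the four locations listed in the `FindDefaultCredentials` comment; `DefaultClient` is the usual entry point. An illustrative sketch, not part of the vendored files; the scope and request URL are placeholders.

```
package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Resolves GOOGLE_APPLICATION_CREDENTIALS, the gcloud well-known
	// file, App Engine, or the GCE metadata server, in that order.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}

	// The returned client attaches and refreshes tokens automatically.
	resp, err := client.Get("https://www.googleapis.com/storage/v1/b?project=my-project")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
```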
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. +func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` // serviceAccountKey or userCredentialsKey + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + + // User Credential fields + // (These typically come from gcloud auth.) 
+ ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` +} + +func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(scopes) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: scopes, + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 000000000..b0fdb3a88 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. 
+// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 000000000..bdc18084b --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
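Two ways to consume a service-account JSON key were vendored just above: `JWTConfigFromJSON` (the standard two-legged flow) and `JWTAccessTokenSourceFromJSON` (a self-signed JWT used directly as the bearer token, for the few services that accept it). A hedged sketch of both; the file path, scope, and audience are placeholders.

```
package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Placeholder path to a service-account key downloaded from the
	// Developers Console.
	jsonKey, err := ioutil.ReadFile("service-account.json")
	if err != nil {
		log.Fatal(err)
	}

	// Standard two-legged flow: exchange a signed JWT for an access
	// token at the token endpoint.
	conf, err := google.JWTConfigFromJSON(jsonKey, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(ctx)
	_ = client // use like any *http.Client

	// Alternative for services that accept a self-signed JWT directly.
	ts, err := google.JWTAccessTokenSourceFromJSON(jsonKey, "https://example.googleapis.com/")
	if err != nil {
		log.Fatal(err)
	}
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token type:", tok.TokenType)
}
```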
+func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := internal.ParseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieve tokens from +// Google Cloud SDK credentials using the provided context. +// It will returns the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. +func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. 
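`NewSDKConfig` above reads credentials created by `gcloud auth login`. A short usage sketch, illustrative only; the request URL is a placeholder.

```
package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	// An empty account means "whichever account is active in gcloud".
	conf, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	client := conf.Client(ctx) // auto-refreshing *http.Client
	log.Println("authorized scopes:", conf.Scopes())

	resp, err := client.Get("https://www.googleapis.com/oauth2/v2/userinfo")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
```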
+var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 000000000..e31541b39 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "bufio" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. +func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +func ParseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": {}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +func CondVal(v string) []string { + if v == "" { + return nil + } + return []string{v} +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 000000000..018b58ad1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,247 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
+package internal + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" +) + +// Token represents the crendentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://api.codeswholesale.com/oauth/token", + "https://api.dropbox.com/", + "https://api.dropboxapi.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", + "https://api.soundcloud.com/", + "https://api.twitch.tv/", + "https://app.box.com/", + "https://connect.stripe.com/", + "https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214 + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", + "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", + "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", + "https://sandbox.codeswholesale.com/oauth/token", +} + +// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. 
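The provider list above is how the package decides whether to send client credentials via HTTP Basic auth or as form values. Code outside `internal` extends it through the public wrapper `oauth2.RegisterBrokenAuthHeaderProvider`, vendored later in this diff. A hedged sketch with a made-up provider and token URL:

```
package main

import "golang.org/x/oauth2"

func main() {
	// Hypothetical provider whose token endpoint rejects HTTP Basic
	// auth; after registration, client_id/client_secret are sent as
	// form values instead of an Authorization header.
	oauth2.RegisterBrokenAuthHeaderProvider("https://auth.example.com/oauth/token")

	conf := &oauth2.Config{
		ClientID:     "client-id",
		ClientSecret: "client-secret",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example.com/oauth/authorize",
			TokenURL: "https://auth.example.com/oauth/token",
		},
	}
	_ = conf // used with the usual three-legged flow
}
```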
+var brokenAuthHeaderDomains = []string{ + ".force.com", + ".okta.com", + ".oktapreview.com", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. +// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + if u, err := url.Parse(tokenURL); err == nil { + for _, s := range brokenAuthHeaderDomains { + if strings.HasSuffix(u.Host, s) { + return false + } + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. + return true +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + bustedAuth := !providerAuthHeaderWorks(tokenURL) + if bustedAuth { + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } + } + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(clientID, clientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. 
+ e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 000000000..f1f173e34 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 000000000..683d2d271 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. 
+// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. +// Please switch to another JWS package or copy this package into your own +// source tree. +package jws // import "golang.org/x/oauth2/jws" + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) + Typ string `json:"typ,omitempty"` // token type (Optional). + + // Email for which the application is requesting delegated access (Optional). + Sub string `json:"sub,omitempty"` + + // The old name of Sub. Client keeps setting Prn to be + // complaint with legacy OAuth 2.0 providers. (Optional) + Prn string `json:"prn,omitempty"` + + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + // This array is marshalled using custom code (see (c *ClaimSet) encode()). + PrivateClaims map[string]interface{} `json:"-"` +} + +func (c *ClaimSet) encode() (string, error) { + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` + + // The optional hint of which key is being used. 
+ KeyID string `json:"kid,omitempty"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. + return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 000000000..e016db421 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,159 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". 
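Before moving on to the `jwt.Config` type below, here is a self-contained round trip through the `jws` helpers vendored above (sign, verify, decode). The key is generated locally, so this is purely illustrative; real callers load a service account's private key instead.

```
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"log"
	"time"

	"golang.org/x/oauth2/jws"
)

func main() {
	// Throwaway key for illustration only.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	claims := &jws.ClaimSet{
		Iss: "service-account@example.com",
		Aud: "https://accounts.google.com/o/oauth2/token",
		Iat: time.Now().Unix(),
		Exp: time.Now().Add(time.Hour).Unix(),
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}

	token, err := jws.Encode(header, claims, key)
	if err != nil {
		log.Fatal(err)
	}

	// Verify checks the RS256 signature against the public half.
	if err := jws.Verify(token, &key.PublicKey); err != nil {
		log.Fatal("signature did not verify: ", err)
	}

	// Decode recovers the claim set from the payload segment.
	decoded, err := jws.Decode(token)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("issuer:", decoded.Iss)
}
```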
+type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) + } + // tokenRes is the JSON response body. 
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 000000000..3e4835d7e --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,340 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 // import "golang.org/x/oauth2" + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. 
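+// A minimal sketch of consuming one (the token string is a placeholder):
+// wrap the source with NewClient so each request carries an Authorization header.
+//
+//	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})
+//	client := oauth2.NewClient(ctx, ts)
+//	resp, err := client.Get("https://api.example.com/endpoint")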
+type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint contains the OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string +} + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is a token to protect the user from CSRF attacks. You must +// always provide a non-zero string and validate that it matches the +// the state query parameter on your redirect callback. +// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. +func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + "state": internal.CondVal(state), + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. 
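+//
+// An illustrative call (client ID, secret and endpoint are placeholders):
+//
+//	conf := &oauth2.Config{
+//		ClientID:     "client-id",
+//		ClientSecret: "client-secret",
+//		Endpoint:     oauth2.Endpoint{TokenURL: "https://provider.example.com/token"},
+//	}
+//	tok, err := conf.PasswordCredentialsToken(ctx, "user", "password")
+//	if err != nil {
+//		// handle error
+//	}
+//	client := conf.Client(ctx, tok)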
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. 
+func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + c, err := internal.ContextClient(ctx) + if err != nil { + return &http.Client{Transport: internal.ErrorTransport{Err: err}} + } + return c + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextTransport(ctx), + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects. +func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 000000000..7a3167f15 --- /dev/null +++ b/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,158 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// expiryDelta determines how earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const expiryDelta = 10 * time.Second + +// Token represents the crendentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. 
+// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. 
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error.. +func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) + if err != nil { + return nil, err + } + return tokenFromInternal(tk), nil +} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go new file mode 100644 index 000000000..92ac7e253 --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport.go @@ -0,0 +1,132 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// with a token from the supplied Sources. +// +// Transport is a low-level mechanism. Most code will use the +// higher-level Config.Client method instead. +type Transport struct { + // Source supplies the token to add to outgoing requests' + // Authorization headers. + Source TokenSource + + // Base is the base RoundTripper used to make HTTP requests. + // If nil, http.DefaultTransport is used. + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
+ } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/text/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/text/PATENTS b/vendor/golang.org/x/text/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/text/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go new file mode 100644 index 000000000..d7031b694 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/code.go @@ -0,0 +1,351 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gen + +import ( + "bytes" + "encoding/gob" + "fmt" + "hash" + "hash/fnv" + "io" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// This file contains utilities for generating code. + +// TODO: other write methods like: +// - slices, maps, types, etc. + +// CodeWriter is a utility for writing structured code. It computes the content +// hash and size of written content. It ensures there are newlines between +// written code blocks. +type CodeWriter struct { + buf bytes.Buffer + Size int + Hash hash.Hash32 // content hash + gob *gob.Encoder + // For comments we skip the usual one-line separator if they are followed by + // a code block. + skipSep bool +} + +func (w *CodeWriter) Write(p []byte) (n int, err error) { + return w.buf.Write(p) +} + +// NewCodeWriter returns a new CodeWriter. +func NewCodeWriter() *CodeWriter { + h := fnv.New32() + return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} +} + +// WriteGoFile appends the buffer with the total size of all created structures +// and writes it as a Go file to the the given file with the given package name. +func (w *CodeWriter) WriteGoFile(filename, pkg string) { + f, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer f.Close() + if _, err = w.WriteGo(f, pkg); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo appends the buffer with the total size of all created structures and +// writes it as a Go file to the the given writer with the given package name. +func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) { + sz := w.Size + w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32()) + defer w.buf.Reset() + return WriteGo(out, pkg, w.buf.Bytes()) +} + +func (w *CodeWriter) printf(f string, x ...interface{}) { + fmt.Fprintf(w, f, x...) +} + +func (w *CodeWriter) insertSep() { + if w.skipSep { + w.skipSep = false + return + } + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n") +} + +// WriteComment writes a comment block. All line starts are prefixed with "//". +// Initial empty lines are gobbled. The indentation for the first line is +// stripped from consecutive lines. +func (w *CodeWriter) WriteComment(comment string, args ...interface{}) { + s := fmt.Sprintf(comment, args...) + s = strings.Trim(s, "\n") + + // Use at least two newlines to ensure a blank space between the previous + // block. 
WriteGoFile will remove extraneous newlines. + w.printf("\n\n// ") + w.skipSep = true + + // strip first indent level. + sep := "\n" + for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { + sep += s[:1] + } + + strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) + + w.printf("\n") +} + +func (w *CodeWriter) writeSizeInfo(size int) { + w.printf("// Size: %d bytes\n", size) +} + +// WriteConst writes a constant of the given name and value. +func (w *CodeWriter) WriteConst(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + + switch v.Type().Kind() { + case reflect.String: + w.printf("const %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + w.printf("\n") + default: + w.printf("const %s = %#v\n", name, x) + } +} + +// WriteVar writes a variable of the given name and value. +func (w *CodeWriter) WriteVar(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + oldSize := w.Size + sz := int(v.Type().Size()) + w.Size += sz + + switch v.Type().Kind() { + case reflect.String: + w.printf("var %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + case reflect.Struct: + w.gob.Encode(x) + fallthrough + case reflect.Slice, reflect.Array: + w.printf("var %s = ", name) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + default: + w.printf("var %s %s = ", name, typeName(x)) + w.gob.Encode(x) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + } + w.printf("\n") +} + +func (w *CodeWriter) writeValue(v reflect.Value) { + x := v.Interface() + switch v.Kind() { + case reflect.String: + w.WriteString(v.String()) + case reflect.Array: + // Don't double count: callers of WriteArray count on the size being + // added, so we need to discount it here. + w.Size -= int(v.Type().Size()) + w.writeSlice(x, true) + case reflect.Slice: + w.writeSlice(x, false) + case reflect.Struct: + w.printf("%s{\n", typeName(v.Interface())) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + w.printf("%s: ", t.Field(i).Name) + w.writeValue(v.Field(i)) + w.printf(",\n") + } + w.printf("}") + default: + w.printf("%#v", x) + } +} + +// WriteString writes a string literal. +func (w *CodeWriter) WriteString(s string) { + s = strings.Replace(s, `\`, `\\`, -1) + io.WriteString(w.Hash, s) // content hash + w.Size += len(s) + + const maxInline = 40 + if len(s) <= maxInline { + w.printf("%q", s) + return + } + + // We will render the string as a multi-line string. + const maxWidth = 80 - 4 - len(`"`) - len(`" +`) + + // When starting on its own line, go fmt indents line 2+ an extra level. + n, max := maxWidth, maxWidth-4 + + // As per https://golang.org/issue/18078, the compiler has trouble + // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, + // for large N. We insert redundant, explicit parentheses to work around + // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + + // ... + s127) + etc + (etc + ... + sN). + explicitParens, extraComment := len(s) > 128*1024, "" + if explicitParens { + w.printf(`(`) + extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" + } + + // Print "" +\n, if a string does not start on its own line. 
+ b := w.buf.Bytes() + if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { + w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) + n, max = maxWidth, maxWidth + } + + w.printf(`"`) + + for sz, p, nLines := 0, 0, 0; p < len(s); { + var r rune + r, sz = utf8.DecodeRuneInString(s[p:]) + out := s[p : p+sz] + chars := 1 + if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { + switch sz { + case 1: + out = fmt.Sprintf("\\x%02x", s[p]) + case 2, 3: + out = fmt.Sprintf("\\u%04x", r) + case 4: + out = fmt.Sprintf("\\U%08x", r) + } + chars = len(out) + } + if n -= chars; n < 0 { + nLines++ + if explicitParens && nLines&63 == 63 { + w.printf("\") + (\"") + } + w.printf("\" +\n\"") + n = max - len(out) + } + w.printf("%s", out) + p += sz + } + w.printf(`"`) + if explicitParens { + w.printf(`)`) + } +} + +// WriteSlice writes a slice value. +func (w *CodeWriter) WriteSlice(x interface{}) { + w.writeSlice(x, false) +} + +// WriteArray writes an array value. +func (w *CodeWriter) WriteArray(x interface{}) { + w.writeSlice(x, true) +} + +func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { + v := reflect.ValueOf(x) + w.gob.Encode(v.Len()) + w.Size += v.Len() * int(v.Type().Elem().Size()) + name := typeName(x) + if isArray { + name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) + } + if isArray { + w.printf("%s{\n", name) + } else { + w.printf("%s{ // %d elements\n", name, v.Len()) + } + + switch kind := v.Type().Elem().Kind(); kind { + case reflect.String: + for _, s := range x.([]string) { + w.WriteString(s) + w.printf(",\n") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // nLine and nBlock are the number of elements per line and block. + nLine, nBlock, format := 8, 64, "%d," + switch kind { + case reflect.Uint8: + format = "%#02x," + case reflect.Uint16: + format = "%#04x," + case reflect.Uint32: + nLine, nBlock, format = 4, 32, "%#08x," + case reflect.Uint, reflect.Uint64: + nLine, nBlock, format = 4, 32, "%#016x," + case reflect.Int8: + nLine = 16 + } + n := nLine + for i := 0; i < v.Len(); i++ { + if i%nBlock == 0 && v.Len() > nBlock { + w.printf("// Entry %X - %X\n", i, i+nBlock-1) + } + x := v.Index(i).Interface() + w.gob.Encode(x) + w.printf(format, x) + if n--; n == 0 { + n = nLine + w.printf("\n") + } + } + w.printf("\n") + case reflect.Struct: + zero := reflect.Zero(v.Type().Elem()).Interface() + for i := 0; i < v.Len(); i++ { + x := v.Index(i).Interface() + w.gob.EncodeValue(v) + if !reflect.DeepEqual(zero, x) { + line := fmt.Sprintf("%#v,\n", x) + line = line[strings.IndexByte(line, '{'):] + w.printf("%d: ", i) + w.printf(line) + } + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + w.printf("%d: %#v,\n", i, v.Index(i).Interface()) + } + default: + panic("gen: slice elem type not supported") + } + w.printf("}") +} + +// WriteType writes a definition of the type of the given value and returns the +// type name. +func (w *CodeWriter) WriteType(x interface{}) string { + t := reflect.TypeOf(x) + w.printf("type %s struct {\n", t.Name()) + for i := 0; i < t.NumField(); i++ { + w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) + } + w.printf("}\n") + return t.Name() +} + +// typeName returns the name of the go type of x. 
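+// For example (informal, per the implementation below):
+//
+//	typeName(uint16(0))  // "uint16"
+//	typeName([]string{}) // "[]string"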
+func typeName(x interface{}) string { + t := reflect.ValueOf(x).Type() + return strings.Replace(fmt.Sprint(t), "main.", "", 1) +} diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go new file mode 100644 index 000000000..2acb0355a --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/gen.go @@ -0,0 +1,281 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gen contains common code for the various code generation tools in the +// text repository. Its usage ensures consistency between tools. +// +// This package defines command line flags that are common to most generation +// tools. The flags allow for specifying specific Unicode and CLDR versions +// in the public Unicode data repository (http://www.unicode.org/Public). +// +// A local Unicode data mirror can be set through the flag -local or the +// environment variable UNICODE_DIR. The former takes precedence. The local +// directory should follow the same structure as the public repository. +// +// IANA data can also optionally be mirrored by putting it in the iana directory +// rooted at the top of the local mirror. Beware, though, that IANA data is not +// versioned. So it is up to the developer to use the right version. +package gen // import "golang.org/x/text/internal/gen" + +import ( + "bytes" + "flag" + "fmt" + "go/build" + "go/format" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path" + "path/filepath" + "sync" + "unicode" + + "golang.org/x/text/unicode/cldr" +) + +var ( + url = flag.String("url", + "http://www.unicode.org/Public", + "URL of Unicode database directory") + iana = flag.String("iana", + "http://www.iana.org", + "URL of the IANA repository") + unicodeVersion = flag.String("unicode", + getEnv("UNICODE_VERSION", unicode.Version), + "unicode version to use") + cldrVersion = flag.String("cldr", + getEnv("CLDR_VERSION", cldr.Version), + "cldr version to use") +) + +func getEnv(name, def string) string { + if v := os.Getenv(name); v != "" { + return v + } + return def +} + +// Init performs common initialization for a gen command. It parses the flags +// and sets up the standard logging parameters. +func Init() { + log.SetPrefix("") + log.SetFlags(log.Lshortfile) + flag.Parse() +} + +const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package %s + +` + +// UnicodeVersion reports the requested Unicode version. +func UnicodeVersion() string { + return *unicodeVersion +} + +// UnicodeVersion reports the requested CLDR version. +func CLDRVersion() string { + return *cldrVersion +} + +// IsLocal reports whether data files are available locally. +func IsLocal() bool { + dir, err := localReadmeFile() + if err != nil { + return false + } + if _, err = os.Stat(dir); err != nil { + return false + } + return true +} + +// OpenUCDFile opens the requested UCD file. The file is specified relative to +// the public Unicode root directory. It will call log.Fatal if there are any +// errors. +func OpenUCDFile(file string) io.ReadCloser { + return openUnicode(path.Join(*unicodeVersion, "ucd", file)) +} + +// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there +// are any errors. 
+func OpenCLDRCoreZip() io.ReadCloser { + return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") +} + +// OpenUnicodeFile opens the requested file of the requested category from the +// root of the Unicode data archive. The file is specified relative to the +// public Unicode root directory. If version is "", it will use the default +// Unicode version. It will call log.Fatal if there are any errors. +func OpenUnicodeFile(category, version, file string) io.ReadCloser { + if version == "" { + version = UnicodeVersion() + } + return openUnicode(path.Join(category, version, file)) +} + +// OpenIANAFile opens the requested IANA file. The file is specified relative +// to the IANA root, which is typically either http://www.iana.org or the +// iana directory in the local mirror. It will call log.Fatal if there are any +// errors. +func OpenIANAFile(path string) io.ReadCloser { + return Open(*iana, "iana", path) +} + +var ( + dirMutex sync.Mutex + localDir string +) + +const permissions = 0755 + +func localReadmeFile() (string, error) { + p, err := build.Import("golang.org/x/text", "", build.FindOnly) + if err != nil { + return "", fmt.Errorf("Could not locate package: %v", err) + } + return filepath.Join(p.Dir, "DATA", "README"), nil +} + +func getLocalDir() string { + dirMutex.Lock() + defer dirMutex.Unlock() + + readme, err := localReadmeFile() + if err != nil { + log.Fatal(err) + } + dir := filepath.Dir(readme) + if _, err := os.Stat(readme); err != nil { + if err := os.MkdirAll(dir, permissions); err != nil { + log.Fatalf("Could not create directory: %v", err) + } + ioutil.WriteFile(readme, []byte(readmeTxt), permissions) + } + return dir +} + +const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT. + +This directory contains downloaded files used to generate the various tables +in the golang.org/x/text subrepo. + +Note that the language subtag repo (iana/assignments/language-subtag-registry) +and all other times in the iana subdirectory are not versioned and will need +to be periodically manually updated. The easiest way to do this is to remove +the entire iana directory. This is mostly of concern when updating the language +package. +` + +// Open opens subdir/path if a local directory is specified and the file exists, +// where subdir is a directory relative to the local root, or fetches it from +// urlRoot/path otherwise. It will call log.Fatal if there are any errors. +func Open(urlRoot, subdir, path string) io.ReadCloser { + file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path)) + return open(file, urlRoot, path) +} + +func openUnicode(path string) io.ReadCloser { + file := filepath.Join(getLocalDir(), filepath.FromSlash(path)) + return open(file, *url, path) +} + +// TODO: automatically periodically update non-versioned files. 
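+
+// As an informal sketch, a typical table generator uses these helpers roughly
+// as follows (the UCD file name and the parsing step are illustrative only):
+//
+//	gen.Init()                              // parse the -url/-unicode/-cldr flags
+//	r := gen.OpenUCDFile("UnicodeData.txt") // local mirror if present, else fetched
+//	defer r.Close()
+//	// ... parse r, then emit tables with a CodeWriter or gen.WriteGoFile ...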
+ +func open(file, urlRoot, path string) io.ReadCloser { + if f, err := os.Open(file); err == nil { + return f + } + r := get(urlRoot, path) + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + log.Fatalf("Could not download file: %v", err) + } + os.MkdirAll(filepath.Dir(file), permissions) + if err := ioutil.WriteFile(file, b, permissions); err != nil { + log.Fatalf("Could not create file: %v", err) + } + return ioutil.NopCloser(bytes.NewReader(b)) +} + +func get(root, path string) io.ReadCloser { + url := root + "/" + path + fmt.Printf("Fetching %s...", url) + defer fmt.Println(" done.") + resp, err := http.Get(url) + if err != nil { + log.Fatalf("HTTP GET: %v", err) + } + if resp.StatusCode != 200 { + log.Fatalf("Bad GET status for %q: %q", url, resp.Status) + } + return resp.Body +} + +// TODO: use Write*Version in all applicable packages. + +// WriteUnicodeVersion writes a constant for the Unicode version from which the +// tables are generated. +func WriteUnicodeVersion(w io.Writer) { + fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) +} + +// WriteCLDRVersion writes a constant for the CLDR version from which the +// tables are generated. +func WriteCLDRVersion(w io.Writer) { + fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) +} + +// WriteGoFile prepends a standard file comment and package statement to the +// given bytes, applies gofmt, and writes them to a file with the given name. +// It will call log.Fatal if there are any errors. +func WriteGoFile(filename, pkg string, b []byte) { + w, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer w.Close() + if _, err = WriteGo(w, pkg, b); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo prepends a standard file comment and package statement to the given +// bytes, applies gofmt, and writes them to w. +func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) { + src := []byte(fmt.Sprintf(header, pkg)) + src = append(src, b...) + formatted, err := format.Source(src) + if err != nil { + // Print the generated code even in case of an error so that the + // returned error can be meaningfully interpreted. + n, _ = w.Write(src) + return n, err + } + return w.Write(formatted) +} + +// Repackage rewrites a Go file from belonging to package main to belonging to +// the given package. +func Repackage(inFile, outFile, pkg string) { + src, err := ioutil.ReadFile(inFile) + if err != nil { + log.Fatalf("reading %s: %v", inFile, err) + } + const toDelete = "package main\n\n" + i := bytes.Index(src, []byte(toDelete)) + if i < 0 { + log.Fatalf("Could not find %q in %s.", toDelete, inFile) + } + w := &bytes.Buffer{} + w.Write(src[i+len(toDelete):]) + WriteGoFile(outFile, pkg, w.Bytes()) +} diff --git a/vendor/golang.org/x/text/internal/triegen/compact.go b/vendor/golang.org/x/text/internal/triegen/compact.go new file mode 100644 index 000000000..397b975c1 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/compact.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +// This file defines Compacter and its implementations. 
+ +import "io" + +// A Compacter generates an alternative, more space-efficient way to store a +// trie value block. A trie value block holds all possible values for the last +// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block +// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0). +type Compacter interface { + // Size returns whether the Compacter could encode the given block as well + // as its size in case it can. len(v) is always 64. + Size(v []uint64) (sz int, ok bool) + + // Store stores the block using the Compacter's compression method. + // It returns a handle with which the block can be retrieved. + // len(v) is always 64. + Store(v []uint64) uint32 + + // Print writes the data structures associated to the given store to w. + Print(w io.Writer) error + + // Handler returns the name of a function that gets called during trie + // lookup for blocks generated by the Compacter. The function should be of + // the form func (n uint32, b byte) uint64, where n is the index returned by + // the Compacter's Store method and b is the last byte of the UTF-8 + // encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the + // block. + Handler() string +} + +// simpleCompacter is the default Compacter used by builder. It implements a +// normal trie block. +type simpleCompacter builder + +func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { + return blockSize * b.ValueSize, true +} + +func (b *simpleCompacter) Store(v []uint64) uint32 { + h := uint32(len(b.ValueBlocks) - blockOffset) + b.ValueBlocks = append(b.ValueBlocks, v) + return h +} + +func (b *simpleCompacter) Print(io.Writer) error { + // Structures are printed in print.go. + return nil +} + +func (b *simpleCompacter) Handler() string { + panic("Handler should be special-cased for this Compacter") +} diff --git a/vendor/golang.org/x/text/internal/triegen/print.go b/vendor/golang.org/x/text/internal/triegen/print.go new file mode 100644 index 000000000..8d9f120bc --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/print.go @@ -0,0 +1,251 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// print writes all the data structures as well as the code necessary to use the +// trie to w. +func (b *builder) print(w io.Writer) error { + b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize + b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize + b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize + b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize + b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize + + // If we only have one root trie, all starter blocks are at position 0 and + // we can access the arrays directly. + if len(b.Trie) == 1 { + // At this point we cannot refer to the generated tables directly. + b.ASCIIBlock = b.Name + "Values" + b.StarterBlock = b.Name + "Index" + } else { + // Otherwise we need to have explicit starter indexes in the trie + // structure. 
+ b.ASCIIBlock = "t.ascii" + b.StarterBlock = "t.utf8Start" + } + + b.SourceType = "[]byte" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + b.SourceType = "string" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + if err := trieGen.Execute(w, b); err != nil { + return err + } + + for _, c := range b.Compactions { + if err := c.c.Print(w); err != nil { + return err + } + } + + return nil +} + +func printValues(n int, values []uint64) string { + w := &bytes.Buffer{} + boff := n * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) + var newline bool + for i, v := range values { + if i%6 == 0 { + newline = true + } + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) + } + } + return w.String() +} + +func printIndex(b *builder, nr int, n *node) string { + w := &bytes.Buffer{} + boff := nr * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff) + var newline bool + for i, c := range n.children { + if i%8 == 0 { + newline = true + } + if c != nil { + v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index) + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v) + } + } + } + return w.String() +} + +var ( + trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{ + "printValues": printValues, + "printIndex": printIndex, + "title": strings.Title, + "dec": func(x int) int { return x - 1 }, + "psize": func(n int) string { + return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024) + }, + }).Parse(trieTemplate)) + lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate)) +) + +// TODO: consider the return type of lookup. It could be uint64, even if the +// internal value type is smaller. We will have to verify this with the +// performance of unicode/norm, which is very sensitive to such changes. +const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}} +// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}. +type {{.Name}}Trie struct { {{if $multi}} + ascii []{{.ValueType}} // index for ASCII bytes + utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0 +{{end}}} + +func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}} + h := {{.Name}}TrieHandles[i] + return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] } +} + +type {{.Name}}TrieHandle struct { + ascii, multi {{.IndexType}} +} + +// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes +var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{ +{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}} +{{end}}}{{else}} + return &{{.Name}}Trie{} +} +{{end}} +// lookupValue determines the type of block n and looks up the value for b. +func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} { + switch { {{range $i, $c := .Compactions}} + {{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}} + n -= {{$c.Offset}}{{end}} + return {{print $b.ValueType}}({{$c.Handler}}){{end}} + } +} + +// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes +// The third block is the zero block. 
+var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} { +{{range $i, $v := .ValueBlocks}}{{printValues $i $v}} +{{end}}} + +// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes +// Block 0 is the zero block. +var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} { +{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}} +{{end}}} +` + +// TODO: consider allowing zero-length strings after evaluating performance with +// unicode/norm. +const lookupTemplate = ` +// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return {{.ASCIIBlock}}[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = {{.Name}}Index[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return {{.ASCIIBlock}}[c0] + } + i := {{.StarterBlock}}[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} +` diff --git a/vendor/golang.org/x/text/internal/triegen/triegen.go b/vendor/golang.org/x/text/internal/triegen/triegen.go new file mode 100644 index 000000000..adb010812 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/triegen.go @@ -0,0 +1,494 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package triegen implements a code generator for a trie for associating +// unsigned integer values with UTF-8 encoded runes. +// +// Many of the go.text packages use tries for storing per-rune information. A +// trie is especially useful if many of the runes have the same value. If this +// is the case, many blocks can be expected to be shared allowing for +// information on many runes to be stored in little space. +// +// As most of the lookups are done directly on []byte slices, the tries use the +// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to +// runes and contributes a little bit to better performance. It also naturally +// provides a fast path for ASCII. +// +// Space is also an issue. There are many code points defined in Unicode and as +// a result tables can get quite large. So every byte counts. The triegen +// package automatically chooses the smallest integer values to represent the +// tables. Compacters allow further compression of the trie by allowing for +// alternative representations of individual trie blocks. +// +// triegen allows generating multiple tries as a single structure. This is +// useful when, for example, one wants to generate tries for several languages +// that have a lot of values in common. Some existing libraries for +// internationalization store all per-language data as a dynamically loadable +// chunk. The go.text packages are designed with the assumption that the user +// typically wants to compile in support for all supported languages, in line +// with the approach common to Go to create a single standalone binary. The +// multi-root trie approach can give significant storage savings in this +// scenario. +// +// triegen generates both tables and code. The code is optimized to use the +// automatically chosen data types. The following code is generated for a Trie +// or multiple Tries named "foo": +// - type fooTrie +// The trie type. +// +// - func newFooTrie(x int) *fooTrie +// Trie constructor, where x is the index of the trie passed to Gen. +// +// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int) +// The lookup method, where uintX is automatically chosen. +// +// - func lookupString, lookupUnsafe and lookupStringUnsafe +// Variants of the above. +// +// - var fooValues and fooIndex and any tables generated by Compacters. +// The core trie data. +// +// - var fooTrieHandles +// Indexes of starter blocks in case of multiple trie roots. +// +// It is recommended that users test the generated trie by checking the returned +// value for every rune. Such exhaustive tests are possible as the the number of +// runes in Unicode is limited. +package triegen // import "golang.org/x/text/internal/triegen" + +// TODO: Arguably, the internally optimized data types would not have to be +// exposed in the generated API. We could also investigate not generating the +// code, but using it through a package. We would have to investigate the impact +// on performance of making such change, though. For packages like unicode/norm, +// small changes like this could tank performance. + +import ( + "encoding/binary" + "fmt" + "hash/crc64" + "io" + "log" + "unicode/utf8" +) + +// builder builds a set of tries for associating values with runes. The set of +// tries can share common index and value blocks. +type builder struct { + Name string + + // ValueType is the type of the trie values looked up. + ValueType string + + // ValueSize is the byte size of the ValueType. 
+ ValueSize int + + // IndexType is the type of trie index values used for all UTF-8 bytes of + // a rune except the last one. + IndexType string + + // IndexSize is the byte size of the IndexType. + IndexSize int + + // SourceType is used when generating the lookup functions. If the user + // requests StringSupport, all lookup functions will be generated for + // string input as well. + SourceType string + + Trie []*Trie + + IndexBlocks []*node + ValueBlocks [][]uint64 + Compactions []compaction + Checksum uint64 + + ASCIIBlock string + StarterBlock string + + indexBlockIdx map[uint64]int + valueBlockIdx map[uint64]nodeIndex + asciiBlockIdx map[uint64]int + + // Stats are used to fill out the template. + Stats struct { + NValueEntries int + NValueBytes int + NIndexEntries int + NIndexBytes int + NHandleBytes int + } + + err error +} + +// A nodeIndex encodes the index of a node, which is defined by the compaction +// which stores it and an index within the compaction. For internal nodes, the +// compaction is always 0. +type nodeIndex struct { + compaction int + index int +} + +// compaction keeps track of stats used for the compaction. +type compaction struct { + c Compacter + blocks []*node + maxHandle uint32 + totalSize int + + // Used by template-based generator and thus exported. + Cutoff uint32 + Offset uint32 + Handler string +} + +func (b *builder) setError(err error) { + if b.err == nil { + b.err = err + } +} + +// An Option can be passed to Gen. +type Option func(b *builder) error + +// Compact configures the trie generator to use the given Compacter. +func Compact(c Compacter) Option { + return func(b *builder) error { + b.Compactions = append(b.Compactions, compaction{ + c: c, + Handler: c.Handler() + "(n, b)"}) + return nil + } +} + +// Gen writes Go code for a shared trie lookup structure to w for the given +// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will +// return the *nameTrie for tries[x]. A value can be looked up by using one of +// the various lookup methods defined on nameTrie. It returns the table size of +// the generated trie. +func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) { + // The index contains two dummy blocks, followed by the zero block. The zero + // block is at offset 0x80, so that the offset for the zero block for + // continuation bytes is 0. + b := &builder{ + Name: name, + Trie: tries, + IndexBlocks: []*node{{}, {}, {}}, + Compactions: []compaction{{ + Handler: name + "Values[n<<6+uint32(b)]", + }}, + // The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero + // block. + indexBlockIdx: map[uint64]int{0: 0}, + valueBlockIdx: map[uint64]nodeIndex{0: {}}, + asciiBlockIdx: map[uint64]int{}, + } + b.Compactions[0].c = (*simpleCompacter)(b) + + for _, f := range opts { + if err := f(b); err != nil { + return 0, err + } + } + b.build() + if b.err != nil { + return 0, b.err + } + if err = b.print(w); err != nil { + return 0, err + } + return b.Size(), nil +} + +// A Trie represents a single root node of a trie. A builder may build several +// overlapping tries at once. +type Trie struct { + root *node + + hiddenTrie +} + +// hiddenTrie contains values we want to be visible to the template generator, +// but hidden from the API documentation. +type hiddenTrie struct { + Name string + Checksum uint64 + ASCIIIndex int + StarterIndex int +} + +// NewTrie returns a new trie root. 
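For illustration only: NewTrie, Insert, and Gen form the typical generator workflow described in the package documentation above. The runes, values, and output file name below are invented, and since triegen is an internal package such a generator would have to live inside x/text itself.

    package main

    import (
        "log"
        "os"

        "golang.org/x/text/internal/triegen"
    )

    func main() {
        t := triegen.NewTrie("foo")
        // Only non-zero values need to be inserted; zero is the default.
        t.Insert('A', 1)
        t.Insert('é', 2)
        t.Insert('世', 3)

        w, err := os.Create("tables.go") // hypothetical output file
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        // Emits fooTrie, fooValues, fooIndex and the lookup methods to w.
        if _, err := t.Gen(w); err != nil {
            log.Fatal(err)
        }
    }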
+func NewTrie(name string) *Trie { + return &Trie{ + &node{ + children: make([]*node, blockSize), + values: make([]uint64, utf8.RuneSelf), + }, + hiddenTrie{Name: name}, + } +} + +// Gen is a convenience wrapper around the Gen func passing t as the only trie +// and uses the name passed to NewTrie. It returns the size of the generated +// tables. +func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) { + return Gen(w, t.Name, []*Trie{t}, opts...) +} + +// node is a node of the intermediate trie structure. +type node struct { + // children holds this node's children. It is always of length 64. + // A child node may be nil. + children []*node + + // values contains the values of this node. If it is non-nil, this node is + // either a root or leaf node: + // For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F]. + // For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF]. + values []uint64 + + index nodeIndex +} + +// Insert associates value with the given rune. Insert will panic if a non-zero +// value is passed for an invalid rune. +func (t *Trie) Insert(r rune, value uint64) { + if value == 0 { + return + } + s := string(r) + if []rune(s)[0] != r && value != 0 { + // Note: The UCD tables will always assign what amounts to a zero value + // to a surrogate. Allowing a zero value for an illegal rune allows + // users to iterate over [0..MaxRune] without having to explicitly + // exclude surrogates, which would be tedious. + panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r)) + } + if len(s) == 1 { + // It is a root node value (ASCII). + t.root.values[s[0]] = value + return + } + + n := t.root + for ; len(s) > 1; s = s[1:] { + if n.children == nil { + n.children = make([]*node, blockSize) + } + p := s[0] % blockSize + c := n.children[p] + if c == nil { + c = &node{} + n.children[p] = c + } + if len(s) > 2 && c.values != nil { + log.Fatalf("triegen: insert(%U): found internal node with values", r) + } + n = c + } + if n.values == nil { + n.values = make([]uint64, blockSize) + } + if n.children != nil { + log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r) + } + n.values[s[0]-0x80] = value +} + +// Size returns the number of bytes the generated trie will take to store. It +// needs to be exported as it is used in the templates. +func (b *builder) Size() int { + // Index blocks. + sz := len(b.IndexBlocks) * blockSize * b.IndexSize + + // Skip the first compaction, which represents the normal value blocks, as + // its totalSize does not account for the ASCII blocks, which are managed + // separately. + sz += len(b.ValueBlocks) * blockSize * b.ValueSize + for _, c := range b.Compactions[1:] { + sz += c.totalSize + } + + // TODO: this computation does not account for the fixed overhead of a using + // a compaction, either code or data. As for data, though, the typical + // overhead of data is in the order of bytes (2 bytes for cases). Further, + // the savings of using a compaction should anyway be substantial for it to + // be worth it. + + // For multi-root tries, we also need to account for the handles. + if len(b.Trie) > 1 { + sz += 2 * b.IndexSize * len(b.Trie) + } + return sz +} + +func (b *builder) build() { + // Compute the sizes of the values. + var vmax uint64 + for _, t := range b.Trie { + vmax = maxValue(t.root, vmax) + } + b.ValueType, b.ValueSize = getIntType(vmax) + + // Compute all block allocations. + // TODO: first compute the ASCII blocks for all tries and then the other + // nodes. 
ASCII blocks are more restricted in placement, as they require two + // blocks to be placed consecutively. Processing them first may improve + // sharing (at least one zero block can be expected to be saved.) + for _, t := range b.Trie { + b.Checksum += b.buildTrie(t) + } + + // Compute the offsets for all the Compacters. + offset := uint32(0) + for i := range b.Compactions { + c := &b.Compactions[i] + c.Offset = offset + offset += c.maxHandle + 1 + c.Cutoff = offset + } + + // Compute the sizes of indexes. + // TODO: different byte positions could have different sizes. So far we have + // not found a case where this is beneficial. + imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff) + for _, ib := range b.IndexBlocks { + if x := uint64(ib.index.index); x > imax { + imax = x + } + } + b.IndexType, b.IndexSize = getIntType(imax) +} + +func maxValue(n *node, max uint64) uint64 { + if n == nil { + return max + } + for _, c := range n.children { + max = maxValue(c, max) + } + for _, v := range n.values { + if max < v { + max = v + } + } + return max +} + +func getIntType(v uint64) (string, int) { + switch { + case v < 1<<8: + return "uint8", 1 + case v < 1<<16: + return "uint16", 2 + case v < 1<<32: + return "uint32", 4 + } + return "uint64", 8 +} + +const ( + blockSize = 64 + + // Subtract two blocks to offset 0x80, the first continuation byte. + blockOffset = 2 + + // Subtract three blocks to offset 0xC0, the first non-ASCII starter. + rootBlockOffset = 3 +) + +var crcTable = crc64.MakeTable(crc64.ISO) + +func (b *builder) buildTrie(t *Trie) uint64 { + n := t.root + + // Get the ASCII offset. For the first trie, the ASCII block will be at + // position 0. + hasher := crc64.New(crcTable) + binary.Write(hasher, binary.BigEndian, n.values) + hash := hasher.Sum64() + + v, ok := b.asciiBlockIdx[hash] + if !ok { + v = len(b.ValueBlocks) + b.asciiBlockIdx[hash] = v + + b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:]) + if v == 0 { + // Add the zero block at position 2 so that it will be assigned a + // zero reference in the lookup blocks. + // TODO: always do this? This would allow us to remove a check from + // the trie lookup, but at the expense of extra space. Analyze + // performance for unicode/norm. + b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize)) + } + } + t.ASCIIIndex = v + + // Compute remaining offsets. + t.Checksum = b.computeOffsets(n, true) + // We already subtracted the normal blockOffset from the index. Subtract the + // difference for starter bytes. + t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset) + return t.Checksum +} + +func (b *builder) computeOffsets(n *node, root bool) uint64 { + // For the first trie, the root lookup block will be at position 3, which is + // the offset for UTF-8 non-ASCII starter bytes. + first := len(b.IndexBlocks) == rootBlockOffset + if first { + b.IndexBlocks = append(b.IndexBlocks, n) + } + + // We special-case the cases where all values recursively are 0. This allows + // for the use of a zero block to which all such values can be directed. 
+ hash := uint64(0) + if n.children != nil || n.values != nil { + hasher := crc64.New(crcTable) + for _, c := range n.children { + var v uint64 + if c != nil { + v = b.computeOffsets(c, false) + } + binary.Write(hasher, binary.BigEndian, v) + } + binary.Write(hasher, binary.BigEndian, n.values) + hash = hasher.Sum64() + } + + if first { + b.indexBlockIdx[hash] = rootBlockOffset - blockOffset + } + + // Compacters don't apply to internal nodes. + if n.children != nil { + v, ok := b.indexBlockIdx[hash] + if !ok { + v = len(b.IndexBlocks) - blockOffset + b.IndexBlocks = append(b.IndexBlocks, n) + b.indexBlockIdx[hash] = v + } + n.index = nodeIndex{0, v} + } else { + h, ok := b.valueBlockIdx[hash] + if !ok { + bestI, bestSize := 0, blockSize*b.ValueSize + for i, c := range b.Compactions[1:] { + if sz, ok := c.c.Size(n.values); ok && bestSize > sz { + bestI, bestSize = i+1, sz + } + } + c := &b.Compactions[bestI] + c.totalSize += bestSize + v := c.c.Store(n.values) + if c.maxHandle < v { + c.maxHandle = v + } + h = nodeIndex{bestI, int(v)} + b.valueBlockIdx[hash] = h + } + n.index = h + } + return hash +} diff --git a/vendor/golang.org/x/text/internal/ucd/ucd.go b/vendor/golang.org/x/text/internal/ucd/ucd.go new file mode 100644 index 000000000..309e8d8b1 --- /dev/null +++ b/vendor/golang.org/x/text/internal/ucd/ucd.go @@ -0,0 +1,376 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ucd provides a parser for Unicode Character Database files, the +// format of which is defined in http://www.unicode.org/reports/tr44/. See +// http://www.unicode.org/Public/UCD/latest/ucd/ for example files. +// +// It currently does not support substitutions of missing fields. +package ucd // import "golang.org/x/text/internal/ucd" + +import ( + "bufio" + "bytes" + "errors" + "io" + "log" + "regexp" + "strconv" + "strings" +) + +// UnicodeData.txt fields. +const ( + CodePoint = iota + Name + GeneralCategory + CanonicalCombiningClass + BidiClass + DecompMapping + DecimalValue + DigitValue + NumericValue + BidiMirrored + Unicode1Name + ISOComment + SimpleUppercaseMapping + SimpleLowercaseMapping + SimpleTitlecaseMapping +) + +// Parse calls f for each entry in the given reader of a UCD file. It will close +// the reader upon return. It will call log.Fatal if any error occurred. +// +// This implements the most common usage pattern of using Parser. +func Parse(r io.ReadCloser, f func(p *Parser)) { + defer r.Close() + + p := New(r) + for p.Next() { + f(p) + } + if err := p.Err(); err != nil { + r.Close() // os.Exit will cause defers not to be called. + log.Fatal(err) + } +} + +// An Option is used to configure a Parser. +type Option func(p *Parser) + +func keepRanges(p *Parser) { + p.keepRanges = true +} + +var ( + // KeepRanges prevents the expansion of ranges. The raw ranges can be + // obtained by calling Range(0) on the parser. + KeepRanges Option = keepRanges +) + +// The Part option register a handler for lines starting with a '@'. The text +// after a '@' is available as the first field. Comments are handled as usual. +func Part(f func(p *Parser)) Option { + return func(p *Parser) { + p.partHandler = f + } +} + +// The CommentHandler option passes comments that are on a line by itself to +// a given handler. +func CommentHandler(f func(s string)) Option { + return func(p *Parser) { + p.commentHandler = f + } +} + +// A Parser parses Unicode Character Database (UCD) files. 
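For illustration only: Parse above wraps the New/Next/Err loop of the Parser defined below. A minimal sketch of reading UnicodeData.txt (the file path is invented, and ucd is an internal package):

    package main

    import (
        "fmt"
        "log"
        "os"

        "golang.org/x/text/internal/ucd"
    )

    func main() {
        f, err := os.Open("UnicodeData.txt") // hypothetical local copy of the UCD file
        if err != nil {
            log.Fatal(err)
        }
        // Parse closes f when done and exits via log.Fatal on a parse error.
        ucd.Parse(f, func(p *ucd.Parser) {
            fmt.Printf("%U %s\n", p.Rune(0), p.String(ucd.GeneralCategory))
        })
    }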
+type Parser struct { + scanner *bufio.Scanner + + keepRanges bool // Don't expand rune ranges in field 0. + + err error + comment []byte + field [][]byte + // parsedRange is needed in case Range(0) is called more than once for one + // field. In some cases this requires scanning ahead. + parsedRange bool + rangeStart, rangeEnd rune + + partHandler func(p *Parser) + commentHandler func(s string) +} + +func (p *Parser) setError(err error) { + if p.err == nil { + p.err = err + } +} + +func (p *Parser) getField(i int) []byte { + if i >= len(p.field) { + return nil + } + return p.field[i] +} + +// Err returns a non-nil error if any error occurred during parsing. +func (p *Parser) Err() error { + return p.err +} + +// New returns a Parser for the given Reader. +func New(r io.Reader, o ...Option) *Parser { + p := &Parser{ + scanner: bufio.NewScanner(r), + } + for _, f := range o { + f(p) + } + return p +} + +// Next parses the next line in the file. It returns true if a line was parsed +// and false if it reached the end of the file. +func (p *Parser) Next() bool { + if !p.keepRanges && p.rangeStart < p.rangeEnd { + p.rangeStart++ + return true + } + p.comment = nil + p.field = p.field[:0] + p.parsedRange = false + + for p.scanner.Scan() { + b := p.scanner.Bytes() + if len(b) == 0 { + continue + } + if b[0] == '#' { + if p.commentHandler != nil { + p.commentHandler(strings.TrimSpace(string(b[1:]))) + } + continue + } + + // Parse line + if i := bytes.IndexByte(b, '#'); i != -1 { + p.comment = bytes.TrimSpace(b[i+1:]) + b = b[:i] + } + if b[0] == '@' { + if p.partHandler != nil { + p.field = append(p.field, bytes.TrimSpace(b[1:])) + p.partHandler(p) + p.field = p.field[:0] + } + p.comment = nil + continue + } + for { + i := bytes.IndexByte(b, ';') + if i == -1 { + p.field = append(p.field, bytes.TrimSpace(b)) + break + } + p.field = append(p.field, bytes.TrimSpace(b[:i])) + b = b[i+1:] + } + if !p.keepRanges { + p.rangeStart, p.rangeEnd = p.getRange(0) + } + return true + } + p.setError(p.scanner.Err()) + return false +} + +func parseRune(b []byte) (rune, error) { + if len(b) > 2 && b[0] == 'U' && b[1] == '+' { + b = b[2:] + } + x, err := strconv.ParseUint(string(b), 16, 32) + return rune(x), err +} + +func (p *Parser) parseRune(b []byte) rune { + x, err := parseRune(b) + p.setError(err) + return x +} + +// Rune parses and returns field i as a rune. +func (p *Parser) Rune(i int) rune { + if i > 0 || p.keepRanges { + return p.parseRune(p.getField(i)) + } + return p.rangeStart +} + +// Runes interprets and returns field i as a sequence of runes. +func (p *Parser) Runes(i int) (runes []rune) { + add := func(b []byte) { + if b = bytes.TrimSpace(b); len(b) > 0 { + runes = append(runes, p.parseRune(b)) + } + } + for b := p.getField(i); ; { + i := bytes.IndexByte(b, ' ') + if i == -1 { + add(b) + break + } + add(b[:i]) + b = b[i+1:] + } + return +} + +var ( + errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") + + // reRange matches one line of a legacy rune range. + reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") +) + +// Range parses and returns field i as a rune range. A range is inclusive at +// both ends. If the field only has one rune, first and last will be identical. +// It supports the legacy format for ranges used in UnicodeData.txt. 
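For illustration only, assuming the fmt, io, and ucd imports: when the raw ranges are wanted rather than the default expansion, the KeepRanges option combines with Range(0) as sketched here.

    // printRanges is a hypothetical helper, not part of the package.
    func printRanges(r io.Reader) error {
        p := ucd.New(r, ucd.KeepRanges)
        for p.Next() {
            first, last := p.Range(0)
            fmt.Printf("%U..%U\n", first, last)
        }
        return p.Err()
    }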
+func (p *Parser) Range(i int) (first, last rune) { + if !p.keepRanges { + return p.rangeStart, p.rangeStart + } + return p.getRange(i) +} + +func (p *Parser) getRange(i int) (first, last rune) { + b := p.getField(i) + if k := bytes.Index(b, []byte("..")); k != -1 { + return p.parseRune(b[:k]), p.parseRune(b[k+2:]) + } + // The first field may not be a rune, in which case we may ignore any error + // and set the range as 0..0. + x, err := parseRune(b) + if err != nil { + // Disable range parsing henceforth. This ensures that an error will be + // returned if the user subsequently will try to parse this field as + // a Rune. + p.keepRanges = true + } + // Special case for UnicodeData that was retained for backwards compatibility. + if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) { + if p.parsedRange { + return p.rangeStart, p.rangeEnd + } + mf := reRange.FindStringSubmatch(p.scanner.Text()) + if mf == nil || !p.scanner.Scan() { + p.setError(errIncorrectLegacyRange) + return x, x + } + // Using Bytes would be more efficient here, but Text is a lot easier + // and this is not a frequent case. + ml := reRange.FindStringSubmatch(p.scanner.Text()) + if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] { + p.setError(errIncorrectLegacyRange) + return x, x + } + p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])]) + p.parsedRange = true + return p.rangeStart, p.rangeEnd + } + return x, x +} + +// bools recognizes all valid UCD boolean values. +var bools = map[string]bool{ + "": false, + "N": false, + "No": false, + "F": false, + "False": false, + "Y": true, + "Yes": true, + "T": true, + "True": true, +} + +// Bool parses and returns field i as a boolean value. +func (p *Parser) Bool(i int) bool { + b := p.getField(i) + for s, v := range bools { + if bstrEq(b, s) { + return v + } + } + p.setError(strconv.ErrSyntax) + return false +} + +// Int parses and returns field i as an integer value. +func (p *Parser) Int(i int) int { + x, err := strconv.ParseInt(string(p.getField(i)), 10, 64) + p.setError(err) + return int(x) +} + +// Uint parses and returns field i as an unsigned integer value. +func (p *Parser) Uint(i int) uint { + x, err := strconv.ParseUint(string(p.getField(i)), 10, 64) + p.setError(err) + return uint(x) +} + +// Float parses and returns field i as a decimal value. +func (p *Parser) Float(i int) float64 { + x, err := strconv.ParseFloat(string(p.getField(i)), 64) + p.setError(err) + return x +} + +// String parses and returns field i as a string value. +func (p *Parser) String(i int) string { + return string(p.getField(i)) +} + +// Strings parses and returns field i as a space-separated list of strings. +func (p *Parser) Strings(i int) []string { + ss := strings.Split(string(p.getField(i)), " ") + for i, s := range ss { + ss[i] = strings.TrimSpace(s) + } + return ss +} + +// Comment returns the comments for the current line. +func (p *Parser) Comment() string { + return string(p.comment) +} + +var errUndefinedEnum = errors.New("ucd: undefined enum value") + +// Enum interprets and returns field i as a value that must be one of the values +// in enum. 
+func (p *Parser) Enum(i int, enum ...string) string { + b := p.getField(i) + for _, s := range enum { + if bstrEq(b, s) { + return s + } + } + p.setError(errUndefinedEnum) + return "" +} + +func bstrEq(b []byte, s string) bool { + if len(b) != len(s) { + return false + } + for i, c := range b { + if c != s[i] { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule.go b/vendor/golang.org/x/text/secure/bidirule/bidirule.go new file mode 100644 index 000000000..a7161bdd9 --- /dev/null +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule.go @@ -0,0 +1,342 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bidirule implements the Bidi Rule defined by RFC 5893. +// +// This package is under development. The API may change without notice and +// without preserving backward compatibility. +package bidirule + +import ( + "errors" + "unicode/utf8" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/bidi" +) + +// This file contains an implementation of RFC 5893: Right-to-Left Scripts for +// Internationalized Domain Names for Applications (IDNA) +// +// A label is an individual component of a domain name. Labels are usually +// shown separated by dots; for example, the domain name "www.example.com" is +// composed of three labels: "www", "example", and "com". +// +// An RTL label is a label that contains at least one character of class R, AL, +// or AN. An LTR label is any label that is not an RTL label. +// +// A "Bidi domain name" is a domain name that contains at least one RTL label. +// +// The following guarantees can be made based on the above: +// +// o In a domain name consisting of only labels that satisfy the rule, +// the requirements of Section 3 are satisfied. Note that even LTR +// labels and pure ASCII labels have to be tested. +// +// o In a domain name consisting of only LDH labels (as defined in the +// Definitions document [RFC5890]) and labels that satisfy the rule, +// the requirements of Section 3 are satisfied as long as a label +// that starts with an ASCII digit does not come after a +// right-to-left label. +// +// No guarantee is given for other combinations. + +// ErrInvalid indicates a label is invalid according to the Bidi Rule. +var ErrInvalid = errors.New("bidirule: failed Bidi Rule") + +type ruleState uint8 + +const ( + ruleInitial ruleState = iota + ruleLTR + ruleLTRFinal + ruleRTL + ruleRTLFinal + ruleInvalid +) + +type ruleTransition struct { + next ruleState + mask uint16 +} + +var transitions = [...][2]ruleTransition{ + // [2.1] The first character must be a character with Bidi property L, R, or + // AL. If it has the R or AL property, it is an RTL label; if it has the L + // property, it is an LTR label. + ruleInitial: { + {ruleLTRFinal, 1 << bidi.L}, + {ruleRTLFinal, 1< 0 bytes returned + // before considering the error". + if r.src0 != r.src1 || r.err != nil { + r.dst0 = 0 + r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF) + r.src0 += n + + switch { + case err == nil: + if r.src0 != r.src1 { + r.err = errInconsistentByteCount + } + // The Transform call was successful; we are complete if we + // cannot read more bytes into src. + r.transformComplete = r.err != nil + continue + case err == ErrShortDst && (r.dst1 != 0 || n != 0): + // Make room in dst by copying out, and try again. 
+ continue + case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil: + // Read more bytes into src via the code below, and try again. + default: + r.transformComplete = true + // The reader error (r.err) takes precedence over the + // transformer error (err) unless r.err is nil or io.EOF. + if r.err == nil || r.err == io.EOF { + r.err = err + } + continue + } + } + + // Move any untransformed source bytes to the start of the buffer + // and read more bytes. + if r.src0 != 0 { + r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1]) + } + n, r.err = r.r.Read(r.src[r.src1:]) + r.src1 += n + } +} + +// TODO: implement ReadByte (and ReadRune??). + +// Writer wraps another io.Writer by transforming the bytes read. +// The user needs to call Close to flush unwritten bytes that may +// be buffered. +type Writer struct { + w io.Writer + t Transformer + dst []byte + + // src[:n] contains bytes that have not yet passed through t. + src []byte + n int +} + +// NewWriter returns a new Writer that wraps w by transforming the bytes written +// via t. It calls Reset on t. +func NewWriter(w io.Writer, t Transformer) *Writer { + t.Reset() + return &Writer{ + w: w, + t: t, + dst: make([]byte, defaultBufSize), + src: make([]byte, defaultBufSize), + } +} + +// Write implements the io.Writer interface. If there are not enough +// bytes available to complete a Transform, the bytes will be buffered +// for the next write. Call Close to convert the remaining bytes. +func (w *Writer) Write(data []byte) (n int, err error) { + src := data + if w.n > 0 { + // Append bytes from data to the last remainder. + // TODO: limit the amount copied on first try. + n = copy(w.src[w.n:], data) + w.n += n + src = w.src[:w.n] + } + for { + nDst, nSrc, err := w.t.Transform(w.dst, src, false) + if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { + return n, werr + } + src = src[nSrc:] + if w.n == 0 { + n += nSrc + } else if len(src) <= n { + // Enough bytes from w.src have been consumed. We make src point + // to data instead to reduce the copying. + w.n = 0 + n -= len(src) + src = data[n:] + if n < len(data) && (err == nil || err == ErrShortSrc) { + continue + } + } + switch err { + case ErrShortDst: + // This error is okay as long as we are making progress. + if nDst > 0 || nSrc > 0 { + continue + } + case ErrShortSrc: + if len(src) < len(w.src) { + m := copy(w.src, src) + // If w.n > 0, bytes from data were already copied to w.src and n + // was already set to the number of bytes consumed. + if w.n == 0 { + n += m + } + w.n = m + err = nil + } else if nDst > 0 || nSrc > 0 { + // Not enough buffer to store the remainder. Keep processing as + // long as there is progress. Without this case, transforms that + // require a lookahead larger than the buffer may result in an + // error. This is not something one may expect to be common in + // practice, but it may occur when buffers are set to small + // sizes during testing. + continue + } + case nil: + if w.n > 0 { + err = errInconsistentByteCount + } + } + return n, err + } +} + +// Close implements the io.Closer interface. 
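For illustration only: Reader, Writer, Chain, and the one-shot String helper compose in the usual io fashion. The sketch below uses the no-op transformer purely as a stand-in for a real Transformer.

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "strings"

        "golang.org/x/text/transform"
    )

    func main() {
        t := transform.Nop // stand-in; a real program would pass a meaningful Transformer

        // Pull style: wrap an io.Reader.
        out, _ := ioutil.ReadAll(transform.NewReader(strings.NewReader("héllo"), t))

        // Push style: wrap an io.Writer; Close flushes any buffered bytes.
        var buf bytes.Buffer
        w := transform.NewWriter(&buf, transform.Chain(t, t))
        w.Write([]byte("héllo"))
        w.Close()

        // One-shot helper.
        s, _, _ := transform.String(t, "héllo")
        fmt.Println(string(out), buf.String(), s)
    }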
+func (w *Writer) Close() error { + src := w.src[:w.n] + for { + nDst, nSrc, err := w.t.Transform(w.dst, src, true) + if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { + return werr + } + if err != ErrShortDst { + return err + } + src = src[nSrc:] + } +} + +type nop struct{ NopResetter } + +func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + n := copy(dst, src) + if n < len(src) { + err = ErrShortDst + } + return n, n, err +} + +func (nop) Span(src []byte, atEOF bool) (n int, err error) { + return len(src), nil +} + +type discard struct{ NopResetter } + +func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return 0, len(src), nil +} + +var ( + // Discard is a Transformer for which all Transform calls succeed + // by consuming all bytes and writing nothing. + Discard Transformer = discard{} + + // Nop is a SpanningTransformer that copies src to dst. + Nop SpanningTransformer = nop{} +) + +// chain is a sequence of links. A chain with N Transformers has N+1 links and +// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst +// buffers given to chain.Transform and the middle N-1 buffers are intermediate +// buffers owned by the chain. The i'th link transforms bytes from the i'th +// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer +// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N). +type chain struct { + link []link + err error + // errStart is the index at which the error occurred plus 1. Processing + // errStart at this level at the next call to Transform. As long as + // errStart > 0, chain will not consume any more source bytes. + errStart int +} + +func (c *chain) fatalError(errIndex int, err error) { + if i := errIndex + 1; i > c.errStart { + c.errStart = i + c.err = err + } +} + +type link struct { + t Transformer + // b[p:n] holds the bytes to be transformed by t. + b []byte + p int + n int +} + +func (l *link) src() []byte { + return l.b[l.p:l.n] +} + +func (l *link) dst() []byte { + return l.b[l.n:] +} + +// Chain returns a Transformer that applies t in sequence. +func Chain(t ...Transformer) Transformer { + if len(t) == 0 { + return nop{} + } + c := &chain{link: make([]link, len(t)+1)} + for i, tt := range t { + c.link[i].t = tt + } + // Allocate intermediate buffers. + b := make([][defaultBufSize]byte, len(t)-1) + for i := range b { + c.link[i+1].b = b[i][:] + } + return c +} + +// Reset resets the state of Chain. It calls Reset on all the Transformers. +func (c *chain) Reset() { + for i, l := range c.link { + if l.t != nil { + l.t.Reset() + } + c.link[i].p, c.link[i].n = 0, 0 + } +} + +// TODO: make chain use Span (is going to be fun to implement!) + +// Transform applies the transformers of c in sequence. +func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + // Set up src and dst in the chain. + srcL := &c.link[0] + dstL := &c.link[len(c.link)-1] + srcL.b, srcL.p, srcL.n = src, 0, len(src) + dstL.b, dstL.n = dst, 0 + var lastFull, needProgress bool // for detecting progress + + // i is the index of the next Transformer to apply, for i in [low, high]. + // low is the lowest index for which c.link[low] may still produce bytes. + // high is the highest index for which c.link[high] has a Transformer. + // The error returned by Transform determines whether to increase or + // decrease i. We try to completely fill a buffer before converting it. 
+ for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; { + in, out := &c.link[i], &c.link[i+1] + nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i) + out.n += nDst + in.p += nSrc + if i > 0 && in.p == in.n { + in.p, in.n = 0, 0 + } + needProgress, lastFull = lastFull, false + switch err0 { + case ErrShortDst: + // Process the destination buffer next. Return if we are already + // at the high index. + if i == high { + return dstL.n, srcL.p, ErrShortDst + } + if out.n != 0 { + i++ + // If the Transformer at the next index is not able to process any + // source bytes there is nothing that can be done to make progress + // and the bytes will remain unprocessed. lastFull is used to + // detect this and break out of the loop with a fatal error. + lastFull = true + continue + } + // The destination buffer was too small, but is completely empty. + // Return a fatal error as this transformation can never complete. + c.fatalError(i, errShortInternal) + case ErrShortSrc: + if i == 0 { + // Save ErrShortSrc in err. All other errors take precedence. + err = ErrShortSrc + break + } + // Source bytes were depleted before filling up the destination buffer. + // Verify we made some progress, move the remaining bytes to the errStart + // and try to get more source bytes. + if needProgress && nSrc == 0 || in.n-in.p == len(in.b) { + // There were not enough source bytes to proceed while the source + // buffer cannot hold any more bytes. Return a fatal error as this + // transformation can never complete. + c.fatalError(i, errShortInternal) + break + } + // in.b is an internal buffer and we can make progress. + in.p, in.n = 0, copy(in.b, in.src()) + fallthrough + case nil: + // if i == low, we have depleted the bytes at index i or any lower levels. + // In that case we increase low and i. In all other cases we decrease i to + // fetch more bytes before proceeding to the next index. + if i > low { + i-- + continue + } + default: + c.fatalError(i, err0) + } + // Exhausted level low or fatal error: increase low and continue + // to process the bytes accepted so far. + i++ + low = i + } + + // If c.errStart > 0, this means we found a fatal error. We will clear + // all upstream buffers. At this point, no more progress can be made + // downstream, as Transform would have bailed while handling ErrShortDst. + if c.errStart > 0 { + for i := 1; i < c.errStart; i++ { + c.link[i].p, c.link[i].n = 0, 0 + } + err, c.errStart, c.err = c.err, 0, nil + } + return dstL.n, srcL.p, err +} + +// Deprecated: use runes.Remove instead. +func RemoveFunc(f func(r rune) bool) Transformer { + return removeF(f) +} + +type removeF func(r rune) bool + +func (removeF) Reset() {} + +// Transform implements the Transformer interface. +func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] { + + if r = rune(src[0]); r < utf8.RuneSelf { + sz = 1 + } else { + r, sz = utf8.DecodeRune(src) + + if sz == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src) { + err = ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. 
+ if !t(r) { + if nDst+3 > len(dst) { + err = ErrShortDst + break + } + nDst += copy(dst[nDst:], "\uFFFD") + } + nSrc++ + continue + } + } + + if !t(r) { + if nDst+sz > len(dst) { + err = ErrShortDst + break + } + nDst += copy(dst[nDst:], src[:sz]) + } + nSrc += sz + } + return +} + +// grow returns a new []byte that is longer than b, and copies the first n bytes +// of b to the start of the new slice. +func grow(b []byte, n int) []byte { + m := len(b) + if m <= 32 { + m = 64 + } else if m <= 256 { + m *= 2 + } else { + m += m >> 1 + } + buf := make([]byte, m) + copy(buf, b[:n]) + return buf +} + +const initialBufSize = 128 + +// String returns a string with the result of converting s[:n] using t, where +// n <= len(s). If err == nil, n will be len(s). It calls Reset on t. +func String(t Transformer, s string) (result string, n int, err error) { + t.Reset() + if s == "" { + // Fast path for the common case for empty input. Results in about a + // 86% reduction of running time for BenchmarkStringLowerEmpty. + if _, _, err := t.Transform(nil, nil, true); err == nil { + return "", 0, nil + } + } + + // Allocate only once. Note that both dst and src escape when passed to + // Transform. + buf := [2 * initialBufSize]byte{} + dst := buf[:initialBufSize:initialBufSize] + src := buf[initialBufSize : 2*initialBufSize] + + // The input string s is transformed in multiple chunks (starting with a + // chunk size of initialBufSize). nDst and nSrc are per-chunk (or + // per-Transform-call) indexes, pDst and pSrc are overall indexes. + nDst, nSrc := 0, 0 + pDst, pSrc := 0, 0 + + // pPrefix is the length of a common prefix: the first pPrefix bytes of the + // result will equal the first pPrefix bytes of s. It is not guaranteed to + // be the largest such value, but if pPrefix, len(result) and len(s) are + // all equal after the final transform (i.e. calling Transform with atEOF + // being true returned nil error) then we don't need to allocate a new + // result string. + pPrefix := 0 + for { + // Invariant: pDst == pPrefix && pSrc == pPrefix. + + n := copy(src, s[pSrc:]) + nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s)) + pDst += nDst + pSrc += nSrc + + // TODO: let transformers implement an optional Spanner interface, akin + // to norm's QuickSpan. This would even allow us to avoid any allocation. + if !bytes.Equal(dst[:nDst], src[:nSrc]) { + break + } + pPrefix = pSrc + if err == ErrShortDst { + // A buffer can only be short if a transformer modifies its input. + break + } else if err == ErrShortSrc { + if nSrc == 0 { + // No progress was made. + break + } + // Equal so far and !atEOF, so continue checking. + } else if err != nil || pPrefix == len(s) { + return string(s[:pPrefix]), pPrefix, err + } + } + // Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc. + + // We have transformed the first pSrc bytes of the input s to become pDst + // transformed bytes. Those transformed bytes are discontiguous: the first + // pPrefix of them equal s[:pPrefix] and the last nDst of them equal + // dst[:nDst]. We copy them around, into a new dst buffer if necessary, so + // that they become one contiguous slice: dst[:pDst]. + if pPrefix != 0 { + newDst := dst + if pDst > len(newDst) { + newDst = make([]byte, len(s)+nDst-nSrc) + } + copy(newDst[pPrefix:pDst], dst[:nDst]) + copy(newDst[:pPrefix], s[:pPrefix]) + dst = newDst + } + + // Prevent duplicate Transform calls with atEOF being true at the end of + // the input. Also return if we have an unrecoverable error. 
+ if (err == nil && pSrc == len(s)) || + (err != nil && err != ErrShortDst && err != ErrShortSrc) { + return string(dst[:pDst]), pSrc, err + } + + // Transform the remaining input, growing dst and src buffers as necessary. + for { + n := copy(src, s[pSrc:]) + nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s)) + pDst += nDst + pSrc += nSrc + + // If we got ErrShortDst or ErrShortSrc, do not grow as long as we can + // make progress. This may avoid excessive allocations. + if err == ErrShortDst { + if nDst == 0 { + dst = grow(dst, pDst) + } + } else if err == ErrShortSrc { + if nSrc == 0 { + src = grow(src, 0) + } + } else if err != nil || pSrc == len(s) { + return string(dst[:pDst]), pSrc, err + } + } +} + +// Bytes returns a new byte slice with the result of converting b[:n] using t, +// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t. +func Bytes(t Transformer, b []byte) (result []byte, n int, err error) { + return doAppend(t, 0, make([]byte, len(b)), b) +} + +// Append appends the result of converting src[:n] using t to dst, where +// n <= len(src), If err == nil, n will be len(src). It calls Reset on t. +func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) { + if len(dst) == cap(dst) { + n := len(src) + len(dst) // It is okay for this to be 0. + b := make([]byte, n) + dst = b[:copy(b, dst)] + } + return doAppend(t, len(dst), dst[:cap(dst)], src) +} + +func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) { + t.Reset() + pSrc := 0 + for { + nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true) + pDst += nDst + pSrc += nSrc + if err != ErrShortDst { + return dst[:pDst], pSrc, err + } + + // Grow the destination buffer, but do not grow as long as we can make + // progress. This may avoid excessive allocations. + if nDst == 0 { + dst = grow(dst, pDst) + } + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/bidi.go b/vendor/golang.org/x/text/unicode/bidi/bidi.go new file mode 100644 index 000000000..3fc4a6252 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/bidi.go @@ -0,0 +1,198 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_trieval.go gen_ranges.go + +// Package bidi contains functionality for bidirectional text support. +// +// See http://www.unicode.org/reports/tr9. +// +// NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways +// and without notice. +package bidi // import "golang.org/x/text/unicode/bidi" + +// TODO: +// The following functionality would not be hard to implement, but hinges on +// the definition of a Segmenter interface. For now this is up to the user. +// - Iterate over paragraphs +// - Segmenter to iterate over runs directly from a given text. +// Also: +// - Transformer for reordering? +// - Transformer (validator, really) for Bidi Rule. + +// This API tries to avoid dealing with embedding levels for now. Under the hood +// these will be computed, but the question is to which extent the user should +// know they exist. We should at some point allow the user to specify an +// embedding hierarchy, though. + +// A Direction indicates the overall flow of text. 
+type Direction int
+
+const (
+    // LeftToRight indicates the text contains no right-to-left characters and
+    // that either there are some left-to-right characters or the option
+    // DefaultDirection(LeftToRight) was passed.
+    LeftToRight Direction = iota
+
+    // RightToLeft indicates the text contains no left-to-right characters and
+    // that either there are some right-to-left characters or the option
+    // DefaultDirection(RightToLeft) was passed.
+    RightToLeft
+
+    // Mixed indicates text contains both left-to-right and right-to-left
+    // characters.
+    Mixed
+
+    // Neutral means that text contains no left-to-right and right-to-left
+    // characters and that no default direction has been set.
+    Neutral
+)
+
+type options struct{}
+
+// An Option is an option for Bidi processing.
+type Option func(*options)
+
+// ICU allows the user to define embedding levels. This may be used, for example,
+// to use hierarchical structure of markup languages to define embeddings.
+// The following option may be a way to expose this functionality in this API.
+// // LevelFunc sets a function that associates nesting levels with the given text.
+// // The levels function will be called with monotonically increasing values for p.
+// func LevelFunc(levels func(p int) int) Option {
+//     panic("unimplemented")
+// }
+
+// DefaultDirection sets the default direction for a Paragraph. The direction is
+// overridden if the text contains directional characters.
+func DefaultDirection(d Direction) Option {
+    panic("unimplemented")
+}
+
+// A Paragraph holds a single Paragraph for Bidi processing.
+type Paragraph struct {
+    // buffers
+}
+
+// SetBytes configures p for the given paragraph text. It replaces text
+// previously set by SetBytes or SetString. If b contains a paragraph separator
+// it will only process the first paragraph and report the number of bytes
+// consumed from b including this separator. Error may be non-nil if options are
+// given.
+func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) {
+    panic("unimplemented")
+}
+
+// SetString configures p for the given paragraph text. It replaces text
+// previously set by SetBytes or SetString. If s contains a paragraph separator
+// it will only process the first paragraph and report the number of bytes
+// consumed from s including this separator. Error may be non-nil if options are
+// given.
+func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) {
+    panic("unimplemented")
+}
+
+// IsLeftToRight reports whether the principal direction of rendering for this
+// paragraph is left-to-right. If this returns false, the principal direction
+// of rendering is right-to-left.
+func (p *Paragraph) IsLeftToRight() bool {
+    panic("unimplemented")
+}
+
+// Direction returns the direction of the text of this paragraph.
+//
+// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
+func (p *Paragraph) Direction() Direction {
+    panic("unimplemented")
+}
+
+// RunAt reports the Run at the given position of the input text.
+//
+// This method can be used for computing line breaks on paragraphs.
+func (p *Paragraph) RunAt(pos int) Run {
+    panic("unimplemented")
+}
+
+// Order computes the visual ordering of all the runs in a Paragraph.
+func (p *Paragraph) Order() (Ordering, error) {
+    panic("unimplemented")
+}
+
+// Line computes the visual ordering of runs for a single line starting and
+// ending at the given positions in the original text.
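For illustration only: every Paragraph, Ordering, and Run method above is currently a stub that panics with "unimplemented", so the following sketch shows only the usage implied by those signatures (assuming the usual fmt and log imports plus the bidi import), not code that runs against this snapshot of the package.

    // Sketch of intended usage; these methods panic in the current snapshot.
    var p bidi.Paragraph
    if _, err := p.SetString("abc \u05e9\u05dc\u05d5\u05dd def"); err != nil {
        log.Fatal(err)
    }
    o, err := p.Order()
    if err != nil {
        log.Fatal(err)
    }
    for i := 0; i < o.NumRuns(); i++ {
        run := o.Run(i)
        fmt.Println(run.Direction(), run.String())
    }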
+func (p *Paragraph) Line(start, end int) (Ordering, error) { + panic("unimplemented") +} + +// An Ordering holds the computed visual order of runs of a Paragraph. Calling +// SetBytes or SetString on the originating Paragraph invalidates an Ordering. +// The methods of an Ordering should only be called by one goroutine at a time. +type Ordering struct{} + +// Direction reports the directionality of the runs. +// +// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. +func (o *Ordering) Direction() Direction { + panic("unimplemented") +} + +// NumRuns returns the number of runs. +func (o *Ordering) NumRuns() int { + panic("unimplemented") +} + +// Run returns the ith run within the ordering. +func (o *Ordering) Run(i int) Run { + panic("unimplemented") +} + +// TODO: perhaps with options. +// // Reorder creates a reader that reads the runes in visual order per character. +// // Modifiers remain after the runes they modify. +// func (l *Runs) Reorder() io.Reader { +// panic("unimplemented") +// } + +// A Run is a continuous sequence of characters of a single direction. +type Run struct { +} + +// String returns the text of the run in its original order. +func (r *Run) String() string { + panic("unimplemented") +} + +// Bytes returns the text of the run in its original order. +func (r *Run) Bytes() []byte { + panic("unimplemented") +} + +// TODO: methods for +// - Display order +// - headers and footers +// - bracket replacement. + +// Direction reports the direction of the run. +func (r *Run) Direction() Direction { + panic("unimplemented") +} + +// Position of the Run within the text passed to SetBytes or SetString of the +// originating Paragraph value. +func (r *Run) Pos() (start, end int) { + panic("unimplemented") +} + +// AppendReverse reverses the order of characters of in, appends them to out, +// and returns the result. Modifiers will still follow the runes they modify. +// Brackets are replaced with their counterparts. +func AppendReverse(out, in []byte) []byte { + panic("unimplemented") +} + +// ReverseString reverses the order of characters in s and returns a new string. +// Modifiers will still follow the runes they modify. Brackets are replaced with +// their counterparts. +func ReverseString(s string) string { + panic("unimplemented") +} diff --git a/vendor/golang.org/x/text/unicode/bidi/bracket.go b/vendor/golang.org/x/text/unicode/bidi/bracket.go new file mode 100644 index 000000000..601e25920 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/bracket.go @@ -0,0 +1,335 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import ( + "container/list" + "fmt" + "sort" +) + +// This file contains a port of the reference implementation of the +// Bidi Parentheses Algorithm: +// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java +// +// The implementation in this file covers definitions BD14-BD16 and rule N0 +// of UAX#9. +// +// Some preprocessing is done for each rune before data is passed to this +// algorithm: +// - opening and closing brackets are identified +// - a bracket pair type, like '(' and ')' is assigned a unique identifier that +// is identical for the opening and closing bracket. It is left to do these +// mappings. +// - The BPA algorithm requires that bracket characters that are canonical +// equivalents of each other be able to be substituted for each other. 
+// It is the responsibility of the caller to do this canonicalization. +// +// In implementing BD16, this implementation departs slightly from the "logical" +// algorithm defined in UAX#9. In particular, the stack referenced there +// supports operations that go beyond a "basic" stack. An equivalent +// implementation based on a linked list is used here. + +// Bidi_Paired_Bracket_Type +// BD14. An opening paired bracket is a character whose +// Bidi_Paired_Bracket_Type property value is Open. +// +// BD15. A closing paired bracket is a character whose +// Bidi_Paired_Bracket_Type property value is Close. +type bracketType byte + +const ( + bpNone bracketType = iota + bpOpen + bpClose +) + +// bracketPair holds a pair of index values for opening and closing bracket +// location of a bracket pair. +type bracketPair struct { + opener int + closer int +} + +func (b *bracketPair) String() string { + return fmt.Sprintf("(%v, %v)", b.opener, b.closer) +} + +// bracketPairs is a slice of bracketPairs with a sort.Interface implementation. +type bracketPairs []bracketPair + +func (b bracketPairs) Len() int { return len(b) } +func (b bracketPairs) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b bracketPairs) Less(i, j int) bool { return b[i].opener < b[j].opener } + +// resolvePairedBrackets runs the paired bracket part of the UBA algorithm. +// +// For each rune, it takes the indexes into the original string, the class the +// bracket type (in pairTypes) and the bracket identifier (pairValues). It also +// takes the direction type for the start-of-sentence and the embedding level. +// +// The identifiers for bracket types are the rune of the canonicalized opening +// bracket for brackets (open or close) or 0 for runes that are not brackets. +func resolvePairedBrackets(s *isolatingRunSequence) { + p := bracketPairer{ + sos: s.sos, + openers: list.New(), + codesIsolatedRun: s.types, + indexes: s.indexes, + } + dirEmbed := L + if s.level&1 != 0 { + dirEmbed = R + } + p.locateBrackets(s.p.pairTypes, s.p.pairValues) + p.resolveBrackets(dirEmbed, s.p.initialTypes) +} + +type bracketPairer struct { + sos Class // direction corresponding to start of sequence + + // The following is a restatement of BD 16 using non-algorithmic language. + // + // A bracket pair is a pair of characters consisting of an opening + // paired bracket and a closing paired bracket such that the + // Bidi_Paired_Bracket property value of the former equals the latter, + // subject to the following constraints. + // - both characters of a pair occur in the same isolating run sequence + // - the closing character of a pair follows the opening character + // - any bracket character can belong at most to one pair, the earliest possible one + // - any bracket character not part of a pair is treated like an ordinary character + // - pairs may nest properly, but their spans may not overlap otherwise + + // Bracket characters with canonical decompositions are supposed to be + // treated as if they had been normalized, to allow normalized and non- + // normalized text to give the same result. In this implementation that step + // is pushed out to the caller. The caller has to ensure that the pairValue + // slices contain the rune of the opening bracket after normalization for + // any opening or closing bracket. 
+ + openers *list.List // list of positions for opening brackets + + // bracket pair positions sorted by location of opening bracket + pairPositions bracketPairs + + codesIsolatedRun []Class // directional bidi codes for an isolated run + indexes []int // array of index values into the original string + +} + +// matchOpener reports whether characters at given positions form a matching +// bracket pair. +func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool { + return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]] +} + +const maxPairingDepth = 63 + +// locateBrackets locates matching bracket pairs according to BD16. +// +// This implementation uses a linked list instead of a stack, because, while +// elements are added at the front (like a push) they are not generally removed +// in atomic 'pop' operations, reducing the benefit of the stack archetype. +func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []rune) { + // traverse the run + // do that explicitly (not in a for-each) so we can record position + for i, index := range p.indexes { + + // look at the bracket type for each character + if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON { + // continue scanning + continue + } + switch pairTypes[index] { + case bpOpen: + // check if maximum pairing depth reached + if p.openers.Len() == maxPairingDepth { + p.openers.Init() + return + } + // remember opener location, most recent first + p.openers.PushFront(i) + + case bpClose: + // see if there is a match + count := 0 + for elem := p.openers.Front(); elem != nil; elem = elem.Next() { + count++ + opener := elem.Value.(int) + if p.matchOpener(pairValues, opener, i) { + // if the opener matches, add nested pair to the ordered list + p.pairPositions = append(p.pairPositions, bracketPair{opener, i}) + // remove up to and including matched opener + for ; count > 0; count-- { + p.openers.Remove(p.openers.Front()) + } + break + } + } + sort.Sort(p.pairPositions) + // if we get here, the closing bracket matched no openers + // and gets ignored + } + } +} + +// Bracket pairs within an isolating run sequence are processed as units so +// that both the opening and the closing paired bracket in a pair resolve to +// the same direction. +// +// N0. Process bracket pairs in an isolating run sequence sequentially in +// the logical order of the text positions of the opening paired brackets +// using the logic given below. Within this scope, bidirectional types EN +// and AN are treated as R. +// +// Identify the bracket pairs in the current isolating run sequence +// according to BD16. For each bracket-pair element in the list of pairs of +// text positions: +// +// a Inspect the bidirectional types of the characters enclosed within the +// bracket pair. +// +// b If any strong type (either L or R) matching the embedding direction is +// found, set the type for both brackets in the pair to match the embedding +// direction. +// +// o [ e ] o -> o e e e o +// +// o [ o e ] -> o e o e e +// +// o [ NI e ] -> o e NI e e +// +// c Otherwise, if a strong type (opposite the embedding direction) is +// found, test for adjacent strong types as follows: 1 First, check +// backwards before the opening paired bracket until the first strong type +// (L, R, or sos) is found. If that first preceding strong type is opposite +// the embedding direction, then set the type for both brackets in the pair +// to that type. 
2 Otherwise, set the type for both brackets in the pair to
+// the embedding direction.
+//
+//  o [ o ] e -> o o o o e
+//
+//  o [ o NI ] o -> o o o NI o o
+//
+//  e [ o ] o -> e e o e o
+//
+//  e [ o ] e -> e e o e e
+//
+//  e ( o [ o ] NI ) e -> e e o o o o NI e e
+//
+// d Otherwise, do not set the type for the current bracket pair. Note that
+// if the enclosed text contains no strong types the paired brackets will
+// both resolve to the same level when resolved individually using rules N1
+// and N2.
+//
+//  e ( NI ) o -> e ( NI ) o
+
+// getStrongTypeN0 maps a character's directional code to the strong type
+// required by rule N0.
+//
+// TODO: have separate type for "strong" directionality.
+func (p *bracketPairer) getStrongTypeN0(index int) Class {
+    switch p.codesIsolatedRun[index] {
+    // in the scope of N0, number types are treated as R
+    case EN, AN, AL, R:
+        return R
+    case L:
+        return L
+    default:
+        return ON
+    }
+}
+
+// classifyPairContent reports the strong types contained inside a Bracket Pair,
+// assuming the given embedding direction.
+//
+// It returns ON if no strong type is found. If a single strong type is found,
+// it returns this type. Otherwise it returns the embedding direction.
+//
+// TODO: use separate type for "strong" directionality.
+func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class {
+    dirOpposite := ON
+    for i := loc.opener + 1; i < loc.closer; i++ {
+        dir := p.getStrongTypeN0(i)
+        if dir == ON {
+            continue
+        }
+        if dir == dirEmbed {
+            return dir // type matching embedding direction found
+        }
+        dirOpposite = dir
+    }
+    // return ON if no strong type found, or class opposite to dirEmbed
+    return dirOpposite
+}
+
+// classBeforePair determines which strong types are present before a Bracket
+// Pair. It returns R or L if a strong type is found; otherwise it returns the
+// sos class.
+func (p *bracketPairer) classBeforePair(loc bracketPair) Class {
+    for i := loc.opener - 1; i >= 0; i-- {
+        if dir := p.getStrongTypeN0(i); dir != ON {
+            return dir
+        }
+    }
+    // no strong types found, return sos
+    return p.sos
+}
+
+// assignBracketType implements rule N0 for a single bracket pair.
+func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) { + // rule "N0, a", inspect contents of pair + dirPair := p.classifyPairContent(loc, dirEmbed) + + // dirPair is now L, R, or N (no strong type found) + + // the following logical tests are performed out of order compared to + // the statement of the rules but yield the same results + if dirPair == ON { + return // case "d" - nothing to do + } + + if dirPair != dirEmbed { + // case "c": strong type found, opposite - check before (c.1) + dirPair = p.classBeforePair(loc) + if dirPair == dirEmbed || dirPair == ON { + // no strong opposite type found before - use embedding (c.2) + dirPair = dirEmbed + } + } + // else: case "b", strong type found matching embedding, + // no explicit action needed, as dirPair is already set to embedding + // direction + + // set the bracket types to the type found + p.setBracketsToType(loc, dirPair, initialTypes) +} + +func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) { + p.codesIsolatedRun[loc.opener] = dirPair + p.codesIsolatedRun[loc.closer] = dirPair + + for i := loc.opener + 1; i < loc.closer; i++ { + index := p.indexes[i] + if initialTypes[index] != NSM { + break + } + p.codesIsolatedRun[i] = dirPair + } + + for i := loc.closer + 1; i < len(p.indexes); i++ { + index := p.indexes[i] + if initialTypes[index] != NSM { + break + } + p.codesIsolatedRun[i] = dirPair + } +} + +// resolveBrackets implements rule N0 for a list of pairs. +func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) { + for _, loc := range p.pairPositions { + p.assignBracketType(loc, dirEmbed, initialTypes) + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go new file mode 100644 index 000000000..d4c1399f0 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -0,0 +1,1058 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import "log" + +// This implementation is a port based on the reference implementation found at: +// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ +// +// described in Unicode Bidirectional Algorithm (UAX #9). +// +// Input: +// There are two levels of input to the algorithm, since clients may prefer to +// supply some information from out-of-band sources rather than relying on the +// default behavior. +// +// - Bidi class array +// - Bidi class array, with externally supplied base line direction +// +// Output: +// Output is separated into several stages: +// +// - levels array over entire paragraph +// - reordering array over entire paragraph +// - levels array over line +// - reordering array over line +// +// Note that for conformance to the Unicode Bidirectional Algorithm, +// implementations are only required to generate correct reordering and +// character directionality (odd or even levels) over a line. Generating +// identical level arrays over a line is not required. Bidi explicit format +// codes (LRE, RLE, LRO, RLO, PDF) and BN can be assigned arbitrary levels and +// positions as long as the rest of the input is properly reordered. +// +// As the algorithm is defined to operate on a single paragraph at a time, this +// implementation is written to handle single paragraphs. 
Thus rule P1 is +// presumed by this implementation-- the data provided to the implementation is +// assumed to be a single paragraph, and either contains no 'B' codes, or a +// single 'B' code at the end of the input. 'B' is allowed as input to +// illustrate how the algorithm assigns it a level. +// +// Also note that rules L3 and L4 depend on the rendering engine that uses the +// result of the bidi algorithm. This implementation assumes that the rendering +// engine expects combining marks in visual order (e.g. to the left of their +// base character in RTL runs) and that it adjusts the glyphs used to render +// mirrored characters that are in RTL runs so that they render appropriately. + +// level is the embedding level of a character. Even embedding levels indicate +// left-to-right order and odd levels indicate right-to-left order. The special +// level of -1 is reserved for undefined order. +type level int8 + +const implicitLevel level = -1 + +// in returns if x is equal to any of the values in set. +func (c Class) in(set ...Class) bool { + for _, s := range set { + if c == s { + return true + } + } + return false +} + +// A paragraph contains the state of a paragraph. +type paragraph struct { + initialTypes []Class + + // Arrays of properties needed for paired bracket evaluation in N0 + pairTypes []bracketType // paired Bracket types for paragraph + pairValues []rune // rune for opening bracket or pbOpen and pbClose; 0 for pbNone + + embeddingLevel level // default: = implicitLevel; + + // at the paragraph levels + resultTypes []Class + resultLevels []level + + // Index of matching PDI for isolate initiator characters. For other + // characters, the value of matchingPDI will be set to -1. For isolate + // initiators with no matching PDI, matchingPDI will be set to the length of + // the input string. + matchingPDI []int + + // Index of matching isolate initiator for PDI characters. For other + // characters, and for PDIs with no matching isolate initiator, the value of + // matchingIsolateInitiator will be set to -1. + matchingIsolateInitiator []int +} + +// newParagraph initializes a paragraph. The user needs to supply a few arrays +// corresponding to the preprocessed text input. The types correspond to the +// Unicode BiDi classes for each rune. pairTypes indicates the bracket type for +// each rune. pairValues provides a unique bracket class identifier for each +// rune (suggested is the rune of the open bracket for opening and matching +// close brackets, after normalization). The embedding levels are optional, but +// may be supplied to encode embedding levels of styled text. +// +// TODO: return an error. +func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) *paragraph { + validateTypes(types) + validatePbTypes(pairTypes) + validatePbValues(pairValues, pairTypes) + validateParagraphEmbeddingLevel(levels) + + p := ¶graph{ + initialTypes: append([]Class(nil), types...), + embeddingLevel: levels, + + pairTypes: pairTypes, + pairValues: pairValues, + + resultTypes: append([]Class(nil), types...), + } + p.run() + return p +} + +func (p *paragraph) Len() int { return len(p.initialTypes) } + +// The algorithm. Does not include line-based processing (Rules L1, L2). +// These are applied later in the line-based phase of the algorithm. +func (p *paragraph) run() { + p.determineMatchingIsolates() + + // 1) determining the paragraph level + // Rule P1 is the requirement for entering this algorithm. + // Rules P2, P3. 
+ // If no externally supplied paragraph embedding level, use default. + if p.embeddingLevel == implicitLevel { + p.embeddingLevel = p.determineParagraphEmbeddingLevel(0, p.Len()) + } + + // Initialize result levels to paragraph embedding level. + p.resultLevels = make([]level, p.Len()) + setLevels(p.resultLevels, p.embeddingLevel) + + // 2) Explicit levels and directions + // Rules X1-X8. + p.determineExplicitEmbeddingLevels() + + // Rule X9. + // We do not remove the embeddings, the overrides, the PDFs, and the BNs + // from the string explicitly. But they are not copied into isolating run + // sequences when they are created, so they are removed for all + // practical purposes. + + // Rule X10. + // Run remainder of algorithm one isolating run sequence at a time + for _, seq := range p.determineIsolatingRunSequences() { + // 3) resolving weak types + // Rules W1-W7. + seq.resolveWeakTypes() + + // 4a) resolving paired brackets + // Rule N0 + resolvePairedBrackets(seq) + + // 4b) resolving neutral types + // Rules N1-N3. + seq.resolveNeutralTypes() + + // 5) resolving implicit embedding levels + // Rules I1, I2. + seq.resolveImplicitLevels() + + // Apply the computed levels and types + seq.applyLevelsAndTypes() + } + + // Assign appropriate levels to 'hide' LREs, RLEs, LROs, RLOs, PDFs, and + // BNs. This is for convenience, so the resulting level array will have + // a value for every character. + p.assignLevelsToCharactersRemovedByX9() +} + +// determineMatchingIsolates determines the matching PDI for each isolate +// initiator and vice versa. +// +// Definition BD9. +// +// At the end of this function: +// +// - The member variable matchingPDI is set to point to the index of the +// matching PDI character for each isolate initiator character. If there is +// no matching PDI, it is set to the length of the input text. For other +// characters, it is set to -1. +// - The member variable matchingIsolateInitiator is set to point to the +// index of the matching isolate initiator character for each PDI character. +// If there is no matching isolate initiator, or the character is not a PDI, +// it is set to -1. +func (p *paragraph) determineMatchingIsolates() { + p.matchingPDI = make([]int, p.Len()) + p.matchingIsolateInitiator = make([]int, p.Len()) + + for i := range p.matchingIsolateInitiator { + p.matchingIsolateInitiator[i] = -1 + } + + for i := range p.matchingPDI { + p.matchingPDI[i] = -1 + + if t := p.resultTypes[i]; t.in(LRI, RLI, FSI) { + depthCounter := 1 + for j := i + 1; j < p.Len(); j++ { + if u := p.resultTypes[j]; u.in(LRI, RLI, FSI) { + depthCounter++ + } else if u == PDI { + if depthCounter--; depthCounter == 0 { + p.matchingPDI[i] = j + p.matchingIsolateInitiator[j] = i + break + } + } + } + if p.matchingPDI[i] == -1 { + p.matchingPDI[i] = p.Len() + } + } + } +} + +// determineParagraphEmbeddingLevel reports the resolved paragraph direction of +// the substring limited by the given range [start, end). +// +// Determines the paragraph level based on rules P2, P3. This is also used +// in rule X5c to find if an FSI should resolve to LRI or RLI. +func (p *paragraph) determineParagraphEmbeddingLevel(start, end int) level { + var strongType Class = unknownClass + + // Rule P2. + for i := start; i < end; i++ { + if t := p.resultTypes[i]; t.in(L, AL, R) { + strongType = t + break + } else if t.in(FSI, LRI, RLI) { + i = p.matchingPDI[i] // skip over to the matching PDI + if i > end { + log.Panic("assert (i <= end)") + } + } + } + // Rule P3. 
+ switch strongType { + case unknownClass: // none found + // default embedding level when no strong types found is 0. + return 0 + case L: + return 0 + default: // AL, R + return 1 + } +} + +const maxDepth = 125 + +// This stack will store the embedding levels and override and isolated +// statuses +type directionalStatusStack struct { + stackCounter int + embeddingLevelStack [maxDepth + 1]level + overrideStatusStack [maxDepth + 1]Class + isolateStatusStack [maxDepth + 1]bool +} + +func (s *directionalStatusStack) empty() { s.stackCounter = 0 } +func (s *directionalStatusStack) pop() { s.stackCounter-- } +func (s *directionalStatusStack) depth() int { return s.stackCounter } + +func (s *directionalStatusStack) push(level level, overrideStatus Class, isolateStatus bool) { + s.embeddingLevelStack[s.stackCounter] = level + s.overrideStatusStack[s.stackCounter] = overrideStatus + s.isolateStatusStack[s.stackCounter] = isolateStatus + s.stackCounter++ +} + +func (s *directionalStatusStack) lastEmbeddingLevel() level { + return s.embeddingLevelStack[s.stackCounter-1] +} + +func (s *directionalStatusStack) lastDirectionalOverrideStatus() Class { + return s.overrideStatusStack[s.stackCounter-1] +} + +func (s *directionalStatusStack) lastDirectionalIsolateStatus() bool { + return s.isolateStatusStack[s.stackCounter-1] +} + +// Determine explicit levels using rules X1 - X8 +func (p *paragraph) determineExplicitEmbeddingLevels() { + var stack directionalStatusStack + var overflowIsolateCount, overflowEmbeddingCount, validIsolateCount int + + // Rule X1. + stack.push(p.embeddingLevel, ON, false) + + for i, t := range p.resultTypes { + // Rules X2, X3, X4, X5, X5a, X5b, X5c + switch t { + case RLE, LRE, RLO, LRO, RLI, LRI, FSI: + isIsolate := t.in(RLI, LRI, FSI) + isRTL := t.in(RLE, RLO, RLI) + + // override if this is an FSI that resolves to RLI + if t == FSI { + isRTL = (p.determineParagraphEmbeddingLevel(i+1, p.matchingPDI[i]) == 1) + } + if isIsolate { + p.resultLevels[i] = stack.lastEmbeddingLevel() + if stack.lastDirectionalOverrideStatus() != ON { + p.resultTypes[i] = stack.lastDirectionalOverrideStatus() + } + } + + var newLevel level + if isRTL { + // least greater odd + newLevel = (stack.lastEmbeddingLevel() + 1) | 1 + } else { + // least greater even + newLevel = (stack.lastEmbeddingLevel() + 2) &^ 1 + } + + if newLevel <= maxDepth && overflowIsolateCount == 0 && overflowEmbeddingCount == 0 { + if isIsolate { + validIsolateCount++ + } + // Push new embedding level, override status, and isolated + // status. + // No check for valid stack counter, since the level check + // suffices. + switch t { + case LRO: + stack.push(newLevel, L, isIsolate) + case RLO: + stack.push(newLevel, R, isIsolate) + default: + stack.push(newLevel, ON, isIsolate) + } + // Not really part of the spec + if !isIsolate { + p.resultLevels[i] = newLevel + } + } else { + // This is an invalid explicit formatting character, + // so apply the "Otherwise" part of rules X2-X5b. 
+ if isIsolate { + overflowIsolateCount++ + } else { // !isIsolate + if overflowIsolateCount == 0 { + overflowEmbeddingCount++ + } + } + } + + // Rule X6a + case PDI: + if overflowIsolateCount > 0 { + overflowIsolateCount-- + } else if validIsolateCount == 0 { + // do nothing + } else { + overflowEmbeddingCount = 0 + for !stack.lastDirectionalIsolateStatus() { + stack.pop() + } + stack.pop() + validIsolateCount-- + } + p.resultLevels[i] = stack.lastEmbeddingLevel() + + // Rule X7 + case PDF: + // Not really part of the spec + p.resultLevels[i] = stack.lastEmbeddingLevel() + + if overflowIsolateCount > 0 { + // do nothing + } else if overflowEmbeddingCount > 0 { + overflowEmbeddingCount-- + } else if !stack.lastDirectionalIsolateStatus() && stack.depth() >= 2 { + stack.pop() + } + + case B: // paragraph separator. + // Rule X8. + + // These values are reset for clarity, in this implementation B + // can only occur as the last code in the array. + stack.empty() + overflowIsolateCount = 0 + overflowEmbeddingCount = 0 + validIsolateCount = 0 + p.resultLevels[i] = p.embeddingLevel + + default: + p.resultLevels[i] = stack.lastEmbeddingLevel() + if stack.lastDirectionalOverrideStatus() != ON { + p.resultTypes[i] = stack.lastDirectionalOverrideStatus() + } + } + } +} + +type isolatingRunSequence struct { + p *paragraph + + indexes []int // indexes to the original string + + types []Class // type of each character using the index + resolvedLevels []level // resolved levels after application of rules + level level + sos, eos Class +} + +func (i *isolatingRunSequence) Len() int { return len(i.indexes) } + +func maxLevel(a, b level) level { + if a > b { + return a + } + return b +} + +// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, +// either L or R, for each isolating run sequence. +func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { + length := len(indexes) + types := make([]Class, length) + for i, x := range indexes { + types[i] = p.resultTypes[x] + } + + // assign level, sos and eos + prevChar := indexes[0] - 1 + for prevChar >= 0 && isRemovedByX9(p.initialTypes[prevChar]) { + prevChar-- + } + prevLevel := p.embeddingLevel + if prevChar >= 0 { + prevLevel = p.resultLevels[prevChar] + } + + var succLevel level + lastType := types[length-1] + if lastType.in(LRI, RLI, FSI) { + succLevel = p.embeddingLevel + } else { + // the first character after the end of run sequence + limit := indexes[length-1] + 1 + for ; limit < p.Len() && isRemovedByX9(p.initialTypes[limit]); limit++ { + + } + succLevel = p.embeddingLevel + if limit < p.Len() { + succLevel = p.resultLevels[limit] + } + } + level := p.resultLevels[indexes[0]] + return &isolatingRunSequence{ + p: p, + indexes: indexes, + types: types, + level: level, + sos: typeForLevel(maxLevel(prevLevel, level)), + eos: typeForLevel(maxLevel(succLevel, level)), + } +} + +// Resolving weak types Rules W1-W7. +// +// Note that some weak types (EN, AN) remain after this processing is +// complete. +func (s *isolatingRunSequence) resolveWeakTypes() { + + // on entry, only these types remain + s.assertOnly(L, R, AL, EN, ES, ET, AN, CS, B, S, WS, ON, NSM, LRI, RLI, FSI, PDI) + + // Rule W1. + // Changes all NSMs. + preceedingCharacterType := s.sos + for i, t := range s.types { + if t == NSM { + s.types[i] = preceedingCharacterType + } else { + if t.in(LRI, RLI, FSI, PDI) { + preceedingCharacterType = ON + } + preceedingCharacterType = t + } + } + + // Rule W2. 
+ // EN does not change at the start of the run, because sos != AL. + for i, t := range s.types { + if t == EN { + for j := i - 1; j >= 0; j-- { + if t := s.types[j]; t.in(L, R, AL) { + if t == AL { + s.types[i] = AN + } + break + } + } + } + } + + // Rule W3. + for i, t := range s.types { + if t == AL { + s.types[i] = R + } + } + + // Rule W4. + // Since there must be values on both sides for this rule to have an + // effect, the scan skips the first and last value. + // + // Although the scan proceeds left to right, and changes the type + // values in a way that would appear to affect the computations + // later in the scan, there is actually no problem. A change in the + // current value can only affect the value to its immediate right, + // and only affect it if it is ES or CS. But the current value can + // only change if the value to its right is not ES or CS. Thus + // either the current value will not change, or its change will have + // no effect on the remainder of the analysis. + + for i := 1; i < s.Len()-1; i++ { + t := s.types[i] + if t == ES || t == CS { + prevSepType := s.types[i-1] + succSepType := s.types[i+1] + if prevSepType == EN && succSepType == EN { + s.types[i] = EN + } else if s.types[i] == CS && prevSepType == AN && succSepType == AN { + s.types[i] = AN + } + } + } + + // Rule W5. + for i, t := range s.types { + if t == ET { + // locate end of sequence + runStart := i + runEnd := s.findRunLimit(runStart, ET) + + // check values at ends of sequence + t := s.sos + if runStart > 0 { + t = s.types[runStart-1] + } + if t != EN { + t = s.eos + if runEnd < len(s.types) { + t = s.types[runEnd] + } + } + if t == EN { + setTypes(s.types[runStart:runEnd], EN) + } + // continue at end of sequence + i = runEnd + } + } + + // Rule W6. + for i, t := range s.types { + if t.in(ES, ET, CS) { + s.types[i] = ON + } + } + + // Rule W7. + for i, t := range s.types { + if t == EN { + // set default if we reach start of run + prevStrongType := s.sos + for j := i - 1; j >= 0; j-- { + t = s.types[j] + if t == L || t == R { // AL's have been changed to R + prevStrongType = t + break + } + } + if prevStrongType == L { + s.types[i] = L + } + } + } +} + +// 6) resolving neutral types Rules N1-N2. +func (s *isolatingRunSequence) resolveNeutralTypes() { + + // on entry, only these types can be in resultTypes + s.assertOnly(L, R, EN, AN, B, S, WS, ON, RLI, LRI, FSI, PDI) + + for i, t := range s.types { + switch t { + case WS, ON, B, S, RLI, LRI, FSI, PDI: + // find bounds of run of neutrals + runStart := i + runEnd := s.findRunLimit(runStart, B, S, WS, ON, RLI, LRI, FSI, PDI) + + // determine effective types at ends of run + var leadType, trailType Class + + // Note that the character found can only be L, R, AN, or + // EN. + if runStart == 0 { + leadType = s.sos + } else { + leadType = s.types[runStart-1] + if leadType.in(AN, EN) { + leadType = R + } + } + if runEnd == len(s.types) { + trailType = s.eos + } else { + trailType = s.types[runEnd] + if trailType.in(AN, EN) { + trailType = R + } + } + + var resolvedType Class + if leadType == trailType { + // Rule N1. + resolvedType = leadType + } else { + // Rule N2. + // Notice the embedding level of the run is used, not + // the paragraph embedding level. 
+ resolvedType = typeForLevel(s.level) + } + + setTypes(s.types[runStart:runEnd], resolvedType) + + // skip over run of (former) neutrals + i = runEnd + } + } +} + +func setLevels(levels []level, newLevel level) { + for i := range levels { + levels[i] = newLevel + } +} + +func setTypes(types []Class, newType Class) { + for i := range types { + types[i] = newType + } +} + +// 7) resolving implicit embedding levels Rules I1, I2. +func (s *isolatingRunSequence) resolveImplicitLevels() { + + // on entry, only these types can be in resultTypes + s.assertOnly(L, R, EN, AN) + + s.resolvedLevels = make([]level, len(s.types)) + setLevels(s.resolvedLevels, s.level) + + if (s.level & 1) == 0 { // even level + for i, t := range s.types { + // Rule I1. + if t == L { + // no change + } else if t == R { + s.resolvedLevels[i] += 1 + } else { // t == AN || t == EN + s.resolvedLevels[i] += 2 + } + } + } else { // odd level + for i, t := range s.types { + // Rule I2. + if t == R { + // no change + } else { // t == L || t == AN || t == EN + s.resolvedLevels[i] += 1 + } + } + } +} + +// Applies the levels and types resolved in rules W1-I2 to the +// resultLevels array. +func (s *isolatingRunSequence) applyLevelsAndTypes() { + for i, x := range s.indexes { + s.p.resultTypes[x] = s.types[i] + s.p.resultLevels[x] = s.resolvedLevels[i] + } +} + +// Return the limit of the run consisting only of the types in validSet +// starting at index. This checks the value at index, and will return +// index if that value is not in validSet. +func (s *isolatingRunSequence) findRunLimit(index int, validSet ...Class) int { +loop: + for ; index < len(s.types); index++ { + t := s.types[index] + for _, valid := range validSet { + if t == valid { + continue loop + } + } + return index // didn't find a match in validSet + } + return len(s.types) +} + +// Algorithm validation. Assert that all values in types are in the +// provided set. +func (s *isolatingRunSequence) assertOnly(codes ...Class) { +loop: + for i, t := range s.types { + for _, c := range codes { + if t == c { + continue loop + } + } + log.Panicf("invalid bidi code %v present in assertOnly at position %d", t, s.indexes[i]) + } +} + +// determineLevelRuns returns an array of level runs. Each level run is +// described as an array of indexes into the input string. +// +// Determines the level runs. Rule X9 will be applied in determining the +// runs, in the way that makes sure the characters that are supposed to be +// removed are not included in the runs. +func (p *paragraph) determineLevelRuns() [][]int { + run := []int{} + allRuns := [][]int{} + currentLevel := implicitLevel + + for i := range p.initialTypes { + if !isRemovedByX9(p.initialTypes[i]) { + if p.resultLevels[i] != currentLevel { + // we just encountered a new run; wrap up last run + if currentLevel >= 0 { // only wrap it up if there was a run + allRuns = append(allRuns, run) + run = nil + } + // Start new run + currentLevel = p.resultLevels[i] + } + run = append(run, i) + } + } + // Wrap up the final run, if any + if len(run) > 0 { + allRuns = append(allRuns, run) + } + return allRuns +} + +// Definition BD13. Determine isolating run sequences. 
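A compact standalone illustration, not part of the vendored file, of how rules I1 and I2 (as implemented in resolveImplicitLevels above) raise the run level for each remaining type:

```
package main

import "fmt"

// bump mirrors resolveImplicitLevels: on even (LTR) levels rule I1 raises R by
// one and EN/AN by two; on odd (RTL) levels rule I2 raises everything but R by one.
func bump(level int, typ string) int {
	if level%2 == 0 { // rule I1
		switch typ {
		case "R":
			return level + 1
		case "EN", "AN":
			return level + 2
		}
		return level // L unchanged
	}
	if typ == "R" { // rule I2
		return level
	}
	return level + 1 // L, EN, AN
}

func main() {
	for _, typ := range []string{"L", "R", "EN", "AN"} {
		fmt.Printf("level 0 %-2s -> %d   level 1 %-2s -> %d\n",
			typ, bump(0, typ), typ, bump(1, typ))
	}
}
```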
+func (p *paragraph) determineIsolatingRunSequences() []*isolatingRunSequence { + levelRuns := p.determineLevelRuns() + + // Compute the run that each character belongs to + runForCharacter := make([]int, p.Len()) + for i, run := range levelRuns { + for _, index := range run { + runForCharacter[index] = i + } + } + + sequences := []*isolatingRunSequence{} + + var currentRunSequence []int + + for _, run := range levelRuns { + first := run[0] + if p.initialTypes[first] != PDI || p.matchingIsolateInitiator[first] == -1 { + currentRunSequence = nil + // int run = i; + for { + // Copy this level run into currentRunSequence + currentRunSequence = append(currentRunSequence, run...) + + last := currentRunSequence[len(currentRunSequence)-1] + lastT := p.initialTypes[last] + if lastT.in(LRI, RLI, FSI) && p.matchingPDI[last] != p.Len() { + run = levelRuns[runForCharacter[p.matchingPDI[last]]] + } else { + break + } + } + sequences = append(sequences, p.isolatingRunSequence(currentRunSequence)) + } + } + return sequences +} + +// Assign level information to characters removed by rule X9. This is for +// ease of relating the level information to the original input data. Note +// that the levels assigned to these codes are arbitrary, they're chosen so +// as to avoid breaking level runs. +func (p *paragraph) assignLevelsToCharactersRemovedByX9() { + for i, t := range p.initialTypes { + if t.in(LRE, RLE, LRO, RLO, PDF, BN) { + p.resultTypes[i] = t + p.resultLevels[i] = -1 + } + } + // now propagate forward the levels information (could have + // propagated backward, the main thing is not to introduce a level + // break where one doesn't already exist). + + if p.resultLevels[0] == -1 { + p.resultLevels[0] = p.embeddingLevel + } + for i := 1; i < len(p.initialTypes); i++ { + if p.resultLevels[i] == -1 { + p.resultLevels[i] = p.resultLevels[i-1] + } + } + // Embedding information is for informational purposes only so need not be + // adjusted. +} + +// +// Output +// + +// getLevels computes levels array breaking lines at offsets in linebreaks. +// Rule L1. +// +// The linebreaks array must include at least one value. The values must be +// in strictly increasing order (no duplicates) between 1 and the length of +// the text, inclusive. The last value must be the length of the text. +func (p *paragraph) getLevels(linebreaks []int) []level { + // Note that since the previous processing has removed all + // P, S, and WS values from resultTypes, the values referred to + // in these rules are the initial types, before any processing + // has been applied (including processing of overrides). + // + // This example implementation has reinserted explicit format codes + // and BN, in order that the levels array correspond to the + // initial text. Their final placement is not normative. + // These codes are treated like WS in this implementation, + // so they don't interrupt sequences of WS. + + validateLineBreaks(linebreaks, p.Len()) + + result := append([]level(nil), p.resultLevels...) + + // don't worry about linebreaks since if there is a break within + // a series of WS values preceding S, the linebreak itself + // causes the reset. + for i, t := range p.initialTypes { + if t.in(B, S) { + // Rule L1, clauses one and two. + result[i] = p.embeddingLevel + + // Rule L1, clause three. + for j := i - 1; j >= 0; j-- { + if isWhitespace(p.initialTypes[j]) { // including format codes + result[j] = p.embeddingLevel + } else { + break + } + } + } + } + + // Rule L1, clause four. 
+ start := 0 + for _, limit := range linebreaks { + for j := limit - 1; j >= start; j-- { + if isWhitespace(p.initialTypes[j]) { // including format codes + result[j] = p.embeddingLevel + } else { + break + } + } + start = limit + } + + return result +} + +// getReordering returns the reordering of lines from a visual index to a +// logical index for line breaks at the given offsets. +// +// Lines are concatenated from left to right. So for example, the fifth +// character from the left on the third line is +// +// getReordering(linebreaks)[linebreaks[1] + 4] +// +// (linebreaks[1] is the position after the last character of the second +// line, which is also the index of the first character on the third line, +// and adding four gets the fifth character from the left). +// +// The linebreaks array must include at least one value. The values must be +// in strictly increasing order (no duplicates) between 1 and the length of +// the text, inclusive. The last value must be the length of the text. +func (p *paragraph) getReordering(linebreaks []int) []int { + validateLineBreaks(linebreaks, p.Len()) + + return computeMultilineReordering(p.getLevels(linebreaks), linebreaks) +} + +// Return multiline reordering array for a given level array. Reordering +// does not occur across a line break. +func computeMultilineReordering(levels []level, linebreaks []int) []int { + result := make([]int, len(levels)) + + start := 0 + for _, limit := range linebreaks { + tempLevels := make([]level, limit-start) + copy(tempLevels, levels[start:]) + + for j, order := range computeReordering(tempLevels) { + result[start+j] = order + start + } + start = limit + } + return result +} + +// Return reordering array for a given level array. This reorders a single +// line. The reordering is a visual to logical map. For example, the +// leftmost char is string.charAt(order[0]). Rule L2. +func computeReordering(levels []level) []int { + result := make([]int, len(levels)) + // initialize order + for i := range result { + result[i] = i + } + + // locate highest level found on line. + // Note the rules say text, but no reordering across line bounds is + // performed, so this is sufficient. + highestLevel := level(0) + lowestOddLevel := level(maxDepth + 2) + for _, level := range levels { + if level > highestLevel { + highestLevel = level + } + if level&1 != 0 && level < lowestOddLevel { + lowestOddLevel = level + } + } + + for level := highestLevel; level >= lowestOddLevel; level-- { + for i := 0; i < len(levels); i++ { + if levels[i] >= level { + // find range of text at or above this level + start := i + limit := i + 1 + for limit < len(levels) && levels[limit] >= level { + limit++ + } + + for j, k := start, limit-1; j < k; j, k = j+1, k-1 { + result[j], result[k] = result[k], result[j] + } + // skip to end of level run + i = limit + } + } + } + + return result +} + +// isWhitespace reports whether the type is considered a whitespace type for the +// line break rules. +func isWhitespace(c Class) bool { + switch c { + case LRE, RLE, LRO, RLO, PDF, LRI, RLI, FSI, PDI, BN, WS: + return true + } + return false +} + +// isRemovedByX9 reports whether the type is one of the types removed in X9. +func isRemovedByX9(c Class) bool { + switch c { + case LRE, RLE, LRO, RLO, PDF, BN: + return true + } + return false +} + +// typeForLevel reports the strong type (L or R) corresponding to the level. 
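Putting the pieces together, a short in-package sketch of the levels and reordering produced for a paragraph of the form L R R L. It would have to live inside package bidi (for example in a hypothetical _test.go file), since paragraph and its methods are unexported; the expected values follow from rule I1 (levels) and rule L2 (reordering):

```
package bidi

import "testing"

// TestParagraphSketch is an illustrative walk through the pipeline above:
// newParagraph resolves levels, getLevels applies L1, getReordering applies L2.
func TestParagraphSketch(t *testing.T) {
	types := []Class{L, R, R, L} // an LTR letter, two RTL letters, an LTR letter
	pairTypes := []bracketType{bpNone, bpNone, bpNone, bpNone}
	pairValues := make([]rune, len(types))

	p := newParagraph(types, pairTypes, pairValues, implicitLevel)

	line := []int{len(types)}      // one line spanning the whole paragraph
	levels := p.getLevels(line)    // expected [0 1 1 0]: rule I1 raises the R run to level 1
	order := p.getReordering(line) // expected [0 2 1 3]: rule L2 reverses the RTL run
	t.Logf("levels=%v order=%v", levels, order)
}
```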
+func typeForLevel(level level) Class { + if (level & 0x1) == 0 { + return L + } + return R +} + +// TODO: change validation to not panic + +func validateTypes(types []Class) { + if len(types) == 0 { + log.Panic("types is null") + } + for i, t := range types[:len(types)-1] { + if t == B { + log.Panicf("B type before end of paragraph at index: %d", i) + } + } +} + +func validateParagraphEmbeddingLevel(embeddingLevel level) { + if embeddingLevel != implicitLevel && + embeddingLevel != 0 && + embeddingLevel != 1 { + log.Panicf("illegal paragraph embedding level: %d", embeddingLevel) + } +} + +func validateLineBreaks(linebreaks []int, textLength int) { + prev := 0 + for i, next := range linebreaks { + if next <= prev { + log.Panicf("bad linebreak: %d at index: %d", next, i) + } + prev = next + } + if prev != textLength { + log.Panicf("last linebreak was %d, want %d", prev, textLength) + } +} + +func validatePbTypes(pairTypes []bracketType) { + if len(pairTypes) == 0 { + log.Panic("pairTypes is null") + } + for i, pt := range pairTypes { + switch pt { + case bpNone, bpOpen, bpClose: + default: + log.Panicf("illegal pairType value at %d: %v", i, pairTypes[i]) + } + } +} + +func validatePbValues(pairValues []rune, pairTypes []bracketType) { + if pairValues == nil { + log.Panic("pairValues is null") + } + if len(pairTypes) != len(pairValues) { + log.Panic("pairTypes is different length from pairValues") + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go new file mode 100644 index 000000000..040f3013d --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen.go @@ -0,0 +1,133 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "flag" + "log" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" + "golang.org/x/text/internal/ucd" +) + +var outputFile = flag.String("out", "tables.go", "output file") + +func main() { + gen.Init() + gen.Repackage("gen_trieval.go", "trieval.go", "bidi") + gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") + + genTables() +} + +// bidiClass names and codes taken from class "bc" in +// http://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt +var bidiClass = map[string]Class{ + "AL": AL, // ArabicLetter + "AN": AN, // ArabicNumber + "B": B, // ParagraphSeparator + "BN": BN, // BoundaryNeutral + "CS": CS, // CommonSeparator + "EN": EN, // EuropeanNumber + "ES": ES, // EuropeanSeparator + "ET": ET, // EuropeanTerminator + "L": L, // LeftToRight + "NSM": NSM, // NonspacingMark + "ON": ON, // OtherNeutral + "R": R, // RightToLeft + "S": S, // SegmentSeparator + "WS": WS, // WhiteSpace + + "FSI": Control, + "PDF": Control, + "PDI": Control, + "LRE": Control, + "LRI": Control, + "LRO": Control, + "RLE": Control, + "RLI": Control, + "RLO": Control, +} + +func genTables() { + if numClass > 0x0F { + log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) + } + w := gen.NewCodeWriter() + defer w.WriteGoFile(*outputFile, "bidi") + + gen.WriteUnicodeVersion(w) + + t := triegen.NewTrie("bidi") + + // Build data about bracket mapping. These bits need to be or-ed with + // any other bits. + orMask := map[rune]uint64{} + + xorMap := map[rune]int{} + xorMasks := []rune{0} // First value is no-op. 
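The bracket tables built from BidiBrackets.txt just below rely on a small trick: for each bracket pair the generator stores r1 ^ r2, so the matching bracket can later be recovered with a single xor (see reverseBracket in prop.go). A standalone sketch of the idea, not part of the generator:

```
package main

import "fmt"

func main() {
	open, closing := '(', ')' // U+0028 and U+0029 differ only in the low bit
	mask := open ^ closing    // 1 for this pair; other pairs get other small masks
	fmt.Printf("%c ^ %#x = %c\n", open, mask, open^mask)       // ( -> )
	fmt.Printf("%c ^ %#x = %c\n", closing, mask, closing^mask) // ) -> (
}
```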
+ + ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { + r1 := p.Rune(0) + r2 := p.Rune(1) + xor := r1 ^ r2 + if _, ok := xorMap[xor]; !ok { + xorMap[xor] = len(xorMasks) + xorMasks = append(xorMasks, xor) + } + entry := uint64(xorMap[xor]) << xorMaskShift + switch p.String(2) { + case "o": + entry |= openMask + case "c", "n": + default: + log.Fatalf("Unknown bracket class %q.", p.String(2)) + } + orMask[r1] = entry + }) + + w.WriteComment(` + xorMasks contains masks to be xor-ed with brackets to get the reverse + version.`) + w.WriteVar("xorMasks", xorMasks) + + done := map[rune]bool{} + + insert := func(r rune, c Class) { + if !done[r] { + t.Insert(r, orMask[r]|uint64(c)) + done[r] = true + } + } + + // Insert the derived BiDi properties. + ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { + r := p.Rune(0) + class, ok := bidiClass[p.String(1)] + if !ok { + log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) + } + insert(r, class) + }) + visitDefaults(insert) + + // TODO: use sparse blocks. This would reduce table size considerably + // from the looks of it. + + sz, err := t.Gen(w) + if err != nil { + log.Fatal(err) + } + w.Size += sz +} + +// dummy values to make methods in gen_common compile. The real versions +// will be generated by this file to tables.go. +var ( + xorMasks []rune +) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go new file mode 100644 index 000000000..51bd68fa7 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "unicode" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/ucd" + "golang.org/x/text/unicode/rangetable" +) + +// These tables are hand-extracted from: +// http://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt +func visitDefaults(fn func(r rune, c Class)) { + // first write default values for ranges listed above. + visitRunes(fn, AL, []rune{ + 0x0600, 0x07BF, // Arabic + 0x08A0, 0x08FF, // Arabic Extended-A + 0xFB50, 0xFDCF, // Arabic Presentation Forms + 0xFDF0, 0xFDFF, + 0xFE70, 0xFEFF, + 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols + }) + visitRunes(fn, R, []rune{ + 0x0590, 0x05FF, // Hebrew + 0x07C0, 0x089F, // Nko et al. + 0xFB1D, 0xFB4F, + 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. + 0x0001E800, 0x0001EDFF, + 0x0001EF00, 0x0001EFFF, + }) + visitRunes(fn, ET, []rune{ // European Terminator + 0x20A0, 0x20Cf, // Currency symbols + }) + rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { + fn(r, BN) // Boundary Neutral + }) + ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { + if p.String(1) == "Default_Ignorable_Code_Point" { + fn(p.Rune(0), BN) // Boundary Neutral + } + }) +} + +func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { + for i := 0; i < len(runes); i += 2 { + lo, hi := runes[i], runes[i+1] + for j := lo; j <= hi; j++ { + fn(j, c) + } + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go new file mode 100644 index 000000000..9cb994289 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go @@ -0,0 +1,64 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// Class is the Unicode BiDi class. Each rune has a single class. +type Class uint + +const ( + L Class = iota // LeftToRight + R // RightToLeft + EN // EuropeanNumber + ES // EuropeanSeparator + ET // EuropeanTerminator + AN // ArabicNumber + CS // CommonSeparator + B // ParagraphSeparator + S // SegmentSeparator + WS // WhiteSpace + ON // OtherNeutral + BN // BoundaryNeutral + NSM // NonspacingMark + AL // ArabicLetter + Control // Control LRO - PDI + + numClass + + LRO // LeftToRightOverride + RLO // RightToLeftOverride + LRE // LeftToRightEmbedding + RLE // RightToLeftEmbedding + PDF // PopDirectionalFormat + LRI // LeftToRightIsolate + RLI // RightToLeftIsolate + FSI // FirstStrongIsolate + PDI // PopDirectionalIsolate + + unknownClass = ^Class(0) +) + +var controlToClass = map[rune]Class{ + 0x202D: LRO, // LeftToRightOverride, + 0x202E: RLO, // RightToLeftOverride, + 0x202A: LRE, // LeftToRightEmbedding, + 0x202B: RLE, // RightToLeftEmbedding, + 0x202C: PDF, // PopDirectionalFormat, + 0x2066: LRI, // LeftToRightIsolate, + 0x2067: RLI, // RightToLeftIsolate, + 0x2068: FSI, // FirstStrongIsolate, + 0x2069: PDI, // PopDirectionalIsolate, +} + +// A trie entry has the following bits: +// 7..5 XOR mask for brackets +// 4 1: Bracket open, 0: Bracket close +// 3..0 Class type + +const ( + openMask = 0x10 + xorMaskShift = 5 +) diff --git a/vendor/golang.org/x/text/unicode/bidi/prop.go b/vendor/golang.org/x/text/unicode/bidi/prop.go new file mode 100644 index 000000000..7c9484e1f --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/prop.go @@ -0,0 +1,206 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import "unicode/utf8" + +// Properties provides access to BiDi properties of runes. +type Properties struct { + entry uint8 + last uint8 +} + +var trie = newBidiTrie(0) + +// TODO: using this for bidirule reduces the running time by about 5%. Consider +// if this is worth exposing or if we can find a way to speed up the Class +// method. +// +// // CompactClass is like Class, but maps all of the BiDi control classes +// // (LRO, RLO, LRE, RLE, PDF, LRI, RLI, FSI, PDI) to the class Control. +// func (p Properties) CompactClass() Class { +// return Class(p.entry & 0x0F) +// } + +// Class returns the Bidi class for p. +func (p Properties) Class() Class { + c := Class(p.entry & 0x0F) + if c == Control { + c = controlByteToClass[p.last&0xF] + } + return c +} + +// IsBracket reports whether the rune is a bracket. +func (p Properties) IsBracket() bool { return p.entry&0xF0 != 0 } + +// IsOpeningBracket reports whether the rune is an opening bracket. +// IsBracket must return true. +func (p Properties) IsOpeningBracket() bool { return p.entry&openMask != 0 } + +// TODO: find a better API and expose. 
+func (p Properties) reverseBracket(r rune) rune { + return xorMasks[p.entry>>xorMaskShift] ^ r +} + +var controlByteToClass = [16]Class{ + 0xD: LRO, // U+202D LeftToRightOverride, + 0xE: RLO, // U+202E RightToLeftOverride, + 0xA: LRE, // U+202A LeftToRightEmbedding, + 0xB: RLE, // U+202B RightToLeftEmbedding, + 0xC: PDF, // U+202C PopDirectionalFormat, + 0x6: LRI, // U+2066 LeftToRightIsolate, + 0x7: RLI, // U+2067 RightToLeftIsolate, + 0x8: FSI, // U+2068 FirstStrongIsolate, + 0x9: PDI, // U+2069 PopDirectionalIsolate, +} + +// LookupRune returns properties for r. +func LookupRune(r rune) (p Properties, size int) { + var buf [4]byte + n := utf8.EncodeRune(buf[:], r) + return Lookup(buf[:n]) +} + +// TODO: these lookup methods are based on the generated trie code. The returned +// sizes have slightly different semantics from the generated code, in that it +// always returns size==1 for an illegal UTF-8 byte (instead of the length +// of the maximum invalid subsequence). Most Transformers, like unicode/norm, +// leave invalid UTF-8 untouched, in which case it has performance benefits to +// do so (without changing the semantics). Bidi requires the semantics used here +// for the bidirule implementation to be compatible with the Go semantics. +// They ultimately should perhaps be adopted by all trie implementations, for +// convenience sake. +// This unrolled code also boosts performance of the secure/bidirule package by +// about 30%. +// So, to remove this code: +// - add option to trie generator to define return type. +// - always return 1 byte size for ill-formed UTF-8 runes. + +// Lookup returns properties for the first rune in s and the width in bytes of +// its encoding. The size will be 0 if s does not hold enough bytes to complete +// the encoding. +func Lookup(s []byte) (p Properties, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return Properties{entry: bidiValues[c0]}, 1 + case c0 < 0xC2: + return Properties{}, 1 + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4 + } + // Illegal rune + return Properties{}, 1 +} + +// LookupString returns properties for the first rune in s and the width in +// bytes of its encoding. The size will be 0 if s does not hold enough bytes to +// complete the encoding. 
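A minimal example of the exported lookup API defined above, assuming the package is imported as golang.org/x/text/unicode/bidi. The low four bits of a trie entry carry the class, bit 4 the open-bracket flag, and bits 5..7 the xor-mask index, which is what Class, IsBracket and IsOpeningBracket decode:

```
package main

import (
	"fmt"

	"golang.org/x/text/unicode/bidi"
)

func main() {
	// LTR letter, an opening and a closing bracket, and a Hebrew letter (alef).
	for _, s := range []string{"a", "(", ")", "\u05d0"} {
		p, sz := bidi.Lookup([]byte(s))
		fmt.Printf("%q: class=%v bracket=%v open=%v size=%d\n",
			s, p.Class(), p.IsBracket(), p.IsOpeningBracket(), sz)
	}
}
```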
+func LookupString(s string) (p Properties, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return Properties{entry: bidiValues[c0]}, 1 + case c0 < 0xC2: + return Properties{}, 1 + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4 + } + // Illegal rune + return Properties{}, 1 +} diff --git a/vendor/golang.org/x/text/unicode/bidi/tables.go b/vendor/golang.org/x/text/unicode/bidi/tables.go new file mode 100644 index 000000000..7212d5add --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables.go @@ -0,0 +1,1779 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 15744 bytes (15.38 KiB). Checksum: b4c3b70954803b86. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. 
+func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 222 blocks, 14208 entries, 14208 bytes +// The third block is the zero block. +var bidiValues = [14208]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 
0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 
0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 
0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 
0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x0001, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x0001, 0x5e1: 0x0001, 0x5e2: 0x0001, 0x5e3: 0x0001, + 0x5e4: 0x0001, 0x5e5: 0x0001, 0x5e6: 0x0001, 0x5e7: 0x0001, 0x5e8: 0x0001, 0x5e9: 0x0001, + 0x5ea: 0x0001, 0x5eb: 0x0001, 0x5ec: 0x0001, 0x5ed: 0x0001, 0x5ee: 0x0001, 0x5ef: 0x0001, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 
0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000d, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 
0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa01: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 
0xed2: 0x000c, 0xed3: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + // Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 
0x000c, 0x12af: 0x000c, + 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 
0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a, 
+ 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 
0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, 0x16e7: 0x000a, 0x16e8: 0x000a, 0x16e9: 0x000a, + 0x16ea: 0x000a, 0x16eb: 0x000a, 0x16ec: 0x000a, 0x16ed: 0x000a, 0x16ee: 0x000a, 0x16ef: 0x000a, + 0x16f0: 0x000a, 0x16f1: 0x000a, 0x16f2: 0x000a, 0x16f3: 0x000a, 0x16f4: 0x000a, 0x16f5: 0x000a, + 0x16f6: 0x000a, 0x16f7: 0x000a, 0x16f8: 0x000a, 0x16f9: 0x000a, 0x16fa: 0x000a, 0x16fb: 0x000a, + 0x16fc: 0x000a, 0x16fd: 0x000a, 0x16fe: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, 0x170b: 0x000a, + 0x170c: 0x000a, 0x170d: 0x000a, 0x170e: 0x000a, 0x170f: 0x000a, 0x1710: 0x000a, 0x1711: 0x000a, + 0x1712: 0x000a, 0x1713: 0x000a, 0x1714: 0x000a, 0x1715: 0x000a, 0x1716: 0x000a, 0x1717: 0x000a, + 0x1718: 0x000a, 0x1719: 0x000a, 0x171a: 0x000a, 0x171b: 0x000a, 0x171c: 0x000a, 0x171d: 0x000a, + 0x171e: 0x000a, 0x171f: 0x000a, 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x000a, 0x1749: 0x000a, 0x174a: 0x000a, + 0x1760: 0x000a, 0x1761: 0x000a, 0x1762: 0x000a, 0x1763: 0x000a, + 0x1764: 0x000a, 0x1765: 0x000a, 0x1766: 0x000a, 0x1767: 0x000a, 0x1768: 0x000a, 0x1769: 0x000a, + 0x176a: 0x000a, 0x176b: 0x000a, 0x176c: 0x000a, 0x176d: 0x000a, 0x176e: 0x000a, 0x176f: 0x000a, + 0x1770: 0x000a, 0x1771: 0x000a, 0x1772: 0x000a, 0x1773: 0x000a, 0x1774: 0x000a, 0x1775: 0x000a, + 0x1776: 0x000a, 0x1777: 0x000a, 0x1778: 0x000a, 0x1779: 0x000a, 0x177a: 0x000a, 0x177b: 0x000a, + 0x177c: 0x000a, 0x177d: 0x000a, 0x177e: 0x000a, 0x177f: 0x000a, + // Block 0x5e, offset 0x1780 + 0x1780: 0x000a, 0x1781: 0x000a, 0x1782: 0x000a, 0x1783: 0x000a, 0x1784: 0x000a, 0x1785: 0x000a, + 
0x1786: 0x000a, 0x1787: 0x000a, 0x1788: 0x0002, 0x1789: 0x0002, 0x178a: 0x0002, 0x178b: 0x0002, + 0x178c: 0x0002, 0x178d: 0x0002, 0x178e: 0x0002, 0x178f: 0x0002, 0x1790: 0x0002, 0x1791: 0x0002, + 0x1792: 0x0002, 0x1793: 0x0002, 0x1794: 0x0002, 0x1795: 0x0002, 0x1796: 0x0002, 0x1797: 0x0002, + 0x1798: 0x0002, 0x1799: 0x0002, 0x179a: 0x0002, 0x179b: 0x0002, + // Block 0x5f, offset 0x17c0 + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ec: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x000a, 0x1829: 0x000a, + 0x182a: 0x000a, 0x182b: 0x000a, 0x182d: 0x000a, 0x182e: 0x000a, 0x182f: 0x000a, + 0x1830: 0x000a, 0x1831: 0x000a, 0x1832: 0x000a, 0x1833: 0x000a, 0x1834: 0x000a, 0x1835: 0x000a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x000a, + 0x1846: 0x000a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x000a, 0x1867: 0x000a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x003a, 0x1871: 0x002a, 0x1872: 0x003a, 0x1873: 0x002a, 0x1874: 0x003a, 0x1875: 0x002a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x000a, 0x1884: 0x000a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x000a, 0x1888: 0x000a, 0x1889: 0x000a, 0x188a: 0x000a, 0x188b: 0x000a, + 0x188c: 0x000a, 0x188d: 0x000a, 0x188e: 0x000a, 0x188f: 0x000a, 0x1890: 0x000a, 0x1891: 0x000a, + 0x1892: 0x000a, 0x1893: 0x000a, 0x1894: 0x000a, 0x1895: 0x000a, 0x1896: 0x000a, 0x1897: 0x000a, + 0x1898: 0x000a, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 
0x003a, 0x18a7: 0x002a, 0x18a8: 0x003a, 0x18a9: 0x002a, + 0x18aa: 0x003a, 0x18ab: 0x002a, 0x18ac: 0x003a, 0x18ad: 0x002a, 0x18ae: 0x003a, 0x18af: 0x002a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x007a, 0x18c4: 0x006a, 0x18c5: 0x009a, + 0x18c6: 0x008a, 0x18c7: 0x00ba, 0x18c8: 0x00aa, 0x18c9: 0x009a, 0x18ca: 0x008a, 0x18cb: 0x007a, + 0x18cc: 0x006a, 0x18cd: 0x00da, 0x18ce: 0x002a, 0x18cf: 0x003a, 0x18d0: 0x00ca, 0x18d1: 0x009a, + 0x18d2: 0x008a, 0x18d3: 0x007a, 0x18d4: 0x006a, 0x18d5: 0x009a, 0x18d6: 0x008a, 0x18d7: 0x00ba, + 0x18d8: 0x00aa, 0x18d9: 0x000a, 0x18da: 0x000a, 0x18db: 0x000a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x000a, 0x18fd: 0x000a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x003a, 0x1919: 0x002a, 0x191a: 0x003a, 0x191b: 0x002a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, 0x1934: 0x000a, 0x1935: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x003a, 0x193d: 0x002a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, 0x1956: 0x000a, 0x1957: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 
0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x1989: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, + 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x000a, 0x19c1: 0x000a, 0x19c2: 0x000a, 0x19c3: 0x000a, 0x19c4: 0x000a, 0x19c5: 0x000a, + 0x19c6: 0x000a, 0x19c7: 0x000a, 0x19c8: 0x000a, 0x19ca: 0x000a, 0x19cb: 0x000a, + 0x19cc: 0x000a, 0x19cd: 0x000a, 0x19ce: 0x000a, 0x19cf: 0x000a, 0x19d0: 0x000a, 0x19d1: 0x000a, + 0x19ec: 0x000a, 0x19ed: 0x000a, 0x19ee: 0x000a, 0x19ef: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a25: 0x000a, 0x1a26: 0x000a, 0x1a27: 0x000a, 0x1a28: 0x000a, 0x1a29: 0x000a, + 0x1a2a: 0x000a, 0x1a2f: 0x000c, + 0x1a30: 0x000c, 0x1a31: 0x000c, + 0x1a39: 0x000a, 0x1a3a: 0x000a, 0x1a3b: 0x000a, + 0x1a3c: 0x000a, 0x1a3d: 0x000a, 0x1a3e: 0x000a, 0x1a3f: 0x000a, + // Block 0x69, offset 0x1a40 + 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1aa0: 0x000c, 0x1aa1: 0x000c, 0x1aa2: 0x000c, 0x1aa3: 0x000c, + 0x1aa4: 0x000c, 0x1aa5: 0x000c, 0x1aa6: 0x000c, 0x1aa7: 0x000c, 0x1aa8: 0x000c, 0x1aa9: 0x000c, + 0x1aaa: 0x000c, 0x1aab: 0x000c, 0x1aac: 0x000c, 0x1aad: 0x000c, 0x1aae: 0x000c, 0x1aaf: 0x000c, + 0x1ab0: 0x000c, 0x1ab1: 0x000c, 0x1ab2: 0x000c, 0x1ab3: 0x000c, 0x1ab4: 0x000c, 0x1ab5: 0x000c, + 0x1ab6: 0x000c, 0x1ab7: 0x000c, 0x1ab8: 0x000c, 0x1ab9: 0x000c, 0x1aba: 0x000c, 0x1abb: 0x000c, + 0x1abc: 0x000c, 0x1abd: 0x000c, 0x1abe: 0x000c, 0x1abf: 0x000c, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, + 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a, + 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1ada: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a, + 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x003a, 0x1ae3: 0x002a, + 0x1ae4: 0x003a, 0x1ae5: 0x002a, 0x1ae6: 0x003a, 0x1ae7: 0x002a, 0x1ae8: 0x003a, 0x1ae9: 0x002a, + 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a, + 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a, + 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a, + 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, + // Block 0x6d, offset 
0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 0x000a, 0x1b75: 0x000a, + 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a, + 0x1b7c: 0x000a, 0x1b7d: 0x000a, 0x1b7e: 0x000a, 0x1b7f: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, 0x1b96: 0x000a, 0x1b97: 0x000a, + 0x1b98: 0x000a, 0x1b99: 0x000a, 0x1b9a: 0x000a, 0x1b9b: 0x000a, 0x1b9c: 0x000a, 0x1b9d: 0x000a, + 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a, 0x1ba1: 0x000a, 0x1ba2: 0x000a, 0x1ba3: 0x000a, + 0x1ba4: 0x000a, 0x1ba5: 0x000a, 0x1ba6: 0x000a, 0x1ba7: 0x000a, 0x1ba8: 0x000a, 0x1ba9: 0x000a, + 0x1baa: 0x000a, 0x1bab: 0x000a, 0x1bac: 0x000a, 0x1bad: 0x000a, 0x1bae: 0x000a, 0x1baf: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x000a, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, 0x1bc5: 0x000a, + 0x1bc6: 0x000a, 0x1bc7: 0x000a, 0x1bc8: 0x000a, 0x1bc9: 0x000a, 0x1bca: 0x000a, 0x1bcb: 0x000a, + 0x1bcc: 0x000a, 0x1bcd: 0x000a, 0x1bce: 0x000a, 0x1bcf: 0x000a, 0x1bd0: 0x000a, 0x1bd1: 0x000a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x000a, 0x1bd5: 0x000a, + 0x1bf0: 0x000a, 0x1bf1: 0x000a, 0x1bf2: 0x000a, 0x1bf3: 0x000a, 0x1bf4: 0x000a, 0x1bf5: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, 0x1bf8: 0x000a, 0x1bf9: 0x000a, 0x1bfa: 0x000a, 0x1bfb: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x0009, 0x1c01: 0x000a, 0x1c02: 0x000a, 0x1c03: 0x000a, 0x1c04: 0x000a, + 0x1c08: 0x003a, 0x1c09: 0x002a, 0x1c0a: 0x003a, 0x1c0b: 0x002a, + 0x1c0c: 0x003a, 0x1c0d: 0x002a, 0x1c0e: 0x003a, 0x1c0f: 0x002a, 0x1c10: 0x003a, 0x1c11: 0x002a, + 0x1c12: 0x000a, 0x1c13: 0x000a, 0x1c14: 0x003a, 0x1c15: 0x002a, 0x1c16: 0x003a, 0x1c17: 0x002a, + 0x1c18: 0x003a, 0x1c19: 0x002a, 0x1c1a: 0x003a, 0x1c1b: 0x002a, 0x1c1c: 0x000a, 0x1c1d: 0x000a, + 0x1c1e: 0x000a, 0x1c1f: 0x000a, 0x1c20: 0x000a, + 0x1c2a: 0x000c, 0x1c2b: 0x000c, 0x1c2c: 0x000c, 0x1c2d: 0x000c, + 0x1c30: 0x000a, + 0x1c36: 0x000a, 0x1c37: 0x000a, + 0x1c3d: 0x000a, 0x1c3e: 0x000a, 0x1c3f: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c59: 0x000c, 0x1c5a: 0x000c, 0x1c5b: 0x000a, 0x1c5c: 0x000a, + 0x1c60: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1cbb: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x000a, 0x1cc1: 0x000a, 0x1cc2: 0x000a, 0x1cc3: 0x000a, 0x1cc4: 0x000a, 0x1cc5: 0x000a, + 0x1cc6: 0x000a, 0x1cc7: 0x000a, 0x1cc8: 0x000a, 0x1cc9: 0x000a, 
0x1cca: 0x000a, 0x1ccb: 0x000a, + 0x1ccc: 0x000a, 0x1ccd: 0x000a, 0x1cce: 0x000a, 0x1ccf: 0x000a, 0x1cd0: 0x000a, 0x1cd1: 0x000a, + 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a, + 0x1cd8: 0x000a, 0x1cd9: 0x000a, 0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a, + 0x1cde: 0x000a, 0x1cdf: 0x000a, 0x1ce0: 0x000a, 0x1ce1: 0x000a, 0x1ce2: 0x000a, 0x1ce3: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d1d: 0x000a, + 0x1d1e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d50: 0x000a, 0x1d51: 0x000a, + 0x1d52: 0x000a, 0x1d53: 0x000a, 0x1d54: 0x000a, 0x1d55: 0x000a, 0x1d56: 0x000a, 0x1d57: 0x000a, + 0x1d58: 0x000a, 0x1d59: 0x000a, 0x1d5a: 0x000a, 0x1d5b: 0x000a, 0x1d5c: 0x000a, 0x1d5d: 0x000a, + 0x1d5e: 0x000a, 0x1d5f: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1db1: 0x000a, 0x1db2: 0x000a, 0x1db3: 0x000a, 0x1db4: 0x000a, 0x1db5: 0x000a, + 0x1db6: 0x000a, 0x1db7: 0x000a, 0x1db8: 0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a, 0x1dbb: 0x000a, + 0x1dbc: 0x000a, 0x1dbd: 0x000a, 0x1dbe: 0x000a, 0x1dbf: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1dcc: 0x000a, 0x1dcd: 0x000a, 0x1dce: 0x000a, 0x1dcf: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e5e: 0x000a, 0x1e5f: 0x000a, + 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e90: 0x000a, 0x1e91: 0x000a, + 0x1e92: 0x000a, 0x1e93: 0x000a, 0x1e94: 0x000a, 0x1e95: 0x000a, 0x1e96: 0x000a, 0x1e97: 0x000a, + 0x1e98: 0x000a, 0x1e99: 0x000a, 0x1e9a: 0x000a, 0x1e9b: 0x000a, 0x1e9c: 0x000a, 0x1e9d: 0x000a, + 0x1e9e: 0x000a, 0x1e9f: 0x000a, 0x1ea0: 0x000a, 0x1ea1: 0x000a, 0x1ea2: 0x000a, 0x1ea3: 0x000a, + 0x1ea4: 0x000a, 0x1ea5: 0x000a, 0x1ea6: 0x000a, 0x1ea7: 0x000a, 0x1ea8: 0x000a, 0x1ea9: 0x000a, + 0x1eaa: 0x000a, 0x1eab: 0x000a, 0x1eac: 0x000a, 0x1ead: 0x000a, 0x1eae: 0x000a, 0x1eaf: 0x000a, + 0x1eb0: 0x000a, 0x1eb1: 0x000a, 0x1eb2: 0x000a, 0x1eb3: 0x000a, 0x1eb4: 0x000a, 0x1eb5: 0x000a, + 0x1eb6: 0x000a, 0x1eb7: 0x000a, 0x1eb8: 0x000a, 0x1eb9: 0x000a, 0x1eba: 0x000a, 0x1ebb: 0x000a, + 0x1ebc: 0x000a, 0x1ebd: 0x000a, 0x1ebe: 0x000a, 0x1ebf: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0x000a, 0x1ec1: 0x000a, 0x1ec2: 0x000a, 0x1ec3: 0x000a, 0x1ec4: 0x000a, 0x1ec5: 0x000a, + 0x1ec6: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f0d: 0x000a, 0x1f0e: 0x000a, 0x1f0f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f6f: 0x000c, + 0x1f70: 0x000c, 0x1f71: 0x000c, 0x1f72: 0x000c, 0x1f73: 0x000a, 0x1f74: 0x000c, 0x1f75: 0x000c, + 0x1f76: 0x000c, 0x1f77: 0x000c, 0x1f78: 0x000c, 0x1f79: 0x000c, 0x1f7a: 0x000c, 0x1f7b: 0x000c, + 0x1f7c: 0x000c, 0x1f7d: 0x000c, 0x1f7e: 0x000a, 0x1f7f: 0x000a, + // Block 0x7e, offset 0x1f80 + 0x1f9e: 0x000c, 0x1f9f: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1ff0: 0x000c, 0x1ff1: 0x000c, + // Block 0x80, offset 0x2000 + 0x2000: 0x000a, 0x2001: 0x000a, 0x2002: 0x000a, 0x2003: 0x000a, 0x2004: 0x000a, 0x2005: 0x000a, + 0x2006: 0x000a, 0x2007: 0x000a, 0x2008: 0x000a, 0x2009: 0x000a, 0x200a: 0x000a, 0x200b: 0x000a, + 0x200c: 0x000a, 0x200d: 0x000a, 0x200e: 0x000a, 0x200f: 0x000a, 0x2010: 0x000a, 0x2011: 0x000a, + 0x2012: 0x000a, 0x2013: 0x000a, 0x2014: 0x000a, 0x2015: 0x000a, 0x2016: 0x000a, 0x2017: 0x000a, + 0x2018: 0x000a, 0x2019: 0x000a, 0x201a: 0x000a, 0x201b: 0x000a, 0x201c: 0x000a, 0x201d: 0x000a, + 0x201e: 0x000a, 0x201f: 0x000a, 0x2020: 0x000a, 0x2021: 0x000a, + // Block 0x81, offset 0x2040 + 0x2048: 0x000a, + // Block 0x82, offset 0x2080 + 0x2082: 
0x000c, + 0x2086: 0x000c, 0x208b: 0x000c, + 0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a8: 0x000a, 0x20a9: 0x000a, + 0x20aa: 0x000a, 0x20ab: 0x000a, + 0x20b8: 0x0004, 0x20b9: 0x0004, + // Block 0x83, offset 0x20c0 + 0x20f4: 0x000a, 0x20f5: 0x000a, + 0x20f6: 0x000a, 0x20f7: 0x000a, + // Block 0x84, offset 0x2100 + 0x2104: 0x000c, 0x2105: 0x000c, + 0x2120: 0x000c, 0x2121: 0x000c, 0x2122: 0x000c, 0x2123: 0x000c, + 0x2124: 0x000c, 0x2125: 0x000c, 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, 0x212e: 0x000c, 0x212f: 0x000c, + 0x2130: 0x000c, 0x2131: 0x000c, + // Block 0x85, offset 0x2140 + 0x2166: 0x000c, 0x2167: 0x000c, 0x2168: 0x000c, 0x2169: 0x000c, + 0x216a: 0x000c, 0x216b: 0x000c, 0x216c: 0x000c, 0x216d: 0x000c, + // Block 0x86, offset 0x2180 + 0x2187: 0x000c, 0x2188: 0x000c, 0x2189: 0x000c, 0x218a: 0x000c, 0x218b: 0x000c, + 0x218c: 0x000c, 0x218d: 0x000c, 0x218e: 0x000c, 0x218f: 0x000c, 0x2190: 0x000c, 0x2191: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21c0: 0x000c, 0x21c1: 0x000c, 0x21c2: 0x000c, + 0x21f3: 0x000c, + 0x21f6: 0x000c, 0x21f7: 0x000c, 0x21f8: 0x000c, 0x21f9: 0x000c, + 0x21fc: 0x000c, + // Block 0x88, offset 0x2200 + 0x2225: 0x000c, + // Block 0x89, offset 0x2240 + 0x2269: 0x000c, + 0x226a: 0x000c, 0x226b: 0x000c, 0x226c: 0x000c, 0x226d: 0x000c, 0x226e: 0x000c, + 0x2271: 0x000c, 0x2272: 0x000c, 0x2275: 0x000c, + 0x2276: 0x000c, + // Block 0x8a, offset 0x2280 + 0x2283: 0x000c, + 0x228c: 0x000c, + 0x22bc: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22f0: 0x000c, 0x22f2: 0x000c, 0x22f3: 0x000c, 0x22f4: 0x000c, + 0x22f7: 0x000c, 0x22f8: 0x000c, + 0x22fe: 0x000c, 0x22ff: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2301: 0x000c, + 0x232c: 0x000c, 0x232d: 0x000c, + 0x2336: 0x000c, + // Block 0x8d, offset 0x2340 + 0x2365: 0x000c, 0x2368: 0x000c, + 0x236d: 0x000c, + // Block 0x8e, offset 0x2380 + 0x239d: 0x0001, + 0x239e: 0x000c, 0x239f: 0x0001, 0x23a0: 0x0001, 0x23a1: 0x0001, 0x23a2: 0x0001, 0x23a3: 0x0001, + 0x23a4: 0x0001, 0x23a5: 0x0001, 0x23a6: 0x0001, 0x23a7: 0x0001, 0x23a8: 0x0001, 0x23a9: 0x0003, + 0x23aa: 0x0001, 0x23ab: 0x0001, 0x23ac: 0x0001, 0x23ad: 0x0001, 0x23ae: 0x0001, 0x23af: 0x0001, + 0x23b0: 0x0001, 0x23b1: 0x0001, 0x23b2: 0x0001, 0x23b3: 0x0001, 0x23b4: 0x0001, 0x23b5: 0x0001, + 0x23b6: 0x0001, 0x23b7: 0x0001, 0x23b8: 0x0001, 0x23b9: 0x0001, 0x23ba: 0x0001, 0x23bb: 0x0001, + 0x23bc: 0x0001, 0x23bd: 0x0001, 0x23be: 0x0001, 0x23bf: 0x0001, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x0001, 0x23c1: 0x0001, 0x23c2: 0x0001, 0x23c3: 0x0001, 0x23c4: 0x0001, 0x23c5: 0x0001, + 0x23c6: 0x0001, 0x23c7: 0x0001, 0x23c8: 0x0001, 0x23c9: 0x0001, 0x23ca: 0x0001, 0x23cb: 0x0001, + 0x23cc: 0x0001, 0x23cd: 0x0001, 0x23ce: 0x0001, 0x23cf: 0x0001, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000d, 0x23ff: 
0x000d, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000d, 0x2411: 0x000d, + 0x2412: 0x000d, 0x2413: 0x000d, 0x2414: 0x000d, 0x2415: 0x000d, 0x2416: 0x000d, 0x2417: 0x000d, + 0x2418: 0x000d, 0x2419: 0x000d, 0x241a: 0x000d, 0x241b: 0x000d, 0x241c: 0x000d, 0x241d: 0x000d, + 0x241e: 0x000d, 0x241f: 0x000d, 0x2420: 0x000d, 0x2421: 0x000d, 0x2422: 0x000d, 0x2423: 0x000d, + 0x2424: 0x000d, 0x2425: 0x000d, 0x2426: 0x000d, 0x2427: 0x000d, 0x2428: 0x000d, 0x2429: 0x000d, + 0x242a: 0x000d, 0x242b: 0x000d, 0x242c: 0x000d, 0x242d: 0x000d, 0x242e: 0x000d, 0x242f: 0x000d, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000d, 0x243e: 0x000a, 0x243f: 0x000a, + // Block 0x91, offset 0x2440 + 0x2440: 0x000d, 0x2441: 0x000d, 0x2442: 0x000d, 0x2443: 0x000d, 0x2444: 0x000d, 0x2445: 0x000d, + 0x2446: 0x000d, 0x2447: 0x000d, 0x2448: 0x000d, 0x2449: 0x000d, 0x244a: 0x000d, 0x244b: 0x000d, + 0x244c: 0x000d, 0x244d: 0x000d, 0x244e: 0x000d, 0x244f: 0x000d, 0x2450: 0x000b, 0x2451: 0x000b, + 0x2452: 0x000b, 0x2453: 0x000b, 0x2454: 0x000b, 0x2455: 0x000b, 0x2456: 0x000b, 0x2457: 0x000b, + 0x2458: 0x000b, 0x2459: 0x000b, 0x245a: 0x000b, 0x245b: 0x000b, 0x245c: 0x000b, 0x245d: 0x000b, + 0x245e: 0x000b, 0x245f: 0x000b, 0x2460: 0x000b, 0x2461: 0x000b, 0x2462: 0x000b, 0x2463: 0x000b, + 0x2464: 0x000b, 0x2465: 0x000b, 0x2466: 0x000b, 0x2467: 0x000b, 0x2468: 0x000b, 0x2469: 0x000b, + 0x246a: 0x000b, 0x246b: 0x000b, 0x246c: 0x000b, 0x246d: 0x000b, 0x246e: 0x000b, 0x246f: 0x000b, + 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, + 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, + 0x247c: 0x000d, 0x247d: 0x000a, 0x247e: 0x000d, 0x247f: 0x000d, + // Block 0x92, offset 0x2480 + 0x2480: 0x000c, 0x2481: 0x000c, 0x2482: 0x000c, 0x2483: 0x000c, 0x2484: 0x000c, 0x2485: 0x000c, + 0x2486: 0x000c, 0x2487: 0x000c, 0x2488: 0x000c, 0x2489: 0x000c, 0x248a: 0x000c, 0x248b: 0x000c, + 0x248c: 0x000c, 0x248d: 0x000c, 0x248e: 0x000c, 0x248f: 0x000c, 0x2490: 0x000a, 0x2491: 0x000a, + 0x2492: 0x000a, 0x2493: 0x000a, 0x2494: 0x000a, 0x2495: 0x000a, 0x2496: 0x000a, 0x2497: 0x000a, + 0x2498: 0x000a, 0x2499: 0x000a, + 0x24a0: 0x000c, 0x24a1: 0x000c, 0x24a2: 0x000c, 0x24a3: 0x000c, + 0x24a4: 0x000c, 0x24a5: 0x000c, 0x24a6: 0x000c, 0x24a7: 0x000c, 0x24a8: 0x000c, 0x24a9: 0x000c, + 0x24aa: 0x000c, 0x24ab: 0x000c, 0x24ac: 0x000c, 0x24ad: 0x000c, 0x24ae: 0x000c, 0x24af: 0x000c, + 0x24b0: 0x000a, 0x24b1: 0x000a, 0x24b2: 0x000a, 0x24b3: 0x000a, 0x24b4: 0x000a, 0x24b5: 0x000a, + 0x24b6: 0x000a, 0x24b7: 0x000a, 0x24b8: 0x000a, 0x24b9: 0x000a, 0x24ba: 0x000a, 0x24bb: 0x000a, + 0x24bc: 0x000a, 0x24bd: 0x000a, 0x24be: 0x000a, 0x24bf: 0x000a, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000a, 0x24c1: 0x000a, 0x24c2: 0x000a, 0x24c3: 0x000a, 0x24c4: 0x000a, 0x24c5: 0x000a, + 0x24c6: 0x000a, 0x24c7: 0x000a, 0x24c8: 0x000a, 0x24c9: 0x000a, 0x24ca: 0x000a, 0x24cb: 0x000a, + 0x24cc: 0x000a, 0x24cd: 0x000a, 0x24ce: 0x000a, 0x24cf: 0x000a, 0x24d0: 0x0006, 0x24d1: 0x000a, + 0x24d2: 0x0006, 0x24d4: 0x000a, 0x24d5: 0x0006, 0x24d6: 0x000a, 0x24d7: 0x000a, + 
0x24d8: 0x000a, 0x24d9: 0x009a, 0x24da: 0x008a, 0x24db: 0x007a, 0x24dc: 0x006a, 0x24dd: 0x009a, + 0x24de: 0x008a, 0x24df: 0x0004, 0x24e0: 0x000a, 0x24e1: 0x000a, 0x24e2: 0x0003, 0x24e3: 0x0003, + 0x24e4: 0x000a, 0x24e5: 0x000a, 0x24e6: 0x000a, 0x24e8: 0x000a, 0x24e9: 0x0004, + 0x24ea: 0x0004, 0x24eb: 0x000a, + 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, + 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, + 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000d, + // Block 0x94, offset 0x2500 + 0x2500: 0x000d, 0x2501: 0x000d, 0x2502: 0x000d, 0x2503: 0x000d, 0x2504: 0x000d, 0x2505: 0x000d, + 0x2506: 0x000d, 0x2507: 0x000d, 0x2508: 0x000d, 0x2509: 0x000d, 0x250a: 0x000d, 0x250b: 0x000d, + 0x250c: 0x000d, 0x250d: 0x000d, 0x250e: 0x000d, 0x250f: 0x000d, 0x2510: 0x000d, 0x2511: 0x000d, + 0x2512: 0x000d, 0x2513: 0x000d, 0x2514: 0x000d, 0x2515: 0x000d, 0x2516: 0x000d, 0x2517: 0x000d, + 0x2518: 0x000d, 0x2519: 0x000d, 0x251a: 0x000d, 0x251b: 0x000d, 0x251c: 0x000d, 0x251d: 0x000d, + 0x251e: 0x000d, 0x251f: 0x000d, 0x2520: 0x000d, 0x2521: 0x000d, 0x2522: 0x000d, 0x2523: 0x000d, + 0x2524: 0x000d, 0x2525: 0x000d, 0x2526: 0x000d, 0x2527: 0x000d, 0x2528: 0x000d, 0x2529: 0x000d, + 0x252a: 0x000d, 0x252b: 0x000d, 0x252c: 0x000d, 0x252d: 0x000d, 0x252e: 0x000d, 0x252f: 0x000d, + 0x2530: 0x000d, 0x2531: 0x000d, 0x2532: 0x000d, 0x2533: 0x000d, 0x2534: 0x000d, 0x2535: 0x000d, + 0x2536: 0x000d, 0x2537: 0x000d, 0x2538: 0x000d, 0x2539: 0x000d, 0x253a: 0x000d, 0x253b: 0x000d, + 0x253c: 0x000d, 0x253d: 0x000d, 0x253e: 0x000d, 0x253f: 0x000b, + // Block 0x95, offset 0x2540 + 0x2541: 0x000a, 0x2542: 0x000a, 0x2543: 0x0004, 0x2544: 0x0004, 0x2545: 0x0004, + 0x2546: 0x000a, 0x2547: 0x000a, 0x2548: 0x003a, 0x2549: 0x002a, 0x254a: 0x000a, 0x254b: 0x0003, + 0x254c: 0x0006, 0x254d: 0x0003, 0x254e: 0x0006, 0x254f: 0x0006, 0x2550: 0x0002, 0x2551: 0x0002, + 0x2552: 0x0002, 0x2553: 0x0002, 0x2554: 0x0002, 0x2555: 0x0002, 0x2556: 0x0002, 0x2557: 0x0002, + 0x2558: 0x0002, 0x2559: 0x0002, 0x255a: 0x0006, 0x255b: 0x000a, 0x255c: 0x000a, 0x255d: 0x000a, + 0x255e: 0x000a, 0x255f: 0x000a, 0x2560: 0x000a, + 0x257b: 0x005a, + 0x257c: 0x000a, 0x257d: 0x004a, 0x257e: 0x000a, 0x257f: 0x000a, + // Block 0x96, offset 0x2580 + 0x2580: 0x000a, + 0x259b: 0x005a, 0x259c: 0x000a, 0x259d: 0x004a, + 0x259e: 0x000a, 0x259f: 0x00fa, 0x25a0: 0x00ea, 0x25a1: 0x000a, 0x25a2: 0x003a, 0x25a3: 0x002a, + 0x25a4: 0x000a, 0x25a5: 0x000a, + // Block 0x97, offset 0x25c0 + 0x25e0: 0x0004, 0x25e1: 0x0004, 0x25e2: 0x000a, 0x25e3: 0x000a, + 0x25e4: 0x000a, 0x25e5: 0x0004, 0x25e6: 0x0004, 0x25e8: 0x000a, 0x25e9: 0x000a, + 0x25ea: 0x000a, 0x25eb: 0x000a, 0x25ec: 0x000a, 0x25ed: 0x000a, 0x25ee: 0x000a, + 0x25f0: 0x000b, 0x25f1: 0x000b, 0x25f2: 0x000b, 0x25f3: 0x000b, 0x25f4: 0x000b, 0x25f5: 0x000b, + 0x25f6: 0x000b, 0x25f7: 0x000b, 0x25f8: 0x000b, 0x25f9: 0x000a, 0x25fa: 0x000a, 0x25fb: 0x000a, + 0x25fc: 0x000a, 0x25fd: 0x000a, 0x25fe: 0x000b, 0x25ff: 0x000b, + // Block 0x98, offset 0x2600 + 0x2601: 0x000a, + // Block 0x99, offset 0x2640 + 0x2640: 0x000a, 0x2641: 0x000a, 0x2642: 0x000a, 0x2643: 0x000a, 0x2644: 0x000a, 0x2645: 0x000a, + 0x2646: 0x000a, 0x2647: 0x000a, 0x2648: 0x000a, 0x2649: 0x000a, 0x264a: 0x000a, 0x264b: 0x000a, + 0x264c: 0x000a, 0x2650: 0x000a, 0x2651: 0x000a, + 0x2652: 0x000a, 0x2653: 0x000a, 0x2654: 0x000a, 0x2655: 0x000a, 0x2656: 0x000a, 0x2657: 0x000a, + 0x2658: 0x000a, 0x2659: 0x000a, 0x265a: 0x000a, 0x265b: 0x000a, + 0x2660: 0x000a, + // 
Block 0x9a, offset 0x2680 + 0x26bd: 0x000c, + // Block 0x9b, offset 0x26c0 + 0x26e0: 0x000c, 0x26e1: 0x0002, 0x26e2: 0x0002, 0x26e3: 0x0002, + 0x26e4: 0x0002, 0x26e5: 0x0002, 0x26e6: 0x0002, 0x26e7: 0x0002, 0x26e8: 0x0002, 0x26e9: 0x0002, + 0x26ea: 0x0002, 0x26eb: 0x0002, 0x26ec: 0x0002, 0x26ed: 0x0002, 0x26ee: 0x0002, 0x26ef: 0x0002, + 0x26f0: 0x0002, 0x26f1: 0x0002, 0x26f2: 0x0002, 0x26f3: 0x0002, 0x26f4: 0x0002, 0x26f5: 0x0002, + 0x26f6: 0x0002, 0x26f7: 0x0002, 0x26f8: 0x0002, 0x26f9: 0x0002, 0x26fa: 0x0002, 0x26fb: 0x0002, + // Block 0x9c, offset 0x2700 + 0x2736: 0x000c, 0x2737: 0x000c, 0x2738: 0x000c, 0x2739: 0x000c, 0x273a: 0x000c, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, + 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x0001, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, + 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, + 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x000a, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x000c, 0x27c2: 0x000c, 0x27c3: 0x000c, 0x27c4: 0x0001, 0x27c5: 0x000c, + 0x27c6: 0x000c, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x000c, 0x27cd: 0x000c, 0x27ce: 0x000c, 0x27cf: 0x000c, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 
0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x000c, 0x27f9: 0x000c, 0x27fa: 0x000c, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x000c, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, + 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x000c, 0x2826: 0x000c, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x0001, 0x283a: 0x0001, 0x283b: 0x0001, + 0x283c: 0x0001, 0x283d: 0x0001, 0x283e: 0x0001, 0x283f: 0x0001, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0001, 0x2861: 0x0001, 0x2862: 0x0001, 0x2863: 0x0001, + 0x2864: 0x0001, 0x2865: 0x0001, 0x2866: 0x0001, 0x2867: 0x0001, 0x2868: 0x0001, 0x2869: 0x0001, + 0x286a: 0x0001, 0x286b: 0x0001, 0x286c: 0x0001, 0x286d: 0x0001, 0x286e: 0x0001, 0x286f: 0x0001, + 0x2870: 0x0001, 0x2871: 0x0001, 0x2872: 0x0001, 0x2873: 0x0001, 0x2874: 0x0001, 0x2875: 0x0001, + 0x2876: 0x0001, 0x2877: 0x0001, 0x2878: 0x0001, 0x2879: 0x000a, 0x287a: 0x000a, 0x287b: 0x000a, + 0x287c: 0x000a, 0x287d: 0x000a, 0x287e: 0x000a, 0x287f: 0x000a, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0005, 0x28a1: 0x0005, 0x28a2: 0x0005, 0x28a3: 0x0005, + 0x28a4: 0x0005, 0x28a5: 0x0005, 0x28a6: 0x0005, 0x28a7: 0x0005, 0x28a8: 0x0005, 0x28a9: 0x0005, + 0x28aa: 0x0005, 0x28ab: 0x0005, 0x28ac: 0x0005, 0x28ad: 0x0005, 0x28ae: 0x0005, 0x28af: 0x0005, + 0x28b0: 0x0005, 0x28b1: 0x0005, 0x28b2: 0x0005, 0x28b3: 0x0005, 0x28b4: 0x0005, 0x28b5: 0x0005, + 0x28b6: 0x0005, 0x28b7: 0x0005, 0x28b8: 0x0005, 0x28b9: 0x0005, 0x28ba: 0x0005, 0x28bb: 0x0005, + 0x28bc: 0x0005, 0x28bd: 0x0005, 0x28be: 0x0005, 0x28bf: 
0x0001, + // Block 0xa3, offset 0x28c0 + 0x28c1: 0x000c, + 0x28f8: 0x000c, 0x28f9: 0x000c, 0x28fa: 0x000c, 0x28fb: 0x000c, + 0x28fc: 0x000c, 0x28fd: 0x000c, 0x28fe: 0x000c, 0x28ff: 0x000c, + // Block 0xa4, offset 0x2900 + 0x2900: 0x000c, 0x2901: 0x000c, 0x2902: 0x000c, 0x2903: 0x000c, 0x2904: 0x000c, 0x2905: 0x000c, + 0x2906: 0x000c, + 0x2912: 0x000a, 0x2913: 0x000a, 0x2914: 0x000a, 0x2915: 0x000a, 0x2916: 0x000a, 0x2917: 0x000a, + 0x2918: 0x000a, 0x2919: 0x000a, 0x291a: 0x000a, 0x291b: 0x000a, 0x291c: 0x000a, 0x291d: 0x000a, + 0x291e: 0x000a, 0x291f: 0x000a, 0x2920: 0x000a, 0x2921: 0x000a, 0x2922: 0x000a, 0x2923: 0x000a, + 0x2924: 0x000a, 0x2925: 0x000a, + 0x293f: 0x000c, + // Block 0xa5, offset 0x2940 + 0x2940: 0x000c, 0x2941: 0x000c, + 0x2973: 0x000c, 0x2974: 0x000c, 0x2975: 0x000c, + 0x2976: 0x000c, 0x2979: 0x000c, 0x297a: 0x000c, + // Block 0xa6, offset 0x2980 + 0x2980: 0x000c, 0x2981: 0x000c, 0x2982: 0x000c, + 0x29a7: 0x000c, 0x29a8: 0x000c, 0x29a9: 0x000c, + 0x29aa: 0x000c, 0x29ab: 0x000c, 0x29ad: 0x000c, 0x29ae: 0x000c, 0x29af: 0x000c, + 0x29b0: 0x000c, 0x29b1: 0x000c, 0x29b2: 0x000c, 0x29b3: 0x000c, 0x29b4: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29f3: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x000c, 0x2a01: 0x000c, + 0x2a36: 0x000c, 0x2a37: 0x000c, 0x2a38: 0x000c, 0x2a39: 0x000c, 0x2a3a: 0x000c, 0x2a3b: 0x000c, + 0x2a3c: 0x000c, 0x2a3d: 0x000c, 0x2a3e: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a4a: 0x000c, 0x2a4b: 0x000c, + 0x2a4c: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2aaf: 0x000c, + 0x2ab0: 0x000c, 0x2ab1: 0x000c, 0x2ab4: 0x000c, + 0x2ab6: 0x000c, 0x2ab7: 0x000c, + 0x2abe: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2adf: 0x000c, 0x2ae3: 0x000c, + 0x2ae4: 0x000c, 0x2ae5: 0x000c, 0x2ae6: 0x000c, 0x2ae7: 0x000c, 0x2ae8: 0x000c, 0x2ae9: 0x000c, + 0x2aea: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b00: 0x000c, 0x2b01: 0x000c, + 0x2b3c: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x000c, + 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, 0x2b6b: 0x000c, 0x2b6c: 0x000c, + 0x2b70: 0x000c, 0x2b71: 0x000c, 0x2b72: 0x000c, 0x2b73: 0x000c, 0x2b74: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2bb8: 0x000c, 0x2bb9: 0x000c, 0x2bba: 0x000c, 0x2bbb: 0x000c, + 0x2bbc: 0x000c, 0x2bbd: 0x000c, 0x2bbe: 0x000c, 0x2bbf: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bc2: 0x000c, 0x2bc3: 0x000c, 0x2bc4: 0x000c, + 0x2bc6: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c33: 0x000c, 0x2c34: 0x000c, 0x2c35: 0x000c, + 0x2c36: 0x000c, 0x2c37: 0x000c, 0x2c38: 0x000c, 0x2c3a: 0x000c, + 0x2c3f: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c40: 0x000c, 0x2c42: 0x000c, 0x2c43: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2cb2: 0x000c, 0x2cb3: 0x000c, 0x2cb4: 0x000c, 0x2cb5: 0x000c, + 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbf: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cc0: 0x000c, + 0x2cdc: 0x000c, 0x2cdd: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c, + 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d39: 0x000c, 0x2d3a: 0x000c, + 0x2d3d: 0x000c, 0x2d3f: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d40: 0x000c, + 0x2d60: 0x000a, 0x2d61: 0x000a, 0x2d62: 0x000a, 0x2d63: 0x000a, + 0x2d64: 0x000a, 0x2d65: 0x000a, 0x2d66: 0x000a, 0x2d67: 0x000a, 0x2d68: 0x000a, 0x2d69: 0x000a, + 0x2d6a: 0x000a, 0x2d6b: 0x000a, 0x2d6c: 0x000a, + // Block 0xb6, offset 0x2d80 + 0x2dab: 0x000c, 0x2dad: 0x000c, + 0x2db0: 0x000c, 0x2db1: 0x000c, 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, + 0x2db7: 0x000c, + // Block 0xb7, offset 
0x2dc0 + 0x2ddd: 0x000c, + 0x2dde: 0x000c, 0x2ddf: 0x000c, 0x2de2: 0x000c, 0x2de3: 0x000c, + 0x2de4: 0x000c, 0x2de5: 0x000c, 0x2de7: 0x000c, 0x2de8: 0x000c, 0x2de9: 0x000c, + 0x2dea: 0x000c, 0x2deb: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e30: 0x000c, 0x2e31: 0x000c, 0x2e32: 0x000c, 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, + 0x2e36: 0x000c, 0x2e38: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c, 0x2e3b: 0x000c, + 0x2e3c: 0x000c, 0x2e3d: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e52: 0x000c, 0x2e53: 0x000c, 0x2e54: 0x000c, 0x2e55: 0x000c, 0x2e56: 0x000c, 0x2e57: 0x000c, + 0x2e58: 0x000c, 0x2e59: 0x000c, 0x2e5a: 0x000c, 0x2e5b: 0x000c, 0x2e5c: 0x000c, 0x2e5d: 0x000c, + 0x2e5e: 0x000c, 0x2e5f: 0x000c, 0x2e60: 0x000c, 0x2e61: 0x000c, 0x2e62: 0x000c, 0x2e63: 0x000c, + 0x2e64: 0x000c, 0x2e65: 0x000c, 0x2e66: 0x000c, 0x2e67: 0x000c, + 0x2e6a: 0x000c, 0x2e6b: 0x000c, 0x2e6c: 0x000c, 0x2e6d: 0x000c, 0x2e6e: 0x000c, 0x2e6f: 0x000c, + 0x2e70: 0x000c, 0x2e72: 0x000c, 0x2e73: 0x000c, 0x2e75: 0x000c, + 0x2e76: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2eb0: 0x000c, 0x2eb1: 0x000c, 0x2eb2: 0x000c, 0x2eb3: 0x000c, 0x2eb4: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ef0: 0x000c, 0x2ef1: 0x000c, 0x2ef2: 0x000c, 0x2ef3: 0x000c, 0x2ef4: 0x000c, 0x2ef5: 0x000c, + 0x2ef6: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, + 0x2f12: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f5d: 0x000c, + 0x2f5e: 0x000c, 0x2f60: 0x000b, 0x2f61: 0x000b, 0x2f62: 0x000b, 0x2f63: 0x000b, + // Block 0xbe, offset 0x2f80 + 0x2fa7: 0x000c, 0x2fa8: 0x000c, 0x2fa9: 0x000c, + 0x2fb3: 0x000b, 0x2fb4: 0x000b, 0x2fb5: 0x000b, + 0x2fb6: 0x000b, 0x2fb7: 0x000b, 0x2fb8: 0x000b, 0x2fb9: 0x000b, 0x2fba: 0x000b, 0x2fbb: 0x000c, + 0x2fbc: 0x000c, 0x2fbd: 0x000c, 0x2fbe: 0x000c, 0x2fbf: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2fc0: 0x000c, 0x2fc1: 0x000c, 0x2fc2: 0x000c, 0x2fc5: 0x000c, + 0x2fc6: 0x000c, 0x2fc7: 0x000c, 0x2fc8: 0x000c, 0x2fc9: 0x000c, 0x2fca: 0x000c, 0x2fcb: 0x000c, + 0x2fea: 0x000c, 0x2feb: 0x000c, 0x2fec: 0x000c, 0x2fed: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3000: 0x000a, 0x3001: 0x000a, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000a, + // Block 0xc1, offset 0x3040 + 0x3040: 0x000a, 0x3041: 0x000a, 0x3042: 0x000a, 0x3043: 0x000a, 0x3044: 0x000a, 0x3045: 0x000a, + 0x3046: 0x000a, 0x3047: 0x000a, 0x3048: 0x000a, 0x3049: 0x000a, 0x304a: 0x000a, 0x304b: 0x000a, + 0x304c: 0x000a, 0x304d: 0x000a, 0x304e: 0x000a, 0x304f: 0x000a, 0x3050: 0x000a, 0x3051: 0x000a, + 0x3052: 0x000a, 0x3053: 0x000a, 0x3054: 0x000a, 0x3055: 0x000a, 0x3056: 0x000a, + // Block 0xc2, offset 0x3080 + 0x309b: 0x000a, + // Block 0xc3, offset 0x30c0 + 0x30d5: 0x000a, + // Block 0xc4, offset 0x3100 + 0x310f: 0x000a, + // Block 0xc5, offset 0x3140 + 0x3149: 0x000a, + // Block 0xc6, offset 0x3180 + 0x3183: 0x000a, + 0x318e: 0x0002, 0x318f: 0x0002, 0x3190: 0x0002, 0x3191: 0x0002, + 0x3192: 0x0002, 0x3193: 0x0002, 0x3194: 0x0002, 0x3195: 0x0002, 0x3196: 0x0002, 0x3197: 0x0002, + 0x3198: 0x0002, 0x3199: 0x0002, 0x319a: 0x0002, 0x319b: 0x0002, 0x319c: 0x0002, 0x319d: 0x0002, + 0x319e: 0x0002, 0x319f: 0x0002, 0x31a0: 0x0002, 0x31a1: 0x0002, 0x31a2: 0x0002, 0x31a3: 0x0002, + 0x31a4: 0x0002, 0x31a5: 0x0002, 0x31a6: 0x0002, 0x31a7: 0x0002, 0x31a8: 0x0002, 0x31a9: 0x0002, + 0x31aa: 0x0002, 0x31ab: 0x0002, 0x31ac: 0x0002, 0x31ad: 0x0002, 0x31ae: 0x0002, 0x31af: 0x0002, + 0x31b0: 0x0002, 0x31b1: 0x0002, 0x31b2: 0x0002, 0x31b3: 0x0002, 0x31b4: 0x0002, 0x31b5: 0x0002, + 0x31b6: 0x0002, 0x31b7: 0x0002, 0x31b8: 
0x0002, 0x31b9: 0x0002, 0x31ba: 0x0002, 0x31bb: 0x0002, + 0x31bc: 0x0002, 0x31bd: 0x0002, 0x31be: 0x0002, 0x31bf: 0x0002, + // Block 0xc7, offset 0x31c0 + 0x31c0: 0x000c, 0x31c1: 0x000c, 0x31c2: 0x000c, 0x31c3: 0x000c, 0x31c4: 0x000c, 0x31c5: 0x000c, + 0x31c6: 0x000c, 0x31c7: 0x000c, 0x31c8: 0x000c, 0x31c9: 0x000c, 0x31ca: 0x000c, 0x31cb: 0x000c, + 0x31cc: 0x000c, 0x31cd: 0x000c, 0x31ce: 0x000c, 0x31cf: 0x000c, 0x31d0: 0x000c, 0x31d1: 0x000c, + 0x31d2: 0x000c, 0x31d3: 0x000c, 0x31d4: 0x000c, 0x31d5: 0x000c, 0x31d6: 0x000c, 0x31d7: 0x000c, + 0x31d8: 0x000c, 0x31d9: 0x000c, 0x31da: 0x000c, 0x31db: 0x000c, 0x31dc: 0x000c, 0x31dd: 0x000c, + 0x31de: 0x000c, 0x31df: 0x000c, 0x31e0: 0x000c, 0x31e1: 0x000c, 0x31e2: 0x000c, 0x31e3: 0x000c, + 0x31e4: 0x000c, 0x31e5: 0x000c, 0x31e6: 0x000c, 0x31e7: 0x000c, 0x31e8: 0x000c, 0x31e9: 0x000c, + 0x31ea: 0x000c, 0x31eb: 0x000c, 0x31ec: 0x000c, 0x31ed: 0x000c, 0x31ee: 0x000c, 0x31ef: 0x000c, + 0x31f0: 0x000c, 0x31f1: 0x000c, 0x31f2: 0x000c, 0x31f3: 0x000c, 0x31f4: 0x000c, 0x31f5: 0x000c, + 0x31f6: 0x000c, 0x31fb: 0x000c, + 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31fe: 0x000c, 0x31ff: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3203: 0x000c, 0x3204: 0x000c, 0x3205: 0x000c, + 0x3206: 0x000c, 0x3207: 0x000c, 0x3208: 0x000c, 0x3209: 0x000c, 0x320a: 0x000c, 0x320b: 0x000c, + 0x320c: 0x000c, 0x320d: 0x000c, 0x320e: 0x000c, 0x320f: 0x000c, 0x3210: 0x000c, 0x3211: 0x000c, + 0x3212: 0x000c, 0x3213: 0x000c, 0x3214: 0x000c, 0x3215: 0x000c, 0x3216: 0x000c, 0x3217: 0x000c, + 0x3218: 0x000c, 0x3219: 0x000c, 0x321a: 0x000c, 0x321b: 0x000c, 0x321c: 0x000c, 0x321d: 0x000c, + 0x321e: 0x000c, 0x321f: 0x000c, 0x3220: 0x000c, 0x3221: 0x000c, 0x3222: 0x000c, 0x3223: 0x000c, + 0x3224: 0x000c, 0x3225: 0x000c, 0x3226: 0x000c, 0x3227: 0x000c, 0x3228: 0x000c, 0x3229: 0x000c, + 0x322a: 0x000c, 0x322b: 0x000c, 0x322c: 0x000c, + 0x3235: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3244: 0x000c, + 0x325b: 0x000c, 0x325c: 0x000c, 0x325d: 0x000c, + 0x325e: 0x000c, 0x325f: 0x000c, 0x3261: 0x000c, 0x3262: 0x000c, 0x3263: 0x000c, + 0x3264: 0x000c, 0x3265: 0x000c, 0x3266: 0x000c, 0x3267: 0x000c, 0x3268: 0x000c, 0x3269: 0x000c, + 0x326a: 0x000c, 0x326b: 0x000c, 0x326c: 0x000c, 0x326d: 0x000c, 0x326e: 0x000c, 0x326f: 0x000c, + // Block 0xca, offset 0x3280 + 0x3280: 0x000c, 0x3281: 0x000c, 0x3282: 0x000c, 0x3283: 0x000c, 0x3284: 0x000c, 0x3285: 0x000c, + 0x3286: 0x000c, 0x3288: 0x000c, 0x3289: 0x000c, 0x328a: 0x000c, 0x328b: 0x000c, + 0x328c: 0x000c, 0x328d: 0x000c, 0x328e: 0x000c, 0x328f: 0x000c, 0x3290: 0x000c, 0x3291: 0x000c, + 0x3292: 0x000c, 0x3293: 0x000c, 0x3294: 0x000c, 0x3295: 0x000c, 0x3296: 0x000c, 0x3297: 0x000c, + 0x3298: 0x000c, 0x329b: 0x000c, 0x329c: 0x000c, 0x329d: 0x000c, + 0x329e: 0x000c, 0x329f: 0x000c, 0x32a0: 0x000c, 0x32a1: 0x000c, 0x32a3: 0x000c, + 0x32a4: 0x000c, 0x32a6: 0x000c, 0x32a7: 0x000c, 0x32a8: 0x000c, 0x32a9: 0x000c, + 0x32aa: 0x000c, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x0001, 0x32c1: 0x0001, 0x32c2: 0x0001, 0x32c3: 0x0001, 0x32c4: 0x0001, 0x32c5: 0x0001, + 0x32c6: 0x0001, 0x32c7: 0x0001, 0x32c8: 0x0001, 0x32c9: 0x0001, 0x32ca: 0x0001, 0x32cb: 0x0001, + 0x32cc: 0x0001, 0x32cd: 0x0001, 0x32ce: 0x0001, 0x32cf: 0x0001, 0x32d0: 0x000c, 0x32d1: 0x000c, + 0x32d2: 0x000c, 0x32d3: 0x000c, 0x32d4: 0x000c, 0x32d5: 0x000c, 0x32d6: 0x000c, 0x32d7: 0x0001, + 0x32d8: 0x0001, 0x32d9: 0x0001, 0x32da: 0x0001, 0x32db: 0x0001, 0x32dc: 0x0001, 0x32dd: 0x0001, + 0x32de: 0x0001, 0x32df: 0x0001, 0x32e0: 0x0001, 0x32e1: 0x0001, 0x32e2: 0x0001, 
0x32e3: 0x0001, + 0x32e4: 0x0001, 0x32e5: 0x0001, 0x32e6: 0x0001, 0x32e7: 0x0001, 0x32e8: 0x0001, 0x32e9: 0x0001, + 0x32ea: 0x0001, 0x32eb: 0x0001, 0x32ec: 0x0001, 0x32ed: 0x0001, 0x32ee: 0x0001, 0x32ef: 0x0001, + 0x32f0: 0x0001, 0x32f1: 0x0001, 0x32f2: 0x0001, 0x32f3: 0x0001, 0x32f4: 0x0001, 0x32f5: 0x0001, + 0x32f6: 0x0001, 0x32f7: 0x0001, 0x32f8: 0x0001, 0x32f9: 0x0001, 0x32fa: 0x0001, 0x32fb: 0x0001, + 0x32fc: 0x0001, 0x32fd: 0x0001, 0x32fe: 0x0001, 0x32ff: 0x0001, + // Block 0xcc, offset 0x3300 + 0x3300: 0x0001, 0x3301: 0x0001, 0x3302: 0x0001, 0x3303: 0x0001, 0x3304: 0x000c, 0x3305: 0x000c, + 0x3306: 0x000c, 0x3307: 0x000c, 0x3308: 0x000c, 0x3309: 0x000c, 0x330a: 0x000c, 0x330b: 0x0001, + 0x330c: 0x0001, 0x330d: 0x0001, 0x330e: 0x0001, 0x330f: 0x0001, 0x3310: 0x0001, 0x3311: 0x0001, + 0x3312: 0x0001, 0x3313: 0x0001, 0x3314: 0x0001, 0x3315: 0x0001, 0x3316: 0x0001, 0x3317: 0x0001, + 0x3318: 0x0001, 0x3319: 0x0001, 0x331a: 0x0001, 0x331b: 0x0001, 0x331c: 0x0001, 0x331d: 0x0001, + 0x331e: 0x0001, 0x331f: 0x0001, 0x3320: 0x0001, 0x3321: 0x0001, 0x3322: 0x0001, 0x3323: 0x0001, + 0x3324: 0x0001, 0x3325: 0x0001, 0x3326: 0x0001, 0x3327: 0x0001, 0x3328: 0x0001, 0x3329: 0x0001, + 0x332a: 0x0001, 0x332b: 0x0001, 0x332c: 0x0001, 0x332d: 0x0001, 0x332e: 0x0001, 0x332f: 0x0001, + 0x3330: 0x0001, 0x3331: 0x0001, 0x3332: 0x0001, 0x3333: 0x0001, 0x3334: 0x0001, 0x3335: 0x0001, + 0x3336: 0x0001, 0x3337: 0x0001, 0x3338: 0x0001, 0x3339: 0x0001, 0x333a: 0x0001, 0x333b: 0x0001, + 0x333c: 0x0001, 0x333d: 0x0001, 0x333e: 0x0001, 0x333f: 0x0001, + // Block 0xcd, offset 0x3340 + 0x3340: 0x000d, 0x3341: 0x000d, 0x3342: 0x000d, 0x3343: 0x000d, 0x3344: 0x000d, 0x3345: 0x000d, + 0x3346: 0x000d, 0x3347: 0x000d, 0x3348: 0x000d, 0x3349: 0x000d, 0x334a: 0x000d, 0x334b: 0x000d, + 0x334c: 0x000d, 0x334d: 0x000d, 0x334e: 0x000d, 0x334f: 0x000d, 0x3350: 0x000d, 0x3351: 0x000d, + 0x3352: 0x000d, 0x3353: 0x000d, 0x3354: 0x000d, 0x3355: 0x000d, 0x3356: 0x000d, 0x3357: 0x000d, + 0x3358: 0x000d, 0x3359: 0x000d, 0x335a: 0x000d, 0x335b: 0x000d, 0x335c: 0x000d, 0x335d: 0x000d, + 0x335e: 0x000d, 0x335f: 0x000d, 0x3360: 0x000d, 0x3361: 0x000d, 0x3362: 0x000d, 0x3363: 0x000d, + 0x3364: 0x000d, 0x3365: 0x000d, 0x3366: 0x000d, 0x3367: 0x000d, 0x3368: 0x000d, 0x3369: 0x000d, + 0x336a: 0x000d, 0x336b: 0x000d, 0x336c: 0x000d, 0x336d: 0x000d, 0x336e: 0x000d, 0x336f: 0x000d, + 0x3370: 0x000a, 0x3371: 0x000a, 0x3372: 0x000d, 0x3373: 0x000d, 0x3374: 0x000d, 0x3375: 0x000d, + 0x3376: 0x000d, 0x3377: 0x000d, 0x3378: 0x000d, 0x3379: 0x000d, 0x337a: 0x000d, 0x337b: 0x000d, + 0x337c: 0x000d, 0x337d: 0x000d, 0x337e: 0x000d, 0x337f: 0x000d, + // Block 0xce, offset 0x3380 + 0x3380: 0x000a, 0x3381: 0x000a, 0x3382: 0x000a, 0x3383: 0x000a, 0x3384: 0x000a, 0x3385: 0x000a, + 0x3386: 0x000a, 0x3387: 0x000a, 0x3388: 0x000a, 0x3389: 0x000a, 0x338a: 0x000a, 0x338b: 0x000a, + 0x338c: 0x000a, 0x338d: 0x000a, 0x338e: 0x000a, 0x338f: 0x000a, 0x3390: 0x000a, 0x3391: 0x000a, + 0x3392: 0x000a, 0x3393: 0x000a, 0x3394: 0x000a, 0x3395: 0x000a, 0x3396: 0x000a, 0x3397: 0x000a, + 0x3398: 0x000a, 0x3399: 0x000a, 0x339a: 0x000a, 0x339b: 0x000a, 0x339c: 0x000a, 0x339d: 0x000a, + 0x339e: 0x000a, 0x339f: 0x000a, 0x33a0: 0x000a, 0x33a1: 0x000a, 0x33a2: 0x000a, 0x33a3: 0x000a, + 0x33a4: 0x000a, 0x33a5: 0x000a, 0x33a6: 0x000a, 0x33a7: 0x000a, 0x33a8: 0x000a, 0x33a9: 0x000a, + 0x33aa: 0x000a, 0x33ab: 0x000a, + 0x33b0: 0x000a, 0x33b1: 0x000a, 0x33b2: 0x000a, 0x33b3: 0x000a, 0x33b4: 0x000a, 0x33b5: 0x000a, + 0x33b6: 0x000a, 0x33b7: 0x000a, 0x33b8: 0x000a, 0x33b9: 0x000a, 0x33ba: 
0x000a, 0x33bb: 0x000a, + 0x33bc: 0x000a, 0x33bd: 0x000a, 0x33be: 0x000a, 0x33bf: 0x000a, + // Block 0xcf, offset 0x33c0 + 0x33c0: 0x000a, 0x33c1: 0x000a, 0x33c2: 0x000a, 0x33c3: 0x000a, 0x33c4: 0x000a, 0x33c5: 0x000a, + 0x33c6: 0x000a, 0x33c7: 0x000a, 0x33c8: 0x000a, 0x33c9: 0x000a, 0x33ca: 0x000a, 0x33cb: 0x000a, + 0x33cc: 0x000a, 0x33cd: 0x000a, 0x33ce: 0x000a, 0x33cf: 0x000a, 0x33d0: 0x000a, 0x33d1: 0x000a, + 0x33d2: 0x000a, 0x33d3: 0x000a, + 0x33e0: 0x000a, 0x33e1: 0x000a, 0x33e2: 0x000a, 0x33e3: 0x000a, + 0x33e4: 0x000a, 0x33e5: 0x000a, 0x33e6: 0x000a, 0x33e7: 0x000a, 0x33e8: 0x000a, 0x33e9: 0x000a, + 0x33ea: 0x000a, 0x33eb: 0x000a, 0x33ec: 0x000a, 0x33ed: 0x000a, 0x33ee: 0x000a, + 0x33f1: 0x000a, 0x33f2: 0x000a, 0x33f3: 0x000a, 0x33f4: 0x000a, 0x33f5: 0x000a, + 0x33f6: 0x000a, 0x33f7: 0x000a, 0x33f8: 0x000a, 0x33f9: 0x000a, 0x33fa: 0x000a, 0x33fb: 0x000a, + 0x33fc: 0x000a, 0x33fd: 0x000a, 0x33fe: 0x000a, 0x33ff: 0x000a, + // Block 0xd0, offset 0x3400 + 0x3401: 0x000a, 0x3402: 0x000a, 0x3403: 0x000a, 0x3404: 0x000a, 0x3405: 0x000a, + 0x3406: 0x000a, 0x3407: 0x000a, 0x3408: 0x000a, 0x3409: 0x000a, 0x340a: 0x000a, 0x340b: 0x000a, + 0x340c: 0x000a, 0x340d: 0x000a, 0x340e: 0x000a, 0x340f: 0x000a, 0x3411: 0x000a, + 0x3412: 0x000a, 0x3413: 0x000a, 0x3414: 0x000a, 0x3415: 0x000a, 0x3416: 0x000a, 0x3417: 0x000a, + 0x3418: 0x000a, 0x3419: 0x000a, 0x341a: 0x000a, 0x341b: 0x000a, 0x341c: 0x000a, 0x341d: 0x000a, + 0x341e: 0x000a, 0x341f: 0x000a, 0x3420: 0x000a, 0x3421: 0x000a, 0x3422: 0x000a, 0x3423: 0x000a, + 0x3424: 0x000a, 0x3425: 0x000a, 0x3426: 0x000a, 0x3427: 0x000a, 0x3428: 0x000a, 0x3429: 0x000a, + 0x342a: 0x000a, 0x342b: 0x000a, 0x342c: 0x000a, 0x342d: 0x000a, 0x342e: 0x000a, 0x342f: 0x000a, + 0x3430: 0x000a, 0x3431: 0x000a, 0x3432: 0x000a, 0x3433: 0x000a, 0x3434: 0x000a, 0x3435: 0x000a, + // Block 0xd1, offset 0x3440 + 0x3440: 0x0002, 0x3441: 0x0002, 0x3442: 0x0002, 0x3443: 0x0002, 0x3444: 0x0002, 0x3445: 0x0002, + 0x3446: 0x0002, 0x3447: 0x0002, 0x3448: 0x0002, 0x3449: 0x0002, 0x344a: 0x0002, 0x344b: 0x000a, + 0x344c: 0x000a, + // Block 0xd2, offset 0x3480 + 0x34aa: 0x000a, 0x34ab: 0x000a, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000a, 0x34c1: 0x000a, 0x34c2: 0x000a, 0x34c3: 0x000a, 0x34c4: 0x000a, 0x34c5: 0x000a, + 0x34c6: 0x000a, 0x34c7: 0x000a, 0x34c8: 0x000a, 0x34c9: 0x000a, 0x34ca: 0x000a, 0x34cb: 0x000a, + 0x34cc: 0x000a, 0x34cd: 0x000a, 0x34ce: 0x000a, 0x34cf: 0x000a, 0x34d0: 0x000a, 0x34d1: 0x000a, + 0x34d2: 0x000a, + 0x34e0: 0x000a, 0x34e1: 0x000a, 0x34e2: 0x000a, 0x34e3: 0x000a, + 0x34e4: 0x000a, 0x34e5: 0x000a, 0x34e6: 0x000a, 0x34e7: 0x000a, 0x34e8: 0x000a, 0x34e9: 0x000a, + 0x34ea: 0x000a, 0x34eb: 0x000a, 0x34ec: 0x000a, + 0x34f0: 0x000a, 0x34f1: 0x000a, 0x34f2: 0x000a, 0x34f3: 0x000a, 0x34f4: 0x000a, 0x34f5: 0x000a, + 0x34f6: 0x000a, + // Block 0xd4, offset 0x3500 + 0x3500: 0x000a, 0x3501: 0x000a, 0x3502: 0x000a, 0x3503: 0x000a, 0x3504: 0x000a, 0x3505: 0x000a, + 0x3506: 0x000a, 0x3507: 0x000a, 0x3508: 0x000a, 0x3509: 0x000a, 0x350a: 0x000a, 0x350b: 0x000a, + 0x350c: 0x000a, 0x350d: 0x000a, 0x350e: 0x000a, 0x350f: 0x000a, 0x3510: 0x000a, 0x3511: 0x000a, + 0x3512: 0x000a, 0x3513: 0x000a, 0x3514: 0x000a, + // Block 0xd5, offset 0x3540 + 0x3540: 0x000a, 0x3541: 0x000a, 0x3542: 0x000a, 0x3543: 0x000a, 0x3544: 0x000a, 0x3545: 0x000a, + 0x3546: 0x000a, 0x3547: 0x000a, 0x3548: 0x000a, 0x3549: 0x000a, 0x354a: 0x000a, 0x354b: 0x000a, + 0x3550: 0x000a, 0x3551: 0x000a, + 0x3552: 0x000a, 0x3553: 0x000a, 0x3554: 0x000a, 0x3555: 0x000a, 0x3556: 0x000a, 0x3557: 0x000a, + 0x3558: 
0x000a, 0x3559: 0x000a, 0x355a: 0x000a, 0x355b: 0x000a, 0x355c: 0x000a, 0x355d: 0x000a, + 0x355e: 0x000a, 0x355f: 0x000a, 0x3560: 0x000a, 0x3561: 0x000a, 0x3562: 0x000a, 0x3563: 0x000a, + 0x3564: 0x000a, 0x3565: 0x000a, 0x3566: 0x000a, 0x3567: 0x000a, 0x3568: 0x000a, 0x3569: 0x000a, + 0x356a: 0x000a, 0x356b: 0x000a, 0x356c: 0x000a, 0x356d: 0x000a, 0x356e: 0x000a, 0x356f: 0x000a, + 0x3570: 0x000a, 0x3571: 0x000a, 0x3572: 0x000a, 0x3573: 0x000a, 0x3574: 0x000a, 0x3575: 0x000a, + 0x3576: 0x000a, 0x3577: 0x000a, 0x3578: 0x000a, 0x3579: 0x000a, 0x357a: 0x000a, 0x357b: 0x000a, + 0x357c: 0x000a, 0x357d: 0x000a, 0x357e: 0x000a, 0x357f: 0x000a, + // Block 0xd6, offset 0x3580 + 0x3580: 0x000a, 0x3581: 0x000a, 0x3582: 0x000a, 0x3583: 0x000a, 0x3584: 0x000a, 0x3585: 0x000a, + 0x3586: 0x000a, 0x3587: 0x000a, + 0x3590: 0x000a, 0x3591: 0x000a, + 0x3592: 0x000a, 0x3593: 0x000a, 0x3594: 0x000a, 0x3595: 0x000a, 0x3596: 0x000a, 0x3597: 0x000a, + 0x3598: 0x000a, 0x3599: 0x000a, + 0x35a0: 0x000a, 0x35a1: 0x000a, 0x35a2: 0x000a, 0x35a3: 0x000a, + 0x35a4: 0x000a, 0x35a5: 0x000a, 0x35a6: 0x000a, 0x35a7: 0x000a, 0x35a8: 0x000a, 0x35a9: 0x000a, + 0x35aa: 0x000a, 0x35ab: 0x000a, 0x35ac: 0x000a, 0x35ad: 0x000a, 0x35ae: 0x000a, 0x35af: 0x000a, + 0x35b0: 0x000a, 0x35b1: 0x000a, 0x35b2: 0x000a, 0x35b3: 0x000a, 0x35b4: 0x000a, 0x35b5: 0x000a, + 0x35b6: 0x000a, 0x35b7: 0x000a, 0x35b8: 0x000a, 0x35b9: 0x000a, 0x35ba: 0x000a, 0x35bb: 0x000a, + 0x35bc: 0x000a, 0x35bd: 0x000a, 0x35be: 0x000a, 0x35bf: 0x000a, + // Block 0xd7, offset 0x35c0 + 0x35c0: 0x000a, 0x35c1: 0x000a, 0x35c2: 0x000a, 0x35c3: 0x000a, 0x35c4: 0x000a, 0x35c5: 0x000a, + 0x35c6: 0x000a, 0x35c7: 0x000a, + 0x35d0: 0x000a, 0x35d1: 0x000a, + 0x35d2: 0x000a, 0x35d3: 0x000a, 0x35d4: 0x000a, 0x35d5: 0x000a, 0x35d6: 0x000a, 0x35d7: 0x000a, + 0x35d8: 0x000a, 0x35d9: 0x000a, 0x35da: 0x000a, 0x35db: 0x000a, 0x35dc: 0x000a, 0x35dd: 0x000a, + 0x35de: 0x000a, 0x35df: 0x000a, 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, + 0x35e4: 0x000a, 0x35e5: 0x000a, 0x35e6: 0x000a, 0x35e7: 0x000a, 0x35e8: 0x000a, 0x35e9: 0x000a, + 0x35ea: 0x000a, 0x35eb: 0x000a, 0x35ec: 0x000a, 0x35ed: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3610: 0x000a, 0x3611: 0x000a, + 0x3612: 0x000a, 0x3613: 0x000a, 0x3614: 0x000a, 0x3615: 0x000a, 0x3616: 0x000a, 0x3617: 0x000a, + 0x3618: 0x000a, 0x3619: 0x000a, 0x361a: 0x000a, 0x361b: 0x000a, 0x361c: 0x000a, 0x361d: 0x000a, + 0x361e: 0x000a, 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, + 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, + 0x3630: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, + 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, 0x3639: 0x000a, 0x363a: 0x000a, 0x363b: 0x000a, + 0x363c: 0x000a, 0x363d: 0x000a, 0x363e: 0x000a, + // Block 0xd9, offset 0x3640 + 0x3640: 0x000a, 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, + 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, + 0x3650: 0x000a, 0x3651: 0x000a, + 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, 0x3655: 0x000a, 0x3656: 0x000a, 0x3657: 0x000a, + 0x3658: 0x000a, 0x3659: 0x000a, 0x365a: 0x000a, 0x365b: 0x000a, 0x365c: 0x000a, 0x365d: 0x000a, + 0x365e: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x000a, 0x3681: 0x000a, 0x3682: 0x000a, 0x3683: 0x000a, 0x3684: 0x000a, 0x3685: 0x000a, + 0x3686: 0x000a, 0x3687: 0x000a, 0x3688: 0x000a, 0x3689: 0x000a, 0x368a: 0x000a, 0x368b: 0x000a, + 0x368c: 0x000a, 0x368d: 0x000a, 0x368e: 0x000a, 0x368f: 0x000a, 0x3690: 
0x000a, 0x3691: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36fe: 0x000b, 0x36ff: 0x000b, + // Block 0xdc, offset 0x3700 + 0x3700: 0x000b, 0x3701: 0x000b, 0x3702: 0x000b, 0x3703: 0x000b, 0x3704: 0x000b, 0x3705: 0x000b, + 0x3706: 0x000b, 0x3707: 0x000b, 0x3708: 0x000b, 0x3709: 0x000b, 0x370a: 0x000b, 0x370b: 0x000b, + 0x370c: 0x000b, 0x370d: 0x000b, 0x370e: 0x000b, 0x370f: 0x000b, 0x3710: 0x000b, 0x3711: 0x000b, + 0x3712: 0x000b, 0x3713: 0x000b, 0x3714: 0x000b, 0x3715: 0x000b, 0x3716: 0x000b, 0x3717: 0x000b, + 0x3718: 0x000b, 0x3719: 0x000b, 0x371a: 0x000b, 0x371b: 0x000b, 0x371c: 0x000b, 0x371d: 0x000b, + 0x371e: 0x000b, 0x371f: 0x000b, 0x3720: 0x000b, 0x3721: 0x000b, 0x3722: 0x000b, 0x3723: 0x000b, + 0x3724: 0x000b, 0x3725: 0x000b, 0x3726: 0x000b, 0x3727: 0x000b, 0x3728: 0x000b, 0x3729: 0x000b, + 0x372a: 0x000b, 0x372b: 0x000b, 0x372c: 0x000b, 0x372d: 0x000b, 0x372e: 0x000b, 0x372f: 0x000b, + 0x3730: 0x000b, 0x3731: 0x000b, 0x3732: 0x000b, 0x3733: 0x000b, 0x3734: 0x000b, 0x3735: 0x000b, + 0x3736: 0x000b, 0x3737: 0x000b, 0x3738: 0x000b, 0x3739: 0x000b, 0x373a: 0x000b, 0x373b: 0x000b, + 0x373c: 0x000b, 0x373d: 0x000b, 0x373e: 0x000b, 0x373f: 0x000b, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000c, 0x3741: 0x000c, 0x3742: 0x000c, 0x3743: 0x000c, 0x3744: 0x000c, 0x3745: 0x000c, + 0x3746: 0x000c, 0x3747: 0x000c, 0x3748: 0x000c, 0x3749: 0x000c, 0x374a: 0x000c, 0x374b: 0x000c, + 0x374c: 0x000c, 0x374d: 0x000c, 0x374e: 0x000c, 0x374f: 0x000c, 0x3750: 0x000c, 0x3751: 0x000c, + 0x3752: 0x000c, 0x3753: 0x000c, 0x3754: 0x000c, 0x3755: 0x000c, 0x3756: 0x000c, 0x3757: 0x000c, + 0x3758: 0x000c, 0x3759: 0x000c, 0x375a: 0x000c, 0x375b: 0x000c, 0x375c: 0x000c, 0x375d: 0x000c, + 0x375e: 0x000c, 0x375f: 0x000c, 0x3760: 0x000c, 0x3761: 0x000c, 0x3762: 0x000c, 0x3763: 0x000c, + 0x3764: 0x000c, 0x3765: 0x000c, 0x3766: 0x000c, 0x3767: 0x000c, 0x3768: 0x000c, 0x3769: 0x000c, + 0x376a: 0x000c, 0x376b: 0x000c, 0x376c: 0x000c, 0x376d: 0x000c, 0x376e: 0x000c, 0x376f: 0x000c, + 0x3770: 0x000b, 0x3771: 0x000b, 0x3772: 0x000b, 0x3773: 0x000b, 0x3774: 0x000b, 0x3775: 0x000b, + 0x3776: 0x000b, 0x3777: 0x000b, 0x3778: 0x000b, 0x3779: 0x000b, 0x377a: 0x000b, 0x377b: 0x000b, + 0x377c: 0x000b, 0x377d: 0x000b, 0x377e: 0x000b, 0x377f: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x59, + 0x190: 0x5a, 0x191: 0x5b, 0x192: 0x5c, 0x193: 0x5d, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5e, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5f, 0x19e: 0x54, 0x19f: 0x60, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x61, 0x1a7: 0x62, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x63, 0x1ae: 0x64, 0x1af: 0x65, + 0x1b3: 0x66, 0x1b5: 0x67, 0x1b7: 0x68, + 0x1b8: 0x69, 0x1b9: 0x6a, 0x1ba: 0x6b, 0x1bb: 0x6c, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6d, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6e, 0x1c2: 0x6f, 0x1c3: 0x70, 0x1c7: 0x71, + 0x1c8: 0x72, 0x1c9: 0x73, 0x1ca: 0x74, 0x1cb: 0x75, 0x1cd: 0x76, 0x1cf: 0x77, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x78, 0x253: 0x79, + 0x258: 0x7a, 0x259: 0x7b, 0x25a: 0x7c, 0x25b: 0x7d, 0x25c: 0x7e, 0x25e: 0x7f, + 0x260: 0x80, 0x261: 0x81, 0x263: 0x82, 0x264: 0x83, 0x265: 0x84, 0x266: 0x85, 0x267: 0x86, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26f: 0x8b, + // Block 0xa, offset 0x280 + 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8e, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8f, + 0x2b8: 0x90, 0x2b9: 0x91, 0x2ba: 0x0e, 0x2bb: 0x92, 0x2bc: 0x93, 0x2bd: 0x94, 0x2bf: 0x95, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x96, 0x2c5: 0x54, 0x2c6: 0x97, 0x2c7: 0x98, + 0x2cb: 0x99, 0x2cd: 0x9a, + 0x2e0: 0x9b, 0x2e1: 0x9b, 0x2e2: 0x9b, 0x2e3: 0x9b, 0x2e4: 0x9c, 0x2e5: 0x9b, 0x2e6: 0x9b, 0x2e7: 0x9b, + 0x2e8: 0x9d, 0x2e9: 0x9b, 0x2ea: 0x9b, 0x2eb: 0x9e, 0x2ec: 0x9f, 0x2ed: 0x9b, 0x2ee: 0x9b, 0x2ef: 0x9b, + 0x2f0: 0x9b, 0x2f1: 0x9b, 0x2f2: 0x9b, 0x2f3: 0x9b, 0x2f4: 0x9b, 0x2f5: 0x9b, 0x2f6: 0x9b, 0x2f7: 0x9b, + 0x2f8: 0x9b, 0x2f9: 0xa0, 0x2fa: 0x9b, 0x2fb: 0x9b, 0x2fc: 0x9b, 0x2fd: 0x9b, 0x2fe: 0x9b, 0x2ff: 0x9b, + // Block 0xc, offset 0x300 + 0x300: 0xa1, 0x301: 0xa2, 0x302: 0xa3, 0x304: 0xa4, 0x305: 0xa5, 
0x306: 0xa6, 0x307: 0xa7, + 0x308: 0xa8, 0x30b: 0xa9, 0x30c: 0xaa, 0x30d: 0xab, + 0x310: 0xac, 0x311: 0xad, 0x312: 0xae, 0x313: 0xaf, 0x316: 0xb0, 0x317: 0xb1, + 0x318: 0xb2, 0x319: 0xb3, 0x31a: 0xb4, 0x31c: 0xb5, + 0x330: 0xb6, 0x332: 0xb7, + // Block 0xd, offset 0x340 + 0x36b: 0xb8, 0x36c: 0xb9, + 0x37e: 0xba, + // Block 0xe, offset 0x380 + 0x3b2: 0xbb, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xbc, 0x3c6: 0xbd, + 0x3c8: 0x54, 0x3c9: 0xbe, 0x3cc: 0x54, 0x3cd: 0xbf, + 0x3db: 0xc0, 0x3dc: 0xc1, 0x3dd: 0xc2, 0x3de: 0xc3, 0x3df: 0xc4, + 0x3e8: 0xc5, 0x3e9: 0xc6, 0x3ea: 0xc7, + // Block 0x10, offset 0x400 + 0x400: 0xc8, + 0x420: 0x9b, 0x421: 0x9b, 0x422: 0x9b, 0x423: 0xc9, 0x424: 0x9b, 0x425: 0xca, 0x426: 0x9b, 0x427: 0x9b, + 0x428: 0x9b, 0x429: 0x9b, 0x42a: 0x9b, 0x42b: 0x9b, 0x42c: 0x9b, 0x42d: 0x9b, 0x42e: 0x9b, 0x42f: 0x9b, + 0x430: 0x9b, 0x431: 0x9b, 0x432: 0x9b, 0x433: 0x9b, 0x434: 0x9b, 0x435: 0x9b, 0x436: 0x9b, 0x437: 0x9b, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xcb, 0x43c: 0x9b, 0x43d: 0x9b, 0x43e: 0x9b, 0x43f: 0x9b, + // Block 0x11, offset 0x440 + 0x440: 0xcc, 0x441: 0x54, 0x442: 0xcd, 0x443: 0xce, 0x444: 0xcf, 0x445: 0xd0, + 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xd1, 0x45c: 0x54, 0x45d: 0x6c, 0x45e: 0x54, 0x45f: 0xd2, + 0x460: 0xd3, 0x461: 0xd4, 0x462: 0xd5, 0x464: 0xd6, 0x465: 0xd7, 0x466: 0xd8, 0x467: 0x36, + 0x47f: 0xd9, + // Block 0x12, offset 0x480 + 0x4bf: 0xd9, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xda, 0x541: 0xda, 0x542: 0xda, 0x543: 0xda, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xdb, + 0x548: 0xda, 0x549: 0xda, 0x54a: 0xda, 0x54b: 0xda, 0x54c: 0xda, 0x54d: 0xda, 0x54e: 0xda, 0x54f: 0xda, + 0x550: 0xda, 0x551: 0xda, 0x552: 0xda, 0x553: 0xda, 0x554: 0xda, 0x555: 0xda, 0x556: 0xda, 0x557: 0xda, + 0x558: 0xda, 0x559: 0xda, 0x55a: 0xda, 0x55b: 0xda, 0x55c: 0xda, 0x55d: 0xda, 0x55e: 0xda, 0x55f: 0xda, + 0x560: 0xda, 0x561: 0xda, 0x562: 0xda, 0x563: 0xda, 0x564: 0xda, 0x565: 0xda, 0x566: 0xda, 0x567: 0xda, + 0x568: 0xda, 0x569: 0xda, 0x56a: 0xda, 0x56b: 0xda, 0x56c: 0xda, 0x56d: 0xda, 0x56e: 0xda, 0x56f: 0xda, + 0x570: 0xda, 0x571: 0xda, 0x572: 0xda, 0x573: 0xda, 0x574: 0xda, 0x575: 0xda, 0x576: 0xda, 0x577: 0xda, + 0x578: 0xda, 0x579: 0xda, 0x57a: 0xda, 0x57b: 0xda, 0x57c: 0xda, 0x57d: 0xda, 0x57e: 0xda, 0x57f: 0xda, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 15800 bytes (15KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go new file mode 100644 index 000000000..4c459c4b7 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -0,0 +1,60 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package bidi + +// Class is the Unicode BiDi class. Each rune has a single class. 
+type Class uint + +const ( + L Class = iota // LeftToRight + R // RightToLeft + EN // EuropeanNumber + ES // EuropeanSeparator + ET // EuropeanTerminator + AN // ArabicNumber + CS // CommonSeparator + B // ParagraphSeparator + S // SegmentSeparator + WS // WhiteSpace + ON // OtherNeutral + BN // BoundaryNeutral + NSM // NonspacingMark + AL // ArabicLetter + Control // Control LRO - PDI + + numClass + + LRO // LeftToRightOverride + RLO // RightToLeftOverride + LRE // LeftToRightEmbedding + RLE // RightToLeftEmbedding + PDF // PopDirectionalFormat + LRI // LeftToRightIsolate + RLI // RightToLeftIsolate + FSI // FirstStrongIsolate + PDI // PopDirectionalIsolate + + unknownClass = ^Class(0) +) + +var controlToClass = map[rune]Class{ + 0x202D: LRO, // LeftToRightOverride, + 0x202E: RLO, // RightToLeftOverride, + 0x202A: LRE, // LeftToRightEmbedding, + 0x202B: RLE, // RightToLeftEmbedding, + 0x202C: PDF, // PopDirectionalFormat, + 0x2066: LRI, // LeftToRightIsolate, + 0x2067: RLI, // RightToLeftIsolate, + 0x2068: FSI, // FirstStrongIsolate, + 0x2069: PDI, // PopDirectionalIsolate, +} + +// A trie entry has the following bits: +// 7..5 XOR mask for brackets +// 4 1: Bracket open, 0: Bracket close +// 3..0 Class type + +const ( + openMask = 0x10 + xorMaskShift = 5 +) diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go new file mode 100644 index 000000000..2382f4d6d --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/base.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "encoding/xml" + "regexp" + "strconv" +) + +// Elem is implemented by every XML element. +type Elem interface { + setEnclosing(Elem) + setName(string) + enclosing() Elem + + GetCommon() *Common +} + +type hidden struct { + CharData string `xml:",chardata"` + Alias *struct { + Common + Source string `xml:"source,attr"` + Path string `xml:"path,attr"` + } `xml:"alias"` + Def *struct { + Common + Choice string `xml:"choice,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + } `xml:"default"` +} + +// Common holds several of the most common attributes and sub elements +// of an XML element. +type Common struct { + XMLName xml.Name + name string + enclElem Elem + Type string `xml:"type,attr,omitempty"` + Reference string `xml:"reference,attr,omitempty"` + Alt string `xml:"alt,attr,omitempty"` + ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` + Draft string `xml:"draft,attr,omitempty"` + hidden +} + +// Default returns the default type to select from the enclosed list +// or "" if no default value is specified. +func (e *Common) Default() string { + if e.Def == nil { + return "" + } + if e.Def.Choice != "" { + return e.Def.Choice + } else if e.Def.Type != "" { + // Type is still used by the default element in collation. + return e.Def.Type + } + return "" +} + +// GetCommon returns e. It is provided such that Common implements Elem. +func (e *Common) GetCommon() *Common { + return e +} + +// Data returns the character data accumulated for this element. 
+func (e *Common) Data() string { + e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) + return e.CharData +} + +func (e *Common) setName(s string) { + e.name = s +} + +func (e *Common) enclosing() Elem { + return e.enclElem +} + +func (e *Common) setEnclosing(en Elem) { + e.enclElem = en +} + +// Escape characters that can be escaped without further escaping the string. +var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) + +// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string. +// It assumes the input string is correctly formatted. +func replaceUnicode(s string) string { + if s[1] == '#' { + r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) + return string(r) + } + r, _, _, _ := strconv.UnquoteChar(s, 0) + return string(r) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go new file mode 100644 index 000000000..2197f8ac2 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go @@ -0,0 +1,130 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run makexml.go -output xml.go + +// Package cldr provides a parser for LDML and related XML formats. +// This package is intended to be used by the table generation tools +// for the various internationalization-related packages. +// As the XML types are generated from the CLDR DTD, and as the CLDR standard +// is periodically amended, this package may change considerably over time. +// This mostly means that data may appear and disappear between versions. +// That is, old code should keep compiling for newer versions, but data +// may have moved or changed. +// CLDR version 22 is the first version supported by this package. +// Older versions may not work. +package cldr // import "golang.org/x/text/unicode/cldr" + +import ( + "fmt" + "sort" +) + +// CLDR provides access to parsed data of the Unicode Common Locale Data Repository. +type CLDR struct { + parent map[string][]string + locale map[string]*LDML + resolved map[string]*LDML + bcp47 *LDMLBCP47 + supp *SupplementalData +} + +func makeCLDR() *CLDR { + return &CLDR{ + parent: make(map[string][]string), + locale: make(map[string]*LDML), + resolved: make(map[string]*LDML), + bcp47: &LDMLBCP47{}, + supp: &SupplementalData{}, + } +} + +// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned. +func (cldr *CLDR) BCP47() *LDMLBCP47 { + return nil +} + +// Draft indicates the draft level of an element. +type Draft int + +const ( + Approved Draft = iota + Contributed + Provisional + Unconfirmed +) + +var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""} + +// ParseDraft returns the Draft value corresponding to the given string. The +// empty string corresponds to Approved. +func ParseDraft(level string) (Draft, error) { + if level == "" { + return Approved, nil + } + for i, s := range drafts { + if level == s { + return Unconfirmed - Draft(i), nil + } + } + return Approved, fmt.Errorf("cldr: unknown draft level %q", level) +} + +func (d Draft) String() string { + return drafts[len(drafts)-1-int(d)] +} + +// SetDraftLevel sets which draft levels to include in the evaluated LDML. +// Any draft element for which the draft level is higher than lev will be excluded. 
+// If multiple draft levels are available for a single element, the one with the +// lowest draft level will be selected, unless preferDraft is true, in which case +// the highest draft will be chosen. +// It is assumed that the underlying LDML is canonicalized. +func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) { + // TODO: implement + cldr.resolved = make(map[string]*LDML) +} + +// RawLDML returns the LDML XML for id in unresolved form. +// id must be one of the strings returned by Locales. +func (cldr *CLDR) RawLDML(loc string) *LDML { + return cldr.locale[loc] +} + +// LDML returns the fully resolved LDML XML for loc, which must be one of +// the strings returned by Locales. +func (cldr *CLDR) LDML(loc string) (*LDML, error) { + return cldr.resolve(loc) +} + +// Supplemental returns the parsed supplemental data. If no such data was parsed, +// nil is returned. +func (cldr *CLDR) Supplemental() *SupplementalData { + return cldr.supp +} + +// Locales returns the locales for which there exist files. +// Valid sublocales for which there is no file are not included. +// The root locale is always sorted first. +func (cldr *CLDR) Locales() []string { + loc := []string{"root"} + hasRoot := false + for l, _ := range cldr.locale { + if l == "root" { + hasRoot = true + continue + } + loc = append(loc, l) + } + sort.Strings(loc[1:]) + if !hasRoot { + return loc[1:] + } + return loc +} + +// Get fills in the fields of x based on the XPath path. +func Get(e Elem, path string) (res Elem, err error) { + return walkXPath(e, path) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go new file mode 100644 index 000000000..80ee28d79 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/collate.go @@ -0,0 +1,359 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "bufio" + "encoding/xml" + "errors" + "fmt" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// RuleProcessor can be passed to Collator's Process method, which +// parses the rules and calls the respective method for each rule found. +type RuleProcessor interface { + Reset(anchor string, before int) error + Insert(level int, str, context, extend string) error + Index(id string) +} + +const ( + // cldrIndex is a Unicode-reserved sentinel value used to mark the start + // of a grouping within an index. + // We ignore any rule that starts with this rune. + // See http://unicode.org/reports/tr35/#Collation_Elements for details. + cldrIndex = "\uFDD0" + + // specialAnchor is the format in which to represent logical reset positions, + // such as "first tertiary ignorable". + specialAnchor = "<%s/>" +) + +// Process parses the rules for the tailorings of this collation +// and calls the respective methods of p for each rule found. +func (c Collation) Process(p RuleProcessor) (err error) { + if len(c.Cr) > 0 { + if len(c.Cr) > 1 { + return fmt.Errorf("multiple cr elements, want 0 or 1") + } + return processRules(p, c.Cr[0].Data()) + } + if c.Rules.Any != nil { + return c.processXML(p) + } + return errors.New("no tailoring data") +} + +// processRules parses rules in the Collation Rule Syntax defined in +// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings. 
+func processRules(p RuleProcessor, s string) (err error) { + chk := func(s string, e error) string { + if err == nil { + err = e + } + return s + } + i := 0 // Save the line number for use after the loop. + scanner := bufio.NewScanner(strings.NewReader(s)) + for ; scanner.Scan() && err == nil; i++ { + for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) { + level := 5 + var ch byte + switch ch, s = s[0], s[1:]; ch { + case '&': // followed by or '[' ']' + if s = skipSpace(s); consume(&s, '[') { + s = chk(parseSpecialAnchor(p, s)) + } else { + s = chk(parseAnchor(p, 0, s)) + } + case '<': // sort relation '<'{1,4}, optionally followed by '*'. + for level = 1; consume(&s, '<'); level++ { + } + if level > 4 { + err = fmt.Errorf("level %d > 4", level) + } + fallthrough + case '=': // identity relation, optionally followed by *. + if consume(&s, '*') { + s = chk(parseSequence(p, level, s)) + } else { + s = chk(parseOrder(p, level, s)) + } + default: + chk("", fmt.Errorf("illegal operator %q", ch)) + break + } + } + } + if chk("", scanner.Err()); err != nil { + return fmt.Errorf("%d: %v", i, err) + } + return nil +} + +// parseSpecialAnchor parses the anchor syntax which is either of the form +// ['before' ] +// or +// [